diff --git "a/2272.jsonl" "b/2272.jsonl" new file mode 100644--- /dev/null +++ "b/2272.jsonl" @@ -0,0 +1,719 @@ +{"seq_id":"565929456","text":"import re\nfrom extra_lib.BeautifulSoup import BeautifulStoneSoup, Tag\n\nf = open('./example/example.xml', 'r')\nxml = [ l.strip() for l in f.readlines() ]\nxml = ''.join(xml)\nsoup = BeautifulStoneSoup(xml)\nf.close()\n# Begin XML Translation Grammar\n# result -> (course*) \n# result = BeautifulStoneSoup('')\n# courses -> (cname, student*)\n# courses = [ (c.coursename, c) for c in soup.findAll('course') ]\n\nres = ''\nfor c in soup.findAll('course'):\n res += ''\n s = c.findPrevious('student')\n res += str(c.coursename)\n res += str(s.find('name'))\n res += str(s.grade)\n res += ''\n#print result.prettify()\nres += ''\nresult = BeautifulStoneSoup(res)\nresult.prettify()\n# student -> (name, mark)\n# students = [ (s.find('name'), s.grade, s) for s in [ c.findPrevious('student') for (name, c) in courses] ]\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"442135902","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport time\n\n'''\nusing selenium to scrape some scrape some dynamic content\n'''\n\n\ndef getDynamicJSContent(url, diver):\n driver.get('https://www.wta.org/go-outside/trip-reports')\n bsObj = BeautifulSoup(driver.page_source, 'html.parser')\n dynamicContent = bsObj.find(\"div\", {\"class\": \"js-tab-target\"})\n return dynamicContent\n\n\nurl = 'https://www.wta.org/go-outside/trip-reports'\ndriver = webdriver.Firefox()\n\ncontent = None\nwhile content == None:\n content = getDynamicJSContent(url, driver)\n if content == None:\n print(\"Inconsistent!\")\n time.sleep(2)\n\npagination = content.find(\"nav\", {\"class\": \"pagination\"})\npages = content.find(\"nav\", {\"class\": \"pagination\"})\nlinks = pages.find_all('a')\nnpages = int(links[-2].text)\n","sub_path":"de/snotel/src/wta/selenium_scrape.py","file_name":"selenium_scrape.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"353254730","text":"# ---Stdlib---\nimport sys\nimport os\nfrom pathlib import Path\nfrom typing import List\n\n# ---Dependencies---\nimport torch\nfrom imageio import imread\nimport matplotlib.pyplot as plt\nimport image_slicer\nimport subprocess\n\nfrom packages.SAND_features.utils import ops\nfrom packages.SAND_features.models import Sand\n\n\ndef split_image(image_name: str) -> None:\n print(f'Splitting {image_name}')\n tiles = image_slicer.slice(image_name, 4)\n image_slicer.save_tiles(tiles)\n\ndef combine_images(output_path: str, img: str, model_name: str) -> None:\n print(f'Combining {img}')\n\n subprocess.run(['ffmpeg','-hide_banner','-i', img + \"_01_01.png\", '-vf', 'crop=1060:520:10:10,scale=1080:540', img + \"_01_01_N.png\"],cwd=output_path, shell=True)\n subprocess.run(['ffmpeg','-hide_banner','-i', img + \"_01_02.png\", '-vf', 'crop=1060:520:10:10,scale=1080:540', img + \"_01_02_N.png\"],cwd=output_path, shell=True)\n subprocess.run(['ffmpeg','-hide_banner','-i', img + \"_02_01.png\", '-vf', 'crop=1060:520:10:10,scale=1080:540', img + \"_02_01_N.png\"],cwd=output_path, shell=True)\n subprocess.run(['ffmpeg','-hide_banner','-i', img + \"_02_02.png\", '-vf', 'crop=1060:520:10:10,scale=1080:540', img + \"_02_02_N.png\"],cwd=output_path, shell=True)\n\n subprocess.run([\"magick\", \"convert\", 
img + \"_01_01_N.png\", img + \"_01_02_N.png\", \"+append\" ,\"temp.png\"], cwd=output_path, shell=True)\n subprocess.run([\"magick\", \"convert\", img + \"_02_01_N.png\", img + \"_02_02_N.png\", \"+append\" ,\"temp1.png\"], cwd=output_path, shell=True)\n if model_name == '':\n subprocess.run([\"magick\", \"convert\", \"temp.png\", \"temp1.png\", \"-append\" , img + \".png\"], cwd=output_path, shell=True)\n else:\n subprocess.run([\"magick\", \"convert\", \"temp.png\", \"temp1.png\", \"-append\" , model_name + \"_\" + img + \".png\"], cwd=output_path, shell=True)\n os.remove(output_path + \"/\" + img + \"_01_01_N.png\")\n os.remove(output_path + \"/\" + img + \"_01_02_N.png\")\n os.remove(output_path + \"/\" + img + \"_02_01_N.png\")\n os.remove(output_path + \"/\" + img + \"_02_02_N.png\")\n os.remove(output_path + \"/\" + img + \"_01_01.png\")\n os.remove(output_path + \"/\" + img + \"_01_02.png\")\n os.remove(output_path + \"/\" + img + \"_02_01.png\")\n os.remove(output_path + \"/\" + img + \"_02_02.png\")\n os.remove(output_path + \"/\" + \"temp.png\")\n os.remove(output_path + \"/\" + \"temp1.png\")\n\n\n\ndef sand_function(model_name: str, image_path: str, output_path: str, img_index_range: List[int] = None) -> None:\n root = Path(__file__) .parent # Path to repo\n if root not in sys.path:\n sys.path.insert(0, root) # Prepend to path so we can use these modules\n\n model_path = root/'ckpts'\n\n device = ops.get_device()\n\n ckpt_file = Path(model_path, model_name).with_suffix('.pt')\n\n model_name_trim = ''\n\n\n if model_name == '3/ckpt_G':\n model_name_trim = '3G'\n if model_name == '3/ckpt_L':\n model_name_trim = '3L'\n if model_name == '3/ckpt_GL':\n model_name_trim = '3GL'\n if model_name == '10/ckpt_G':\n model_name_trim = '10G'\n if model_name == '10/ckpt_L':\n model_name_trim = '10L'\n if model_name == '10/ckpt_GL':\n model_name_trim = '10GL'\n if model_name == '32/ckpt_G':\n model_name_trim = '32G'\n if model_name == '32/ckpt_L':\n model_name_trim = '32L'\n if model_name == '32/ckpt_GL':\n model_name_trim = '32GL'\n\n\n images = []\n for filename in os.listdir(image_path):\n\n if filename.endswith('.jpg'):\n\n split_image(image_path + '/'+filename)\n os.remove(image_path + '/'+filename)\n \n \n for filename in os.listdir(image_path):\n if filename.endswith('.png'):\n images.append(filename)\n\n images.sort()\n\n start_point = 0\n end_point = len(images) - 1\n if img_index_range:\n start_point = img_index_range[0]\n end_point = img_index_range[1]\n\n for i in range(start_point, end_point + 1):\n print(f'Processing {images[i]}.')\n img_file = Path(image_path + '/' + images[i])\n\n # Load image & convert to torch format\n img_np = imread(img_file)\n img_torch = ops.img2torch(img_np, batched=True).to(device)\n\n # Create & load model (single branch)\n model = Sand.from_ckpt(ckpt_file).to(device)\n model.eval()\n\n # Run inference\n with torch.no_grad():\n features_torch = model(img_torch)\n\n # Convert features into an images we can visualize (by PCA or normalizing)\n features_np = ops.fmap2img(features_torch).squeeze(0)\n plt.imsave(output_path + '/' + images[i], features_np)\n\n curr_img = ''.join(images[i].split())[:-10]\n\n if (i + 1) % 4 == 0:\n combine_images(output_path, curr_img, model_name_trim)\n # combine_images(image_path, curr_img, 
model_name_trim)\n\n\n\n\n","sub_path":"packages/SAND_features/sand_function.py","file_name":"sand_function.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"548152657","text":"import uuid\nfrom datetime import timedelta\n\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom wallet.models.accounts import Account\nfrom wallet.models.company_accounts import CompanyAccount\n\n\nclass Token(models.Model):\n token_status = [\n ('valid', 'Valid'),\n ('used', 'Used'),\n ]\n token_id = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)\n created = models.DateField(auto_now_add=True)\n companyToken = models.ForeignKey(\n to=CompanyAccount,\n related_name='fk_companyAccount_token',\n null=True,\n blank=True,\n on_delete=models.SET_NULL\n )\n employeeToken = models.ForeignKey(\n to=Account,\n related_name='fk_account_token',\n null=True,\n blank=True,\n on_delete=models.SET_NULL\n )\n status = models.CharField(max_length=5, choices=token_status, default='valid')\n\n def __str__(self):\n return f'{self.token_id}'\n\n @property\n def expired(self):\n return self.created + timedelta(days=365) < timezone.now().date()\n","sub_path":"backend/tokens/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"355835317","text":"from autoencoder import *\nfrom utils import *\nimport sys\nimport math\n\n\ndef run_autoencoder(fname, sname, epochs=15):\n imgs = load_array(fname)\n imgs = imgs[:, :, :, np.newaxis]\n train_size = int(math.ceil(len(imgs) * 85 / 100))\n train_imgs = imgs[:train_size]\n test_imgs = imgs[train_size:]\n train_imgs = normalise(train_imgs)\n test_imgs = normalise(test_imgs)\n\n ae_model = Autoencoder(train_imgs, train_imgs, test_imgs, test_imgs)\n\n ae_model.train(epochs=epochs)\n model = ae_model.model\n save_model(model, 'trad_ac_model')\n ae_model.plot_results()\n\n error_maps = ae_model.get_error_maps()\n\n sal_maps = load_array(sname)[train_size:]\n print('saliences loaded')\n\n compare_saliences(error_maps, sal_maps, show=True)\n\ndef run_sketch_autoencoder():\n imgs = load_array('Data-Sketch_images')\n imgs = imgs[:, :, :, np.newaxis]\n train_size = int(math.ceil(len(imgs) * 90 / 100))\n train_imgs = imgs[:train_size]\n test_imgs = imgs[train_size:]\n train_imgs = normalise(train_imgs)\n test_imgs = normalise(test_imgs)\n\n model = load_model('trad_sketch_model')\n ae_model = Autoencoder(train_imgs, train_imgs, test_imgs, test_imgs, model=model)\n\n ae_model.train(epochs=4)\n model = ae_model.model\n save_model(model, 'trad_sketch_model')\n ae_model.plot_results(N=7)\n\n error_maps = ae_model.get_error_maps()\n\n sal_maps = load_array('Data-Sketch_fixations')[train_size:]\n\n compare_saliences(error_maps, sal_maps, show=True)\n\ndef run_trained_autoencoder(fname, mname, sname, train=True):\n imgs = load_array(fname)\n imgs = imgs[:, :, :, np.newaxis]\n train_size = int(math.ceil(len(imgs) * 85 / 100))\n train_imgs = imgs[:train_size]\n test_imgs = imgs[train_size:]\n train_imgs = normalise(train_imgs)\n test_imgs = normalise(test_imgs)\n\n model = load_model(mname)\n ae_model = Autoencoder(train_imgs, train_imgs, test_imgs, test_imgs, model = model)\n\n if train:\n ae_model.train(epochs=20)\n model = ae_model.model\n save_model(model, 'trad_ac_model')\n\n ae_model.plot_results()\n error_maps = ae_model.get_error_maps()\n\n sal_maps 
= load_array(sname)[train_size:]\n print('saliences loaded')\n\n error_maps, sal_maps = shuffle_in_unison(error_maps, sal_maps)\n compare_two_images(error_maps[0], sal_maps[0])\n compare_two_images(error_maps[1], sal_maps[1])\n compare_two_images(error_maps[2], sal_maps[2])\n compare_two_images(error_maps[3], sal_maps[3])\n\n\ndef test_sal(fname, sname):\n imgs = load_array(fname)\n s_map = load_array(sname)\n print(imgs.shape)\n print(s_map.shape)\n\n\ndef test_sals(fname, sname, mname):\n imgs = load_array(fname)\n sal_maps = load_array(sname)\n model = load_model(mname)\n imgs = imgs[:, :, :, np.newaxis]\n train_size = int(math.ceil(len(imgs) * 85 / 100))\n ae_model = Autoencoder(imgs[:train_size], imgs[:train_size], imgs[train_size:], imgs[train_size:], model=model)\n ae_model.train(epochs=1)\n preds = ae_model.predict()\n plt.imshow(preds[0].squeeze())\n plt.show()\n error_maps = np.absolute(preds - imgs[train_size:])\n for i in range(6):\n img = error_maps[i].squeeze()\n plt.imshow(img)\n plt.show()\n\n\ndef main():\n fname = ''\n save_name = 'test_results'\n epochs = 10\n if len(sys.argv) >=2:\n train_name = sys.argv[1]\n if len(sys.argv) >=3:\n test_name = sys.argv[2]\n if len(sys.argv)>=4:\n save_name = sys.argv[3]\n if len(sys.argv)>=5:\n epochs = int(sys.argv[4])\n\n run_trained_autoencoder(train_name, test_name, save_name, train=False)\n #test_sals(train_name, test_name, save_name)\n #run_sketch_autoencoder()\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"407757828","text":"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# set a font that can render Chinese characters\nplt.rcParams['font.family'] = ['SimHei']\n# render minus signs correctly\nplt.rcParams['axes.unicode_minus'] = False\n\n\nn = 1024\nx = np.random.normal(0, 1, n)\ny = np.random.normal(0, 1, n)\n\nplt.scatter(x, y)\n\n# add the chart title\nplt.title('绘制散点图')\n\n# show the chart\nplt.show()","sub_path":"项目实战2:数据可视化与股票数据分析/part01 使用Matplotlib绘制图表/1-7 绘制Matplotlib散点图/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"212477665","text":"from django.db import models\nfrom django.utils.translation import ugettext as _\nimport enum\n# Create your models here.\nclass Status(enum.IntEnum):\n ERROR = 1\n WARNING = 2\n INFO = 3\n SUCCESS = 4\n FLAPPING = 5\n\n\nclass DisplayType(enum.IntEnum):\n WebMonitor = 1\n MonCall = 2\n\n\nclass DisplayCategory(models.Model):\n name = models.CharField(max_length=255, unique=True)\n\n def __str__(self):\n return self.name\n\n\nclass DisplayTag(models.Model):\n name = models.CharField(max_length=255, unique=True)\n\n def __str__(self):\n return self.name\n\n\nclass DisplayLine(models.Model):\n name = models.CharField(max_length=255, unique=True)\n category = models.ForeignKey(DisplayCategory, blank=True, null=True, on_delete=models.DO_NOTHING)\n tag = models.ManyToManyField(DisplayTag, blank=True)\n type = models.PositiveSmallIntegerField(choices=((x.value, x.name.title()) for x in DisplayType))\n last_update = models.DateTimeField(auto_now=True)\n state = models.PositiveSmallIntegerField(choices=((x.value, x.name.title()) for x in Status), default=-1)\n\n def __str__(self):\n return self.name\n\nclass DisplayAttribute(models.Model):\n name = models.CharField(max_length=255, help_text=_(\"Name of checking Object\"))\n description = 
models.TextField(blank=True, null=True)\n detailed_monitoring = models.TextField(blank=True, null=True, help_text=_(\"Monitoring Log\"))\n object_true_state = models.PositiveSmallIntegerField(choices=((x.value, x.name.title()) for x in Status))\n object_true_publish = models.BooleanField(default=False)\n object_false_state = models.PositiveSmallIntegerField(choices=((x.value, x.name.title()) for x in Status))\n object_false_publish = models.BooleanField(default=False)\n object_flapping_state = models.PositiveSmallIntegerField(choices=((x.value, x.name.title()) for x in Status))\n object_flapping_publish = models.BooleanField(default=False)\n state = models.PositiveSmallIntegerField()\n last_update = models.DateTimeField(auto_now=True)\n\n def set_status(self, status):\n\n if (status is True or status is 1 or status is \"success\") and self.object_true_publish is True:\n self.display_line.status = self.object_true_state\n self.display_line.save()\n self.state = 1\n self.save()\n\n if (status is False or status is 0 or status is \"error\" or status is \"failed\") and self.object_false_publish is True:\n self.display_line.status = self.object_false_state\n self.display_line.save()\n self.state = 2\n self.save()\n\n if (status is \"flapping\" or status is -1 or status is None) and self.object_flapping_publish is True:\n self.display_line.status = self.object_flapping_state\n self.display_line.save()\n self.state = 3\n self.save()\n\n class Meta:\n abstract = True\n\n\nclass MonitoringLog(models.Model):\n object_true_state = models.PositiveSmallIntegerField(choices=((x.value, x.name.title()) for x in Status))\n object_true_publish = models.BooleanField(default=False)\n object_false_state = models.PositiveSmallIntegerField(choices=((x.value, x.name.title()) for x in Status))\n object_false_publish = models.BooleanField(default=False)\n object_flapping_state = models.PositiveSmallIntegerField(choices=((x.value, x.name.title()) for x in Status))\n object_flapping_publish = models.BooleanField(default=False)\n\n log_entry = models.TextField()\n create_timestamp = models.DateTimeField(auto_now=True)","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"328284645","text":"import os\nimport yaml\nimport pytest\nfrom time import time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\nbase_url = os.environ['BASE_URL']\nnot_travis = not('TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true')\n\n\n@pytest.fixture\ndef selenium(selenium):\n selenium.maximize_window()\n return selenium\n\n\n@pytest.fixture\ndef login(selenium):\n creds = yaml.load(open(os.environ['CREDS_YML']))\n selenium.get(base_url)\n selenium.find_element_by_link_text('Login').click()\n selenium.find_element_by_id('id_username').send_keys(creds['username'])\n selenium.find_element_by_id('id_password').send_keys(creds['password'])\n selenium.find_element_by_xpath('//input[@type=\"submit\"]').click()\n assert_body_text(selenium, 'Logout')\n\n\ndef assert_body_text(selenium, *search_texts):\n for search_text in search_texts:\n try:\n WebDriverWait(selenium, 5).until(\n EC.text_to_be_present_in_element(\n (By.TAG_NAME, 'body'), search_text)\n )\n except TimeoutException:\n raise AssertionError(\n '\"%s\" not in body: 
\\n%s' % (\n search_text,\n selenium.find_element_by_tag_name('body').text\n ))\n\n\n# TESTS:\n\n\ndef test_login_not_required(selenium):\n selenium.get(base_url)\n assert_body_text(selenium, 'Collaboration', 'Statistics', 'About',\n 'Register', 'Login', 'Launch Pad', 'Data Sets',\n 'Analyses', 'Workflows')\n\n selenium.find_element_by_link_text('Statistics').click()\n assert_body_text(selenium, 'Users', 'Groups', 'Files',\n 'Data Sets', 'Workflows', 'Projects')\n\n selenium.find_element_by_link_text('About').click()\n assert_body_text(selenium, 'Background', 'Contact', 'Funding', 'Team',\n 'Most Recent Code for this Instance')\n # TODO: All sections are empty right now\n\n selenium.find_element_by_link_text('Register').click()\n assert_body_text(selenium, 'Sign Up', 'Register for an account',\n 'Indicates a required field',\n 'USERNAME', 'FIRST NAME', 'LAST NAME',\n 'AFFILIATION', 'EMAIL ADDRESS',\n 'PASSWORD (AGAIN)')\n\n selenium.find_element_by_name('username').send_keys('guest')\n selenium.find_element_by_xpath('//input[@type=\"submit\"]').click()\n assert_body_text(selenium, 'Please correct the errors below',\n 'A user with that username already exists',\n 'You must provide a First Name',\n 'You must provide a Last Name',\n 'You must provide an Affiliation',\n 'This field is required')\n\n stamp = str(time()) # Helps prevent collisions when running locally.\n selenium.find_element_by_name('username').send_keys(stamp)\n selenium.find_element_by_name('first_name').send_keys('first')\n selenium.find_element_by_name('last_name').send_keys('last')\n selenium.find_element_by_name('affiliation').send_keys('affiliation')\n selenium.find_element_by_name('email').send_keys('%s@example.org' % stamp)\n selenium.find_element_by_name('password1').send_keys('password')\n selenium.find_element_by_name('password2').send_keys('password')\n\n selenium.find_element_by_xpath('//input[@type=\"submit\"]').click()\n assert_body_text(selenium, 'Registration complete')\n\n if not_travis:\n pytest.set_trace()\n\n\ndef test_upload(selenium, login):\n assert_body_text(selenium, 'Upload', 'Logout')\n\n selenium.find_element_by_link_text('Upload').click()\n assert_body_text(selenium, 'Data Set Import',\n 'Tabular Metadata', 'ISA-Tab Metadata',\n 'PROVIDE METADATA FILE',\n 'Download an example', 'Choose delimiter', 'Select file')\n\n path = os.environ['UPLOAD']\n\n # TODO: File uploads did work in the old UI, but no longer.\n # Can we trigger the event Angular is looking for?\n\n selenium.find_element_by_name('tabular_file').send_keys(path)\n # selenium.execute_script('$(\"[name=tabular_file]\").change()')\n\n # assert_body_text(selenium, 'PREVIEW (5 ROWS)')\n # expected_title = re.sub(r'\\..*$', '', re.sub(r'^.*/', '', path))\n # title_el = selenium.find_element_by_name('title')\n # assert title_el.get_attribute('value') == expected_title\n\n if not_travis:\n pytest.set_trace()\n","sub_path":"selenium/basic_test.py","file_name":"basic_test.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"368813004","text":"import tensorflow as tf\nimport numpy as np\nimport sys, os,cv2\nfrom sklearn.utils import shuffle\nfrom scipy.misc import imread,imresize\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import OneHotEncoder\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nimport imgaug as ia\n\nplt.style.use('seaborn-white')\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' 
\nnp.random.seed(678)\ntf.set_random_seed(678)\nia.seed(678)\n\n# activation functions\ndef tf_elu(x): return tf.nn.elu(x)\ndef d_tf_elu(x): return tf.cast(tf.greater(x,0),tf.float32) + ( tf_elu(tf.cast(tf.less_equal(x,0),tf.float32) * x) + 1.0)\ndef tf_softmax(x): return tf.nn.softmax(x)\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\n# code from: https://github.com/tensorflow/tensorflow/issues/8246\ndef tf_repeat(tensor, repeats):\n \"\"\"\n Args:\n\n input: A Tensor. 1-D or higher.\n repeats: A list. Number of repeat for each dimension, length must be the same as the number of dimensions in input\n\n Returns:\n \n A Tensor. Has the same type as input. Has the shape of tensor.shape * repeats\n \"\"\"\n expanded_tensor = tf.expand_dims(tensor, -1)\n multiples = [1] + repeats\n tiled_tensor = tf.tile(expanded_tensor, multiples = multiples)\n repeated_tesnor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)\n return repeated_tesnor\n\n# data aug\nseq = iaa.Sequential([\n iaa.Sometimes(0.5,\n iaa.Affine(\n translate_percent={\"x\": (-0.3, 0.3), \"y\": (-0.3, 0.3)},\n rotate=(-10, 10),\n scale={\"x\": (0.5, 1.1), \"y\": (0.5, 1.1)},\n )\n ),\n iaa.Fliplr(1.0), # Horizonatl flips\n], random_order=True) # apply augmenters in random order\n\n# class\nclass CNN():\n \n def __init__(self,k,inc,out,stddev):\n self.w = tf.Variable(tf.random_normal([k,k,inc,out],stddev=stddev))\n self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n self.v_hat_prev = tf.Variable(tf.zeros_like(self.w))\n\n def getw(self): return self.w\n\n def feedforward(self,input,stride=1,padding='SAME'):\n self.input = input\n self.layer = tf.nn.conv2d(input,self.w,strides=[1,stride,stride,1],padding=padding) \n self.layerA = tf_elu(self.layer)\n return self.layerA \n\n def backprop(self,gradient,learning_rate_change,stride=1,padding='SAME'):\n grad_part_1 = gradient \n grad_part_2 = d_tf_elu(self.layer) \n grad_part_3 = self.input\n\n grad_middle = grad_part_1 * grad_part_2\n\n grad = tf.nn.conv2d_backprop_filter(input = grad_part_3,filter_sizes = self.w.shape,out_backprop = grad_middle,\n strides=[1,stride,stride,1],padding=padding\n )\n\n grad_pass = tf.nn.conv2d_backprop_input(input_sizes = [batch_size] + list(grad_part_3.shape[1:]),filter= self.w,out_backprop = grad_middle,\n strides=[1,stride,stride,1],padding=padding\n )\n\n update_w = []\n update_w.append(\n tf.assign( self.m,self.m*beta1 + (1-beta1) * grad )\n )\n v_t = self.v_prev *beta2 + (1-beta2) * grad ** 2 \n\n def f1(): return v_t\n def f2(): return self.v_hat_prev\n\n v_max = tf.cond(tf.greater(tf.reduce_sum(v_t), tf.reduce_sum(self.v_hat_prev) ) , true_fn=f1, false_fn=f2)\n adam_middel = tf.multiply(learning_rate_change/(tf.sqrt(v_max) + adam_e),self.m)\n update_w.append(tf.assign(self.w,tf.subtract(self.w,adam_middel ) ))\n update_w.append(tf.assign( self.v_prev,v_t ))\n update_w.append(tf.assign( self.v_hat_prev,v_max )) \n return grad_pass,update_w \n\n# Def: Simple function to show 9 image with different channels\ndef show_9_images(image,layer_num=None,image_num=None,channel_increase=3,alpha=None,image_index=None,gt=None,predict=None):\n image = (image-image.min())/(image.max()-image.min())\n fig = plt.figure()\n color_channel = 0\n for i in range(1,10):\n ax = fig.add_subplot(3,3,i)\n ax.grid(False)\n ax.set_xticks([])\n ax.set_yticks([])\n if alpha:\n ax.set_title(\"GT: \"+str(gt[i-1])+\" Predict: \"+str(predict[i-1]))\n else:\n 
ax.set_title(\"Channel : \" + str(color_channel) + \" : \" + str(color_channel+channel_increase-1))\n ax.imshow(np.squeeze(image[:,:,color_channel:color_channel+channel_increase]))\n color_channel = color_channel + channel_increase\n \n if alpha:\n plt.savefig('viz/y_'+str(image_index)+ \"_\" +str(alpha) + \"_alpha_image.png\")\n else:\n plt.savefig('viz/'+str(layer_num) + \"_layer_\"+str(image_num)+\"_image.png\")\n plt.close('all')\n\n# data\nPathDicom = \"../../Dataset/cifar-10-batches-py/\"\nlstFilesDCM = [] # create an empty list\nfor dirName, subdirList, fileList in os.walk(PathDicom):\n for filename in fileList:\n if not \".html\" in filename.lower() and not \".meta\" in filename.lower(): # check whether the file's DICOM\n lstFilesDCM.append(os.path.join(dirName,filename))\n\n# Read the data traind and Test\nbatch0 = unpickle(lstFilesDCM[0])\nbatch1 = unpickle(lstFilesDCM[1])\nbatch2 = unpickle(lstFilesDCM[2])\nbatch3 = unpickle(lstFilesDCM[3])\nbatch4 = unpickle(lstFilesDCM[4])\n\nonehot_encoder = OneHotEncoder(sparse=True)\ntrain_batch = np.vstack((batch0[b'data'],batch1[b'data'],batch2[b'data'],batch3[b'data'],batch4[b'data']))\ntrain_label = np.expand_dims(np.hstack((batch0[b'labels'],batch1[b'labels'],batch2[b'labels'],batch3[b'labels'],batch4[b'labels'])).T,axis=1).astype(np.float32)\ntrain_label = onehot_encoder.fit_transform(train_label).toarray().astype(np.float32)\ntest_batch = unpickle(lstFilesDCM[5])[b'data']\ntest_label = np.expand_dims(np.array(unpickle(lstFilesDCM[5])[b'labels']),axis=0).T.astype(np.float32)\ntest_label = onehot_encoder.fit_transform(test_label).toarray().astype(np.float32)\n# reshape data / # rotate data\ntrain_batch = np.reshape(train_batch,(len(train_batch),3,32,32))\ntest_batch = np.reshape(test_batch,(len(test_batch),3,32,32))\ntrain_batch = np.rot90(np.rot90(train_batch,1,axes=(1,3)),3,axes=(1,2))\ntest_batch = np.rot90(np.rot90(test_batch,1,axes=(1,3)),3,axes=(1,2))\n\n# print out the data shape\nprint(train_batch.shape)\nprint(train_label.shape)\nprint(test_batch.shape)\nprint(test_label.shape)\n\ntest_label = test_label[50:100,:]\ntest_batch = test_batch[50:100,:,:,:]\n\n# simple normalize\ntrain_batch = train_batch/255.0\ntest_batch = test_batch/255.0\n\n# hyper parameter\nnum_epoch = 8\nbatch_size = 50\nprint_size = 1\n\nlearning_rate = 0.0005\nlearnind_rate_decay = 0.0\nbeta1,beta2,adam_e = 0.9,0.9,1e-8\n\n# define class\nchannel_sizes = 128\nl1 = CNN(3,3,channel_sizes,stddev=0.04)\nl2 = CNN(3,channel_sizes,channel_sizes,stddev=0.05)\nl3 = CNN(3,channel_sizes,channel_sizes,stddev=0.06)\n\nl4 = CNN(3,channel_sizes,channel_sizes,stddev=0.04)\nl5 = CNN(3,channel_sizes,channel_sizes,stddev=0.05)\nl6 = CNN(3,channel_sizes,channel_sizes,stddev=0.06)\n\nl7 = CNN(3,channel_sizes,channel_sizes,stddev=0.06)\nl8 = CNN(1,channel_sizes,channel_sizes,stddev=0.05)\nl9 = CNN(1,channel_sizes,10,stddev=0.04)\n\nall_weights = [l1.getw(),l2.getw(),l3.getw(),l4.getw(),l5.getw(),l6.getw(),l7.getw(),l8.getw(),l9.getw()]\n\n# graph\nx = tf.placeholder(shape=[batch_size,32,32,3],dtype=tf.float32)\ny = tf.placeholder(shape=[batch_size,10],dtype=tf.float32)\n\niter_variable = tf.placeholder(tf.float32, shape=())\nlearning_rate_dynamic = tf.placeholder(tf.float32, shape=())\nlearning_rate_change = learning_rate_dynamic * (1.0/(1.0+learnind_rate_decay*iter_variable))\nphase = tf.placeholder(tf.bool)\n\nlayer1 = l1.feedforward(x,padding='SAME')\nlayer2 = l2.feedforward(layer1,padding='SAME')\nlayer3 = l3.feedforward(layer2,padding='SAME')\n\nlayer4_Input = 
tf.nn.avg_pool(layer3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\nlayer4 = l4.feedforward(layer4_Input,padding='SAME')\nlayer5 = l5.feedforward(layer4,padding='SAME')\nlayer6 = l6.feedforward(layer5,padding='SAME')\n\nlayer7_Input = tf.nn.avg_pool(layer6,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\nlayer7 = l7.feedforward(layer7_Input,padding='SAME')\nlayer8 = l8.feedforward(layer7,padding='VALID')\nlayer9 = l9.feedforward(layer8,padding='VALID')\n\nfinal_global = tf.reduce_mean(layer9,[1,2])\nfinal_soft = tf_softmax(final_global)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=final_global,labels=y) )\ncorrect_prediction = tf.equal(tf.argmax(final_soft, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\ngrad_prepare = tf_repeat(tf.reshape(final_soft-y,[batch_size,1,1,10]),[1,8,8,1])\ngrad9,grad9_up = l9.backprop(grad_prepare,learning_rate_change=learning_rate_change,padding='VALID')\ngrad8,grad8_up = l8.backprop(grad9,learning_rate_change=learning_rate_change,padding='VALID')\ngrad7,grad7_up = l7.backprop(grad8,learning_rate_change=learning_rate_change)\n\ngrad6_Input = tf_repeat(grad7,[1,2,2,1])\ngrad6,grad6_up = l6.backprop(grad6_Input,learning_rate_change=learning_rate_change)\ngrad5,grad5_up = l5.backprop(grad6,learning_rate_change=learning_rate_change)\ngrad4,grad4_up = l4.backprop(grad5,learning_rate_change=learning_rate_change)\n\ngrad3_Input = tf_repeat(grad4,[1,2,2,1])\ngrad3,grad3_up = l3.backprop(grad3_Input,learning_rate_change=learning_rate_change)\ngrad2,grad2_up = l2.backprop(grad3,learning_rate_change=learning_rate_change)\ngrad1,grad1_up = l1.backprop(grad2,learning_rate_change=learning_rate_change)\n\ngrad_update = grad9_up + grad8_up + grad7_up + \\\n grad6_up + grad5_up + grad4_up + \\\n grad3_up + grad2_up + grad1_up \n\n# sess\nwith tf.Session() as sess:\n\n sess.run(tf.global_variables_initializer())\n \n def show_hist_of_weigt(all_weight_list,status='before'):\n fig = plt.figure()\n weight_index = 0\n for i in range(1,4):\n ax = fig.add_subplot(1,3,i)\n ax.grid(False)\n temp_weight_list = all_weight_list[weight_index:weight_index+3]\n for temp_index in range(len(temp_weight_list)):\n current_flat = temp_weight_list[temp_index].flatten()\n ax.hist(current_flat,histtype='step',bins='auto',label=str(temp_index+weight_index))\n ax.legend()\n ax.set_title('From Layer : '+str(weight_index+1)+' to '+str(weight_index+3))\n weight_index = weight_index + 3\n plt.savefig('viz/z_weights_'+str(status)+\"_training.png\")\n plt.close('all')\n\n # ------- histogram of weights before training ------\n show_hist_of_weigt(sess.run(all_weights),status='before')\n # ------- histogram of weights before training ------\n\n train_cota,train_acca = 0,0\n train_cot,train_acc = [],[]\n \n test_cota,test_acca = 0,0\n test_cot,test_acc = [],[]\n\n # start the training\n for iter in range(num_epoch):\n\n train_batch,train_label = shuffle(train_batch,train_label)\n\n for batch_size_index in range(0,len(train_batch),batch_size//2):\n current_batch = train_batch[batch_size_index:batch_size_index+batch_size//2]\n current_batch_label = train_label[batch_size_index:batch_size_index+batch_size//2]\n\n # online data augmentation here \n images_aug1 = seq.augment_images(current_batch.astype(np.float32))\n current_batch = np.vstack((current_batch,images_aug1)).astype(np.float32)\n current_batch_label = np.vstack((current_batch_label,current_batch_label)).astype(np.float32)\n current_batch,current_batch_label = 
shuffle(current_batch,current_batch_label)\n # online data augmentation here \n\n sess_result = sess.run([cost,accuracy,correct_prediction,grad_update],\n feed_dict={x:current_batch,y:current_batch_label,iter_variable:iter,learning_rate_dynamic:learning_rate,phase:True})\n print(\"Current Iter : \",iter, \" current batch: \",batch_size_index, ' Current cost: ', sess_result[0],' Current Acc: ', sess_result[1],end='\\r')\n train_cota = train_cota + sess_result[0]\n train_acca = train_acca + sess_result[1]\n \n for test_batch_index in range(0,len(test_batch),batch_size):\n current_batch = test_batch[test_batch_index:test_batch_index+batch_size].astype(np.float32)\n current_batch_label = test_label[test_batch_index:test_batch_index+batch_size].astype(np.float32)\n sess_result = sess.run([cost,accuracy,correct_prediction],\n feed_dict={x:current_batch,y:current_batch_label,iter_variable:iter,phase:False})\n print(\"Current Iter : \",iter, \" current batch: \",test_batch_index, ' Current cost: ', sess_result[0],' Current Acc: ', sess_result[1],end='\\r')\n test_acca = sess_result[1] + test_acca\n test_cota = sess_result[0] + test_cota\n\n if iter % print_size==0:\n print(\"\\n---------- Learning Rate : \", learning_rate * (1.0/(1.0+learnind_rate_decay*iter)) )\n print('Train Current cost: ', train_cota/(len(train_batch)/(batch_size//2)),' Current Acc: ', train_acca/(len(train_batch)/(batch_size//2) ),end='\\n')\n print('Test Current cost: ', test_cota/(len(test_batch)/batch_size),' Current Acc: ', test_acca/(len(test_batch)/batch_size),end='\\n')\n print(\"----------\")\n\n train_acc.append(train_acca/(len(train_batch)/(batch_size//2)))\n train_cot.append(train_cota/(len(train_batch)/(batch_size//2)))\n test_acc.append(test_acca/(len(test_batch)/batch_size))\n test_cot.append(test_cota/(len(test_batch)/batch_size))\n test_cota,test_acca = 0,0\n train_cota,train_acca = 0,0\n\n # Normalize the cost of the training\n train_cot = (train_cot-min(train_cot) ) / (max(train_cot)-min(train_cot))\n test_cot = (test_cot-min(test_cot) ) / (max(test_cot)-min(test_cot))\n\n # plot the training and testing graph\n plt.figure()\n plt.plot(range(len(train_acc)),train_acc,color='red',label='acc ovt')\n plt.plot(range(len(train_cot)),train_cot,color='green',label='cost ovt')\n plt.legend()\n plt.title(\"Train Average Accuracy / Cost Over Time\")\n plt.savefig(\"viz/z_Case Train.png\")\n plt.close('all')\n\n plt.figure()\n plt.plot(range(len(test_acc)),test_acc,color='red',label='acc ovt')\n plt.plot(range(len(test_cot)),test_cot,color='green',label='cost ovt')\n plt.legend()\n plt.title(\"Test Average Accuracy / Cost Over Time\")\n plt.savefig(\"viz/z_Case Test.png\")\n plt.close('all')\n\n # ------- histogram of weights after training ------\n show_hist_of_weigt(sess.run(all_weights),status='After')\n # ------- histogram of weights after training ------\n\n\n\n # ------ layer wise activation -------\n layer3_values = sess.run(layer3,feed_dict={x:test_batch})\n for immage_index in range(10):\n show_9_images(layer3_values[immage_index,:,:,:],3,immage_index)\n\n layer6_values = sess.run(layer6,feed_dict={x:test_batch})\n for immage_index in range(10):\n show_9_images(layer6_values[immage_index,:,:,:],6,immage_index)\n\n layer9_values = sess.run(layer9,feed_dict={x:test_batch})\n for immage_index in range(10):\n show_9_images(layer9_values[immage_index,:,:,:],9,immage_index,channel_increase=1)\n # ------ layer wise activation -------\n\n # -------- Interior Gradients -----------\n # portion of code from: 
https://github.com/ankurtaly/Integrated-Gradients/blob/master/attributions.ipynb\n final_prediction_argmax = None\n final_gt_argmax = None\n def gray_scale(img):\n img = np.average(img, axis=2)\n return np.transpose([img, img, img], axes=[1,2,0])\n\n def normalize(attrs, ptile=99):\n h = np.percentile(attrs, ptile)\n l = np.percentile(attrs, 100-ptile)\n return np.clip(attrs/max(abs(h), abs(l)), -1.0, 1.0) \n\n for alpha_values in [0.01, 0.02, 0.03, 0.04,0.1, 0.5, 0.6, 0.7, 0.8, 1.0]:\n\n # create the counterfactual input and feed it to get the gradient\n test_batch_a = test_batch * alpha_values\n sess_result = sess.run([cost,accuracy,correct_prediction,final_soft,grad1],feed_dict={x:test_batch_a,y:test_label,iter_variable:1.0,phase:False})\n \n # get the final prediction and the ground truth\n final_prediction_argmax = list(np.argmax(sess_result[3],axis=1))[:9]\n final_gt_argmax = list(np.argmax(test_label,axis=1))[:9]\n\n # get the gradients\n returned_gradient_batch = sess_result[4]\n aggregated_gradient = np.expand_dims(np.average(returned_gradient_batch,axis=3),axis=3)\n attrs = abs(np.repeat(aggregated_gradient,3,axis=3))\n attrs = np.clip(attrs/np.percentile(attrs, 99), 0,1)\n\n # interior grad\n interrior_grad = test_batch * attrs\n stacked_grad = interrior_grad[0,:,:,:]\n for indexing in range(1,9):\n stacked_grad = np.vstack((stacked_grad.T,interrior_grad[indexing,:,:,:].T)).T\n \n # show\n show_9_images(stacked_grad,alpha=alpha_values,gt=final_gt_argmax,predict=final_prediction_argmax,image_index='1')\n\n # overlay interior gradient\n image_gray = np.expand_dims(np.average(test_batch,axis=3),axis=3)[:9,:,:,:]\n grad_norm = np.expand_dims(normalize(gray_scale(aggregated_gradient[0,:,:,:])),0)\n for indexing in range(1,9):\n current_image_norm = np.expand_dims(normalize(gray_scale(aggregated_gradient[indexing,:,:,:])),0)\n grad_norm = np.vstack((grad_norm,current_image_norm))\n\n pos_attrs = grad_norm * (grad_norm >= 0.0)\n neg_attrs = -1.0 * grad_norm * (grad_norm < 0.0) \n\n # overlayer\n red_channel = np.zeros_like(grad_norm)\n red_channel[:,:,:,0] = 1.0\n\n blue_channel = np.zeros_like(grad_norm)\n blue_channel[:,:,:,2] = 1.0\n\n attrs_mask = pos_attrs*blue_channel + neg_attrs*red_channel\n vis = 0.6*image_gray + 0.4*attrs_mask\n\n stacked_grad2 = vis[0,:,:,:]\n for indexing in range(1,9):\n stacked_grad2 = np.vstack((stacked_grad2.T,vis[indexing,:,:,:].T)).T\n\n # show\n show_9_images(stacked_grad2,alpha=alpha_values,gt=final_gt_argmax,predict=final_prediction_argmax,image_index='2')\n # -------- Interior Gradients -----------\n\n # -------- Intergral Gradients ----------\n base_line = test_batch * 0.0\n difference = test_batch - base_line\n step_size = 3000\n\n running_example = test_batch * 0.0\n for rim in range(1,step_size+1):\n current_alpha = rim / step_size\n test_batch_a = current_alpha * test_batch\n sess_result = sess.run([cost,accuracy,correct_prediction,final_soft,grad1],feed_dict={x:test_batch_a,y:test_label,iter_variable:1.0,phase:False})\n running_example = running_example + sess_result[4]\n final_prediction_argmax = list(np.argmax(sess_result[3],axis=1))[:9]\n\n running_example = running_example * difference\n attrs = np.expand_dims(np.average(running_example,axis=3),axis=3)\n attrs = abs(np.repeat(attrs,3,axis=3))\n attrs = np.clip(attrs/np.percentile(attrs, 99), 0,1)\n\n # Intergral grad\n Intergral_grad = test_batch * attrs\n stacked_grad = Intergral_grad[0,:,:,:]\n for indexing in range(1,9):\n stacked_grad = 
np.vstack((stacked_grad.T,Intergral_grad[indexing,:,:,:].T)).T\n\n    # show\n    show_9_images(stacked_grad,alpha=-99,gt=final_gt_argmax,predict=final_prediction_argmax,image_index='1') \n\n    # overlay Intergral gradient\n    image_gray = np.expand_dims(np.average(test_batch,axis=3),axis=3)[:9,:,:,:]\n    grad_norm = np.expand_dims(normalize(gray_scale(running_example[0,:,:,:])),0)\n    for indexing in range(1,9):\n        current_image_norm = np.expand_dims(normalize(gray_scale(aggregated_gradient[indexing,:,:,:])),0)\n        grad_norm = np.vstack((grad_norm,current_image_norm))\n\n    pos_attrs = grad_norm * (grad_norm >= 0.0)\n    neg_attrs = -1.0 * grad_norm * (grad_norm < 0.0) \n\n    # overlayer\n    red_channel = np.zeros_like(grad_norm)\n    red_channel[:,:,:,0] = 1.0\n\n    blue_channel = np.zeros_like(grad_norm)\n    blue_channel[:,:,:,2] = 1.0\n\n    attrs_mask = pos_attrs*blue_channel + neg_attrs*red_channel\n    vis = 0.6*image_gray + 0.4*attrs_mask\n\n    stacked_grad2 = vis[0,:,:,:]\n    for indexing in range(1,9):\n        stacked_grad2 = np.vstack((stacked_grad2.T,vis[indexing,:,:,:].T)).T\n\n    # show\n    show_9_images(stacked_grad2,alpha=-99,gt=final_gt_argmax,predict=final_prediction_argmax,image_index='2')\n    # -------- Intergral Gradients ----------\n\n\n# -- end code --","sub_path":"Understanding_Concepts/COUNTERFACTUALS/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":20472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"435280724","text":"def sub_lists(list1): \n \n # store all the sublists \n sublist = [[]] \n list1=[int(i)for i in list1] \n # first loop \n for i in range(len(list1) + 1): \n \n # second loop \n for j in range(i + 1, len(list1) + 1): \n \n # slice the subarray \n sub = list1[i:j] \n sublist.append(sub) \n \n \n return sublist\nx=int(input())\nlst=[int(i) for i in input().split()]\ny=sub_lists(lst)\nsum1=sum(y[0])\nfor j in range(1,len(y)):\n if sum1>sum(y[j]):\n sum1=sum(y[j])\nprint(sum1)\n","sub_path":"min sublist sum.py","file_name":"min sublist sum.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"290655547","text":"import re\n\n\nclass BitField(object):\n def __init__(self, enum):\n if not all(isinstance(member.value, int) for member in enum):\n msg = 'Enum {} members must have integer values'.format(repr(enum))\n raise TypeError(msg)\n try:\n assert enum(0)\n msg = 'Cannot construct BitField from {} with a value for 0: {}'\n raise TypeError(msg.format(repr(enum), enum(0)))\n except ValueError:\n # A ValueError is raised if the enum does not have a value for 0\n pass\n self.enum = enum\n\n def __repr__(self):\n return 'BitField({})'.format(self.enum)\n\n def __str__(self):\n return 'BitField({})'.format(self.enum)\n\n def pack(self, arg):\n \"\"\"\n Take a list (or single value) and bitwise-or all the values together\n \"\"\"\n if arg:\n # Handle a variety of inputs: list or single, enum or raw\n if isinstance(arg, list):\n arg_list = arg\n else:\n arg_list = [arg]\n\n # To make usage a bit nice/easier if the elements of the list\n # are strings assume that they are enum names and attempt to\n # convert them to the correct enumeration values.\n value = 0\n for item in arg_list:\n if isinstance(item, self.enum):\n value |= item.value\n elif isinstance(item, str):\n try:\n value |= getattr(self.enum, item).value\n except AttributeError:\n enum_name = re.match(r\"<enum '(.*)'>\", str(self.enum)).group(1)\n msg = '{} is not a valid {}'.format(item, enum_name)\n raise 
ValueError(msg)\n else:\n # Assume that the item is an integer value, convert it to\n # an enum value to ensure it is a valid value for this\n # bitfield.\n value |= self.enum(item).value\n\n return value\n else:\n return 0\n\n def unpack(self, val):\n \"\"\"\n Take a single number and split it out into all values that are present\n \"\"\"\n return frozenset(e for e in self.enum if e.value & val)\n\n def make(self, arg):\n \"\"\"\n Take an input list and return a frozenset\n\n useful for testing\n \"\"\"\n if arg:\n # Handle the same inputs as the pack function\n if isinstance(arg, list):\n values = [self.enum(value) for value in arg]\n else:\n values = [self.enum(arg)]\n else:\n values = []\n\n # return this list as a frozenset\n return frozenset(values)\n","sub_path":"starstruct/bitfield.py","file_name":"bitfield.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"78219567","text":"# complex structure\nperson1 = {\n \"first_name\": \"John\",\n \"last_name\": \"Doe\",\n \"age\": 30,\n \"hair_color\": \"Green\",\n \"eyes_color\": \"Brown\",\n \"weight\": 95.4\n}\nperson2 = {\n \"first_name\": \"Artur\",\n \"last_name\": \"Doe\",\n \"age\": 45,\n \"hair_color\": \"Green\",\n \"eyes_color\": \"Brown\",\n \"weight\": 75.4\n}\n\n\ndef get_full_name(person: dict):\n return f\"{person['first_name']} {person['last_name']}\"\n\n\ndef show_info(person: dict):\n print(\"Person:\", get_full_name(person), \"age:\", person[\"age\"])\n\n\ndef show_all_persons_info(persons: list):\n for x in persons:\n show_info(x)\n\n\n# function to work with it\nshow_info(person1)\nshow_info(person2)\n\n# work with dict global list\nperson_list = [person1, person2]\n\nshow_all_persons_info(person_list)\n","sub_path":"lesson06/01-dicts.py","file_name":"01-dicts.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"568229071","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom gluon import *\nfrom plugin_ckeditor import CKEditor\n\n# def upload():\n # (new_filename, old_filename, length, mime_type) = current.plugin_ckeditor.handle_upload()\n \n # title = os.path.splitext(old_filename)[0]\n \n # result = current.plugin_ckeditor.settings.table_upload.validate_and_insert(\n # title = title,\n # filename = old_filename,\n # upload = new_filename,\n # flength = length,\n # mime_type = mime_type\n # )\n \n # text = ''\n # url = URL(*current.plugin_ckeditor.settings.download_url, args=[new_filename]) #URL('default', 'download', args=[new_filename])\n \n # if not result.id:\n # text = result.errors\n\t# print url\n # return dict(text=text, cknum=request.vars.CKEditorFuncNum, url=url)\n\t\ndef upload():\n\timport os, uuid\n\tfrom PIL import Image\n\tsize=(600,400)\n\t(new_filename, old_filename, length, mime_type) = current.plugin_ckeditor.handle_upload()\n\ttitle = os.path.splitext(old_filename)[0]\n\text = new_filename.split('.')[-1].lower()\n\tthumbName = new_filename\n\tif ext!='gif':\n\t\tim=Image.open(request.folder + 'static/uploads/ckeditor/' + new_filename)\n\t\tim.thumbnail(size,Image.ANTIALIAS)\n\t\tthumbName='images.thumb.%s.jpg' % (uuid.uuid4())\n\t\t\n\t\tim.save(request.folder + 'static/uploads/ckeditor/' + thumbName,'jpeg')\n\t\tpath = os.path.join(request.folder + 'static/uploads/ckeditor/' + new_filename)\n\t\tos.unlink(path)\n\tresult = current.plugin_ckeditor.settings.table_upload.validate_and_insert(title = title,filename = 
old_filename,upload = thumbName,flength = length,mime_type = mime_type)\n\ttext = ''\n\turl = URL('static', 'uploads/ckeditor', args=[thumbName])\n\tif not result.id:\n\t\ttext = result.errors\n\treturn dict(text=text, cknum=request.vars.CKEditorFuncNum, url=url)\n \ndef browse():\n    db = current.plugin_ckeditor.db\n    table_upload = current.plugin_ckeditor.settings.table_upload\n    browse_filter = current.plugin_ckeditor.settings.browse_filter\n    set = db(table_upload.id>0)\n    for key, value in browse_filter.items():\n        if value[0] == '<':\n            set = set(table_upload[key]<value[1:])\n        elif value[0] == '>':\n            set = set(table_upload[key]>value[1:])\n        elif value[0] == '!':\n            set = set(table_upload[key]!=value[1:])\n        else:\n            set = set(table_upload[key]==value)\n    \n    rows = set.select(orderby=table_upload.title)\n    \n    return dict(rows=rows, cknum=request.vars.CKEditorFuncNum)\n    \ndef delete():\n    filename = request.args(0)\n    if not filename:\n        raise HTTP(401, 'Required argument filename missing.')\n    \n    db = current.plugin_ckeditor.db\n    table_upload = current.plugin_ckeditor.settings.table_upload\n    db(table_upload.upload==filename).delete()\n    \n    # delete the file from storage\n    path = os.path.join(request.folder, 'uploads', filename)\n    os.unlink(path)\n\n","sub_path":"controllers/plugin_ckeditor.py","file_name":"plugin_ckeditor.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"95008575","text":"# filename: testvirus.py\r\n# encoding: utf-8\r\n\r\nimport urllib2\r\n_VIRUS_FILE_PATH = \"c:/tmp/nqvirus2.txt\"\r\n\r\ndef execute():\r\n url = 'http://192.168.8.2/s3c/virus_dbs/download/1'\r\n res = urllib2.urlopen(url)\r\n content = res.read()\r\n with file(_VIRUS_FILE_PATH,'w') as f:\r\n f.write(content)\r\n \r\nexecute()","sub_path":" jdemoprojects --username zhb1208/JPythonStudy/url/testvirus.py","file_name":"testvirus.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"197082137","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom skimage.morphology import reconstruction\nfrom skimage import img_as_ubyte\nfrom skimage import img_as_float\n\n\ndef get_green(img):\n    G = img[:, :, 1]\n    return G\n\n\ndef get_red(img):\n    R = img[:, :, 0]\n    return R\n\n\ndef get_a(im):\n    lab = cv2.cvtColor(im, cv2.COLOR_RGB2LAB)\n    A = lab[:, :, 1]\n    return A\n\n\ndef calc_asf(im):\n    asf = im.copy()\n\n    for i in range(3, 70, 2):\n        asf = cv2.morphologyEx(asf, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (i, i)), iterations=1)\n        asf = cv2.morphologyEx(asf, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (i, i)), iterations=1)\n\n    return asf\n\n\ndef shade_cor(im, k):\n    asf = calc_asf(im)\n    result = (im - asf) + k\n    return result\n\n\ndef morph_close(im, i):\n    result = cv2.morphologyEx(im, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (i, i)), iterations=1)\n    return result\n\n\ndef morph_open(im, i):\n    result = cv2.morphologyEx(im, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (i, i)), iterations=1)\n    return result\n\n\ndef calculate_hist(img):\n    hist, bins = np.histogram(img.flatten(), 256, [0, 256])\n\n    cdf = hist.cumsum()\n    cdf_normalized = cdf * hist.max() / cdf.max() # this line not necessary.\n\n    plt.plot(cdf_normalized, color='b')\n    plt.xlim([0, 256])\n    plt.legend(('cdf', 'histogram'), loc='upper left')\n    #plt.show()\n\n\ndef threshold(img, t):\n    retval, thresholded = 
cv2.threshold(img, t, 255, cv2.THRESH_BINARY_INV)\n return thresholded\n\n\ndef threshold_bin(img, t):\n retval, thresholded = cv2.threshold(img, t, 255, cv2.THRESH_BINARY)\n return thresholded\n\n\ndef calc_centroid(img):\n rows, cols = img.shape\n sum1 = 0\n sum2 = 0\n sum3 = 0\n for i in range(rows):\n for j in range(cols):\n sum1 += img[i,j]*i\n sum2 += img[i,j]\n sum3 += img[i,j]*j\n\n xc = sum1//sum2\n yc = sum3//sum2\n return xc, yc\n\n\ndef gradient(img):\n str_el = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n gr = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, str_el, iterations=1)\n dil = cv2.dilate(img, str_el, iterations=1)\n er = cv2.erode(img, str_el, iterations=1)\n resu = dil-er\n return gr\n\n\ndef watershed(img, cX, cY, org_img):\n grad = img.copy()\n ext_marker = org_img.copy()\n rows, cols = ext_marker.shape\n for i in range(rows):\n for j in range(cols):\n ext_marker[i,j] = 0\n ext_marker[cX, cY] = 125\n ext_marker = cv2.circle(ext_marker, (cY, cX), 45, (255, 255, 255), 2, 2)\n lbl = ext_marker.astype(np.int32)\n img2 = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n wat = cv2.watershed(img2, lbl)\n cor_wat = wat.astype(np.uint8)\n\n cv2.imwrite('watershed.jpg', wat)\n img_w = cv2.imread('watershed.jpg')\n return img_w\n\n\ndef inside_circle(cX, cY, x, y, r):\n return (x - cX)*(x - cX) + (y - cY)*(y - cY) < r*r\n\n\ndef get_marker(img, cX, cY):\n rows, cols = img.shape\n for x in range(rows):\n for y in range(cols):\n if inside_circle(cX, cY, x, y, 80):\n img[x,y] = 0\n\n return img\n\n\ndef calc_dif(img1, img2):\n diff = cv2.subtract(img1, img2)\n return diff\n\n\ndef reconstruct(img, mask):\n img_fl = img_as_float(img)\n mask_fl = img_as_float(mask)\n\n recons = reconstruction(mask_fl, img_fl)\n cv_image = img_as_ubyte(recons)\n return cv_image\n\n\ndef find_cont(img, org_img):\n img1 = org_img.copy()\n img2 = org_img.copy()\n img_c = img.copy()\n imC, contours, hierarchy = cv2.findContours(img_c, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n cv2.drawContours(org_img, contours, -1, (0, 0, 0), 1)\n return org_img\n\n\ndef show_cup(img, cup):\n cup_op = morph_open(cup, 3)\n cup_cl = morph_close(cup_op, 3)\n thr_cup = threshold(cup_cl, 150)\n cup_img = cv2.cvtColor(thr_cup, cv2.COLOR_BGR2GRAY);\n imC, contours, hierarchy = cv2.findContours(cup_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n cv2.drawContours(img, contours, -1, (0, 0, 0), 1)\n return img\n\n\ndef resize(img, scale):\n width = int(img.shape[1] * scale / 100)\n height = int(img.shape[0] * scale / 100)\n dim = (width, height)\n resized = cv2.resize(img, dim)\n return resized\n\n\ndef glaucoma(img):\n res = resize(img, 30)\n org_image_copy = res.copy()\n # cup region\n chan_A = get_a(res)\n img_copy = chan_A.copy()\n cor = shade_cor(chan_A, 150)\n cl = morph_close(cor, 20)\n calculate_hist(cl)\n (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(cl)\n print(minVal)\n thres = threshold(cl, minVal)\n cX, cY = calc_centroid(thres)\n grad = gradient(cl)\n wshed = watershed(grad, cX, cY, img_copy)\n cup = show_cup(org_image_copy, wshed)\n # optic disc\n chan_G_before = get_green(res)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n chan_G = clahe.apply(chan_G_before)\n g_copy = chan_G.copy()\n marker = get_marker(chan_G, cX, cY)\n rec = reconstruct(g_copy, marker)\n dif = calc_dif(g_copy, rec)\n thrs_disc = threshold_bin(dif, 5)\n opDisc = morph_open(thrs_disc, 3)\n clDisc = morph_close(opDisc, 27)\n contours = find_cont(clDisc, org_image_copy)\n cv2.imwrite('03_g_result.jpg', contours)\n\n\nimg = 
cv2.imread('03_g.jpg')\nglaucoma(img)\n\n\n","sub_path":"glaucoma.py","file_name":"glaucoma.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"637335916","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLog Toolkit\n~~~~~~~~~~~\nSeries of tools designed to control the logger\n\n:Copyright: (c) 2017 DELL Inc. or its subsidiaries. All Rights Reserved.\n:License: Apache 2.0, see LICENSE for more details.\n:Author: Akash Kwatra\n\nCreated on June 23, 2017\n\"\"\"\n\nimport logging.config\n\nERROR_HEADER = \"-----------------------------ERROR-----------------------------\"\n\n###################################################################################################\n# Configure Data\n###################################################################################################\n\ndef configure_logger_from_yaml(path):\n    \"\"\"Attempts to configure root logger from given YAML file\"\"\"\n    import yaml\n    try:\n        with open(path, 'r') as stream:\n            config = yaml.load(stream)\n        logging.config.dictConfig(config)\n    except (FileNotFoundError, yaml.YAMLError) as exc:\n        print(\"Could not load logger configuration from YAML file :: {}\".format(exc))\n\n###################################################################################################\n# Error Logger\n###################################################################################################\n\ndef exception(logger):\n    \"\"\"\n    Return decorator to log exceptions using the specified logger\n    \"\"\"\n    def decorator(func):\n        \"\"\"Decorate function with a try catch and a log record\"\"\"\n        def wrapper(*args, **kwargs):\n            try:\n                return func(*args, **kwargs)\n            except:\n                logger.exception(\"Exception in %s\\n%s\\n\", func.__name__, ERROR_HEADER)\n                raise\n        return wrapper\n    return decorator\n","sub_path":"smi_tests/resttestms/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"216268224","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport json as j\nimport subprocess\nimport click\n\ndef get_yc_json() -> str:\n    \"\"\"\n    Runs the installed and configured yc command line\n    interface for Yandex Cloud and returns its json output.\n    \"\"\"\n    command_get_nodes = ['yc', 'compute', 'instance', 'list', '--format', 'json']\n    result = subprocess.run(command_get_nodes, stdout=subprocess.PIPE)\n    return j.loads(result.stdout)\n\n\ndef get_group_list(json) -> list:\n    \"\"\"\n    Takes the json from yc as input.\n    Returns a list of the groups found in the yc json.\n    The group name is taken from the ansible_group label.\n    \"\"\"\n    group_list = []\n    for host_data in json:\n        name = host_data['labels']['ansible_group']\n        group_list.append(name)\n    return group_list\n\ndef get_name_list(json) -> list:\n    \"\"\"\n    Takes the json from yc as input.\n    Returns a list of all host names from the yc json.\n    The host name is taken from the ansible_name label.\n    \"\"\"\n    name_list = []\n    for host_data in json:\n        name = host_data['labels']['ansible_name']\n        name_list.append(name)\n    return name_list\n\ndef get_ip_host(name, json) -> str:\n    \"\"\"\n    Takes the json from yc and a host name.\n    Returns the NAT IP address of the host's first interface.\n    \"\"\"\n    for host_data in json:\n        host_name = host_data['labels']['ansible_name']\n        host_ip = host_data['network_interfaces'][0]['primary_v4_address']['one_to_one_nat']['address']\n        if host_name == name:\n            return host_ip\n    return \"127.0.0.1\"\n\ndef 
get_list_host_to_group(group, json) -> list:\n    \"\"\"\n    Takes the json from yc and a group name.\n    Returns a list of the hosts in the group.\n    \"\"\"\n    host_list = []\n    for host_data in json:\n        host_group = host_data['labels']['ansible_group']\n        host_name = host_data['labels']['ansible_name']\n        if host_group == group:\n            host_list.append(host_name)\n    return host_list\n\n\ndef get_vars_group(group, json) -> dict:\n    \"\"\"\n    Takes a group name and the json from YC.\n    Returns a dict of the variables stored in the labels\n    of all hosts in the group, using the ansible_group_var_ prefix.\n    \"\"\"\n    ansible_var_template = 'ansible_group_var_'\n    group_vars = dict()\n    for host_data in json:\n        host_lables = host_data['labels']\n        group_name = host_data['labels']['ansible_group']\n        if group_name == group:\n            for key, value in host_lables.items():\n                if key.startswith(ansible_var_template):\n                    ansible_var_name = key.replace(ansible_var_template,'')\n                    group_vars[ansible_var_name] = value\n    return group_vars\n\ndef get_vars_host(host, json) -> dict:\n    \"\"\"\n    Returns a 'dict' of all variables from labels whose names include the ansible_var_template value,\n    'ansible_host_var_' by default.\n    \"\"\"\n    ansible_var_template = 'ansible_host_var_'\n    host_vars = dict()\n    for host_data in json:\n        host_lables = host_data['labels']\n        host_name = host_data['labels']['ansible_name']\n        if host_name == host:\n            for key, value in host_lables.items():\n                if key.startswith(ansible_var_template):\n                    ansible_var_name = key.replace(ansible_var_template,'')\n                    host_vars[ansible_var_name] = value\n    return host_vars\n\ndef get_inventory_json(json) -> dict:\n    \"\"\"\n    Takes the json from yc as input.\n    Returns the inventory json for ansible.\n    \"\"\"\n    inventory = {}\n    # add the all group\n    inventory['all'] = {}\n    inventory['all']['children'] = []\n    inventory['all']['children'].extend([\"ungrouped\"])\n    inventory['all']['children'].extend(get_group_list(json))\n    # add _meta\n    inventory['_meta'] = {}\n    inventory['_meta']['hostvars'] = {}\n    for host in get_name_list(json=json):\n        inventory['_meta']['hostvars'][host] = {}\n        inventory['_meta']['hostvars'][host]['ansible_host'] = get_ip_host(name=host, json=json)\n        inventory['_meta']['hostvars'][host].update(get_vars_host(host=host,json=json))\n    for group in get_group_list(json):\n        inventory[group] = {}\n        inventory[group]['hosts'] = get_list_host_to_group( group=group, json=json)\n        inventory[group]['vars'] = get_vars_group(group=group, json=json)\n\n    return inventory\n\n@click.command()\n@click.option('--list', is_flag=True, help=\"print inventory\")\ndef main(list) -> str:\n    # create_parser.set_defaults(func=main )\n    json = get_yc_json()\n    inventory = get_inventory_json(json=json)\n    print(j.dumps(inventory, sort_keys=True, indent=4))\n    # get_vars_host(host=\"dbserver\", json=json)\n\n    sys.exit(0)\n\nif __name__== \"__main__\":\n    main()\n","sub_path":"kubernetes/ansible/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":5213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"613688935","text":"\nfor data in [\"hongzhi\", \"Jakarta\", \"Istanbul\", \"NYC\", \"TKY\", \"SaoPaulo\", \"KualaLampur\"]:\n    base = \"edgelist_{}\".format(data)\n\n    file1 = open(\"{}_0\".format(base), 'r')\n    file2 = open(\"{}_1\".format(base), 'r')\n    file3 = open(\"{}_2\".format(base), 'r')\n    file4 = open(\"{}_3\".format(base), 'r')\n\n    lines1 = [int(line.split()[0]) for line in file1]\n    # lines1 = [int(line.split()[0]) for line in file1]\n    # 
lines1 = [int(line.split()[0]) for line in file1]\n # lines1 = [int(line.split()[0]) for line in file1]\n # lines2 = [line for line in file2]\n # lines3 = [line for line in file3]\n # lines4 = [line for line in file4]\n print(data, len(lines1), max(lines1))\n\n\n # print(lines1 == lines2)\n # print(lines3 == lines2)\n # print(lines4 == lines3)\n# print(lines1 == lines2)","sub_path":"Suhi_output/checkdata.py","file_name":"checkdata.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"286830928","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 3 16:14:14 2018\n\n@author: mike\n\"\"\"\nimport marsmission as mmc\nimport animmm as mma #marstest animation class\nimport rocket as mmr\n\nmm=mmc.marsmission()\namm=mma.animmm(mm)\n#mm=amm.getmarsmission()\n\n#print(mm.getallstates())\n\n#print(mm.control)\n\nmm.state[mmc.ctl.DT]=2000\n\nmm.control[mmc.ctl.FX]=-.001\nmm.control[mmc.ctl.FY]=-.001\n\nrock=mmr.rocket()\n\nmm.rocket=rock\nprint(rock)\n \nprint(rock.payload)\nprint(rock.mass)\nprint(rock.rockprop)\n\nmm.updaterocket()\nprint(rock.payload)\n#print(rock.mass)\n\n\n\n#amm.startanimation()\n","sub_path":"dev/louistest1.py","file_name":"louistest1.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"648014543","text":"#!/usr/bin/python3\n\nfrom grinbase.constants.MysqlConstants import MysqlConstants\nfrom grinbase.dbaccess import database\nfrom grinbase.dbaccess.database import database_details\nfrom grinbase.model.pool_utxo import Pool_utxo\n\nif __name__ == '__main__':\n database.db = database_details(MYSQL_CONSTANTS=MysqlConstants())\n database.db.initialize()\n\n# for i in range(0,10):\n# tmp = Pool_utxo(id=str(i), address=str(i), amount=1.5*i)\n# database.db.createDataObj(tmp)\n\n\n utxo = Pool_utxo.getPayable(0)[0]\n print(utxo)\n locked_utxo = Pool_utxo.get_locked_by_id(utxo.id)\n print(locked_utxo)\n locked_utxo.amount=1.0\n database.db.getSession().begin_nested();\n locked_utxo.amount=7.0\n database.db.getSession().commit()\n database.db.getSession().commit()\n\n utxo = Pool_utxo.getPayable(0)[0]\n print(utxo)\n\n\n# for utxo in Pool_utxo.getPayable(0):\n# Pool_utxo.get_locked_by_id(utxo.id)\n# print(utxo)\n\n\n\n\n","sub_path":"grin-py/grinbase/scripts/testdb.py","file_name":"testdb.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"404836641","text":"# train_baseline_DeepNN.py\n###########################################################\n# Trains and saves baseline's deep neural network model\n###########################################################\n# Expects 2 command line arguments\n# arg1 - filepath to input data (CSV file)\n# arg2 - filepath to user labels\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom numpy import loadtxt\nimport time\nimport sys\n\ntrain_set = loadtxt(sys.argv[1], delimiter=',')\nuser_labels = loadtxt(sys.argv[2], delimiter=',')\n\nuser_features = 2\ngame_features = 6\nuser_game_features = 2\nnum_games = 3000\n\nlayer_size_input = user_features + ((game_features + user_game_features) * num_games)\nlayer_size_hidden_1 = 128\nlayer_size_hidden_2 = 128\nlayer_size_output = 1\n\ninput = keras.Input(shape=(layer_size_input,))\n\nhidden_1 = layers.Dense(layer_size_hidden_1, activation='relu', name='hidden_1')(input)\nhidden_2 = 
layers.Dense(layer_size_hidden_2, activation='relu', name='hidden_2')(hidden_1)\n\noutput = layers.Dense(layer_size_output, activation='sigmoid', name='output')(hidden_2)\n\nmodel = keras.Model(inputs=input, outputs=output)\n\noptimizer = keras.optimizers.Adam(lr=0.001)\nmodel.compile(optimizer=optimizer, loss=\"mean_squared_error\")\nmodel.fit(train_set, user_labels, epochs=100)\n\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\")\nmodel.save(\"model_baseline_DeepNN_\"+timestr+\".h5\")","sub_path":"train_baseline_DeepNN.py","file_name":"train_baseline_DeepNN.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"332197977","text":"from windows import *\nimport sys\nimport re\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nimport requests\nimport bs4\nimport time\nimport threading\nimport pyperclip\npath = '../TodayVideoName/'\n\nclass Thread_1(QtCore.QThread):\n # thread 1\n mysignal = QtCore.pyqtSignal(tuple)\n ifcomplete = []\n mutex = QtCore.QMutex()\n def __init__(self, urll):\n super().__init__()\n self.url = urll\n\n def run(self):\n self.getNames()\n\n def getNames(self):\n info = '正在爬取信息...'\n a = (0, info)\n self.mysignal.emit(a)\n names = self.url.split()\n a = 0\n ts = []\n bts2.clear()\n self.ifcomplete.clear()\n for u in names:\n t1 = threading.Thread(target=self.getName, args=(u,)) \n # run it as a daemon thread !!!\n t1.setDaemon(True) \n # start it\n t1.start()\n ts.append(t1)\n for t in ts:\n t.join()\n for complete in self.ifcomplete:\n if complete is not True:\n info = \"存在url爬取失败!\"\n a = (0, info)\n self.mysignal.emit(a)\n return 0\n info = '所有url爬取完毕'\n a = (3, info)\n self.mysignal.emit(a)\n return 0\n \n def getName(self, url):\n try:\n r= requests.get(url)\n r.encoding = 'utf-8' \n soup = bs4.BeautifulSoup(r.text, 'lxml')\n fname = soup.find('', {'name': 'keywords'})\n result1 = re.match(r'[0-9]*[A-Z]*\\\-[0-9]{6}', str(fname['content']))\n result2 = re.match(r'[0-9]*[A-Z]*\\\-[0-9]{5}', str(fname['content']))\n result3 = re.match(r'[0-9]*[A-Z]*\\\-[0-9]{4}', str(fname['content']))\n result4 = re.match(r'[0-9]*[A-Z]*\\\-[0-9]{3}', str(fname['content']))\n result5 = re.match(r'FC2[PPV]?\\\-[0-9]{7}', str(fname['content']))\n result6 = re.match(r'FC2[PPV]?\\\-[0-9]{6}', str(fname['content']))\n if result1:\n firstname = result1.group() + '-C '\n elif result2:\n firstname = result2.group() + '-C '\n elif result3:\n firstname = result3.group() + '-C '\n elif result4:\n firstname = result4.group() + '-C '\n elif result5:\n firstname = result5.group() + '-C '\n elif result6:\n firstname = result6.group() + '-C '\n lname = soup.find('', {'name': 'description'})\n firstname += str(lname['content']).split('【影片名称】:')[1].split(\"【出演女优】\")[0]\n lastname = str(lname['content']).split('【出演女优】:')[1].split(\"【影片格式】\")[0]\n videoname = firstname + \" \" + lastname\n self.mutex.lock()\n with open(path + time.strftime(\"%m-%d\") + '.txt', 'a', encoding='UTF-8') as f:\n f.write(videoname + '\\\n')\n a = (1, videoname)\n self.mysignal.emit(a)\n time.sleep(0.5)\n souptorrent = soup.find('', {'class': 'blockcode'})\n torrent = str(souptorrent.contents[0]).split('
  • ')[1].split(\"
  • \")[0]\n with open(path + time.strftime(\"%m-%d\") + '-torrent' + '.txt', 'a', encoding='UTF-8') as fi:\n fi.write(torrent + '\\n')\n self.mutex.unlock()\n a = (2, torrent)\n self.mysignal.emit(a)\n except requests.RequestException as e:\n info = \"{}网址访问失败!失败信息为:{}\".format(url, e)\n a = (0, info)\n self.mysignal.emit(a)\n self.ifcomplete.append(False)\n except IndexError as e:\n info = \"{}爬取失败!请检查该网址是否为正确网站!\".format(url)\n a = (0, info)\n self.mysignal.emit(a)\n self.ifcomplete.append(False)\n except IOError as e:\n info = \"{}爬取失败!请检查{}文件夹是否存在!\".format(url, path)\n a = (0, info)\n self.mysignal.emit(a)\n self.ifcomplete.append(False)\n else:\n bts.append(torrent)\n bts2.append(torrent)\n self.ifcomplete.append(True)\n\nbts = []\nbts2 = []\nclass MyWindow(QMainWindow, Ui_Form):\n def __init__(self, parent=None):\n super(MyWindow, self).__init__(parent)\n self.setWindowOpacity(0.8) # 设置窗口透明度\n # self.setAttribute(QtCore.Qt.WA_TranslucentBackground) # 设置窗口背景透明\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint|QtCore.Qt.WindowStaysOnTopHint) # 隐藏边框\n self.setupUi(self)\n self.quit.clicked.connect(self.close) # 点击按钮之后关闭窗口\n self.reduce.clicked.connect(self.showMinimized) #点击按钮之后缩放窗口\n self.copythis.clicked.connect(self.copythisurl)\n self.copyall.clicked.connect(self.copyallurl)\n self.start.clicked.connect(self.threadurl)\n self.textEdit.setContextMenuPolicy(Qt.CustomContextMenu)\n self.textEdit.customContextMenuRequested.connect(self.create_rightmenu)\n self.progressBar.setValue(0)\n #创建右键菜单函数\n def create_rightmenu(self):\n #菜单对象\n self.groupBox_menu = QMenu(self)\n # self.groupBox_menu.setStyleSheet(\"background-color: rgb(148, 213, 172);color: rgb(104, 116, 109);\")\n self.actionA = QAction('回车',self)#创建菜单选项对象\n self.actionA.setShortcut('Enter')#设置快捷键\n self.groupBox_menu.addAction(self.actionA)#把动作A选项���象添加到菜单self.groupBox_menu上\n self.actionA.triggered.connect(self.entertext) #将动作A触发时连接到槽函数 button\n\n self.actionB = QAction('select ALL',self)#创建菜单选项对象\n self.actionB.setShortcut('Ctrl+A')#设置快捷键\n self.groupBox_menu.addAction(self.actionB)#把动作A选项对象添加到菜单self.groupBox_menu上\n self.actionB.triggered.connect(self.textEdit.selectAll) #将动作A触发时连接到槽函数 button\n\n self.groupBox_menu.popup(QCursor.pos())#声明当鼠标在groupBox控件上右击时,在鼠标位置显示右键菜单 ,exec_,popup两个都可以,\n\n def entertext(self):\n self.textEdit.append('')\n\n def mouseMoveEvent(self, event):\n if QtCore.Qt.LeftButton and self.m_flag:\n self._endPos = event.pos() - self._startPos\n self.move(event.globalPos()-self.m_Position)\n\n def mousePressEvent(self, QMouseEvent):\n if QMouseEvent.buttons() == QtCore.Qt.LeftButton:\n self._startPos = QtCore.QPoint(QMouseEvent.x(), QMouseEvent.y())\n self.m_flag = True\n self.m_Position = QMouseEvent.globalPos()-self.pos() # 获取鼠标相对窗口的位置\n QMouseEvent.accept()\n\n def mouseReleaseEvent(self, QMouseEvent):\n self.m_flag = False\n\n def threadurl(self):\n self.urls = str(self.textEdit.toPlainText())\n bts2.clear()\n a = self.urls.split()\n self.b = 0\n self.progressBarimum = 0\n for i in a:\n self.b += 1\n self.progressBar.setMinimum(0)\n self.progressBar.setMaximum(self.b)\n self.progressBar.setValue(0)\n self.thread = Thread_1(self.urls)\n self.thread.mysignal.connect(self.getinfo)\n self.thread.start()\n \n def getinfo(self, info):\n if(info[0] == 1):\n self.textBrowser.append(info[1] + \"----已写入\" + time.strftime(\"%m-%d\") + '.txt')\n elif info[0] == 2:\n self.textBrowser.append(info[1] + \"----已写入\" + time.strftime(\"%m-%d\") + '-torrent' + '.txt')\n self.progressBarimum += 1\n 
self.progressBar.setValue(self.progressBarimum)\n elif info[0] == 3:\n self.textBrowser.append(info[1])\n self.textEdit.setPlainText('')\n self.thread.quit()\n self.thread.terminate()\n self.thread.wait()\n del self.thread\n else:\n self.textBrowser.append(info[1])\n \n def copythisurl(self):\n torrents = '\\n'.join(bts2)\n pyperclip.copy(torrents)\n self.textBrowser.append('已复制种子!\\n')\n\n def copyallurl(self):\n torrents = '\\n'.join(bts)\n pyperclip.copy(torrents)\n self.textBrowser.append('已复制种子!\\n')\n\n def enterText(self ):\n self.textEdit.insertPlainText('\\n')\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n myWin = MyWindow()\n myWin.show()\n sys.exit(app.exec_())","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"435942387","text":"print(\"Anas Ahmed(18b-116-cs),CS-A\")\r\nprint('Programming Ex#6')\r\ndef first_law_of_motion():\r\n Vi= int(input('Enter the initial velocity of the body: '))\r\n a= int(input('Enter the accelaration of the body: '))\r\n t= int (input('Time taken by the body:' ))\r\n Vf= Vi + (a*t)\r\n print('The Final Velocity of the body is:',Vf, ' m/s')\r\ndef second_law_of_motion():\r\n Vi= eval(input('Enter the initial velocity of the body: '))\r\n a= eval(input('Enter the accelaration of the body: '))\r\n t= eval(input('Time taken by the body:' ))\r\n S = Vi*t+ 1/2*(a*(t**2))\r\n print('The distance taken by the body is: ',str(S),'m')\r\ndef third_law_of_motion():\r\n vf = int(input('Enter the final velocity of the body: '))\r\n vi = int(input('Enter the initial velocity of the body: '))\r\n a2= (input(\"What does you want to find,Acceleration or distamce..?\"))\r\n if a2== 'Acceleration':\r\n s= int(input(\"Enter the distance cover by body in metres(m):\"))\r\n a= (vf**2-vi**2)/2*s\r\n print(\"The acceleration of the body at that time will be: \",a,' m/s²')\r\n else:\r\n a= int(input(\"Enter the accelaration of the body in m/s²:\"))\r\n s= abs((vf**2-vi**2)/2*a)\r\n print('The distance covered by the body is:' , s, 'm')\r\n","sub_path":"LAB5_PROGRAMMING EX06.py","file_name":"LAB5_PROGRAMMING EX06.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"241235540","text":"from tornado.web import RequestHandler\n\nfrom lampost.di.config import config_value\nfrom lampost.server.handlers import SessionHandler\nfrom lampost.di.resource import Injected, module_inject\nfrom lampost.util.lputil import ClientError\n\nlog = Injected('log')\nsm = Injected('session_manager')\num = Injected('user_manager')\ndb = Injected('datastore')\njson_encode = Injected('json_encode')\njson_decode = Injected('json_decode')\nperm = Injected('perm')\nedit_update = Injected('edit_update_service')\nmodule_inject(__name__)\n\n\ndef editor_login(session):\n edit_perms = []\n player = session.player\n for perm_level, tab_ids in config_value('editor_tabs').items():\n if perm.has_perm(player, perm_level):\n edit_perms.extend(tab_ids)\n session.append({'editor_login': {'edit_perms': edit_perms, 'playerId': player.dbo_id, 'imm_level': player.imm_level,\n 'playerName': player.name}})\n edit_update.register(session)\n\n\nclass EditConnect(RequestHandler):\n def post(self):\n session_id = self.request.headers.get('X-Lampost-Session')\n session = sm.get_session(session_id)\n if not session:\n session_id, session = sm.start_edit_session()\n 
session.player = None\n if not session.player:\n content = json_decode(self.request.body.decode())\n game_session = sm.get_session(content.get('gameSessionId'))\n if game_session:\n if getattr(game_session, 'user', None) and game_session.user.dbo_id == content.get('userId'):\n session.player = game_session.player\n else:\n log.warn(\"Edit session connected with non-match user id\")\n session.append({'connect': session_id})\n if session.player:\n editor_login(session)\n else:\n session.append({'connect_only': True})\n self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n self.write(json_encode(session.pull_output()))\n\n\nclass EditLogin(SessionHandler):\n def main(self):\n content = self._content()\n user_name = content.userId.lower()\n try:\n user = um.validate_user(user_name, content.password)\n except ClientError:\n self.session.append({'login_failure': \"Invalid user name or password.\"})\n return\n imm = None\n for player in (db.load_object(player_id, \"player\") for player_id in user.player_ids):\n if player.dbo_id == user_name:\n if player.imm_level:\n imm = player\n break\n self.session.append({'login_failure': '{} is not immortal.'.format(player.name)})\n return\n if player.imm_level and (not imm or player.imm_level > imm.imm_level):\n imm = player\n if imm:\n self.session.player = imm\n editor_login(self.session)\n else:\n self.session.append({'login_failure': 'No immortals on this account.'})\n\n\nclass EditLogout(SessionHandler):\n def main(self):\n edit_update.unregister(self.session)\n self.session.player = None\n self.session.append({'editor_logout': True})\n","sub_path":"lampost/editor/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"624966697","text":"from bs4 import BeautifulSoup\r\nimport urllib.request\r\nimport re\r\n\r\ndef htmlParse(url):\r\n\r\n monthDict = {'January': 1,\r\n 'February': 2,\r\n 'March': 3,\r\n 'April': 4,\r\n 'May': 5,\r\n 'June': 6,\r\n 'July': 7,\r\n 'August': 8,\r\n 'September': 9,\r\n 'October': 10,\r\n 'November': 11,\r\n 'December': 12,\r\n }\r\n\r\n html = urllib.request.urlopen(url)\r\n html = html.read()\r\n #doc = open('test.txt', 'wb')\r\n #doc.write(html)\r\n soup = BeautifulSoup(html, 'html.parser')\r\n imageLink = soup.find(\"meta\", property=\"og:image\")\r\n date = soup.find(\"span\", class_ = \"date-taken-label\")\r\n #print(imageLink['content'])\r\n #print(str(date.contents[0]))\r\n dateString = r'Taken on (\\w+) (\\d+), (\\d+)\\n\\t\\t'\r\n date = re.search(dateString, str(date.contents[0]))\r\n #print (monthDict[date.group(1)])\r\n if date is not None:\r\n return [imageLink['content'], [date.group(3), monthDict[date.group(1)], date.group(2)]] #return url to image and date in [year, month, day] format\r\n return None\r\n\r\n\r\nif __name__ == \"__main__\":\r\n url = \"https://www.flickr.com/photos/rebekahnewton/29446013094/\"\r\n print(htmlParse(url))\r\n\r\n","sub_path":"SearchStoreAnalyze/htmlParser.py","file_name":"htmlParser.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"471404328","text":"from blinkstick import blinkstick\nimport pigpio\nimport time\nfrom time import sleep\n\n\n# Initialize Pi\npi = pigpio.pi()\nif not pi.connected:\n exit()\n\n# Initialize switches\nGPIOA = 18\nGPIOB = 4\n\npi.set_mode(GPIOA, pigpio.INPUT)\npi.set_mode(GPIOB, 
pigpio.INPUT)\npi.set_pull_up_down(GPIOA, pigpio.PUD_UP)\npi.set_pull_up_down(GPIOB, pigpio.PUD_UP)\n\n# Initialize LEDs \nLEDlist = blinkstick.find_all()\nLED0 = LEDlist[0]\nLED1 = LEDlist[1]\n\n# Initialize Servo\nservo = 14 \n\nMIN_WIDTH=500\nMAX_WIDTH=2500\n\n\n# Start misc functions\n\ndef trial_setting():\n '''\n Read trial settings from experimental protocol to assign optimal reward [1]\n to peckKey A or B\n '''\n settings = {0: \"A\", 1:\"B\"}\n return settings\n \ndef reward_time(choice):\n '''\n This function defines the feeding time in seconds for the optimal \n and suboptimal reward, please change here \n '''\n optimal = 4 # in seconds\n suboptimal = 1 # in seconds\n \n if choice == 0:\n reward = suboptimal # obviously needs to be adapted for the given trial\n else:\n reward = optimal\n \n return reward\n\n\ndef feeder(choice):\n '''\n This functions reads the reward time for the specific choice and \n controls the servo for feeding reward\n '''\n # choose reward for given choice\n reward = reward_time(choice)\n # move feeder forward\n pi.set_servo_pulsewidth(servo, 500)\n sleep(0.82)\n # wait for feeding time\n pi.set_servo_pulsewidth(servo, 0)\n sleep(reward)\n # move feeder backward\n pi.set_servo_pulsewidth(servo, 2500)\n sleep(0.82)\n # stop servo\n pi.set_servo_pulsewidth(servo, 0)\n \n \ndef info():\n for bstick in blinkstick.find_all():\n print (\"Found device:\")\n print (\" Manufacturer: \" + bstick.get_manufacturer())\n print (\" Description: \" + bstick.get_description())\n print (\" Serial: \" + bstick.get_serial())\n print (\" Current Color: \" + bstick.get_color(color_format=\"hex\"))\n\ndef off(LED):\n for led in range(8):\n LED.set_color(0,led,0,0,0)\n \ndef red(LED):\n for led in range(8):\n LED.set_color(0,led,255,0,0)\n\ndef green(LED):\n for led in range(8):\n LED.set_color(0,led,0,255,0)\n\ndef blue(LED):\n for led in range(8):\n LED.set_color(0,led,0,0,255)\n\ndef yellow(LED):\n for led in range(8):\n LED.set_color(0,led,255,200,0)\n \ndef white(LED):\n for led in range(8):\n LED.set_color(0,led,255,255,255)\n\ndef start():\n red(LED0), red(LED1), sleep(1)\n off(LED0), off(LED1), sleep(0.5)\n \n yellow(LED0), yellow(LED1), sleep(1)\n off(LED0), off(LED1), sleep(0.5)\n \n white(LED0), white(LED1), sleep(1)\n off(LED0), off(LED1), sleep(0.5)\n\ndef choice(gpio, level, tick):\n # log choice\n print(\"%d\" %gpio)\n # evaluate choice\n if gpio == 4:\n choice = 0 # suboptimal\n elif gpio == 18:\n choice = 1 # optimal\n # change choice status\n\n # dispense reward\n feeder(choice)\n # stop trial\n off(LED0)\n off(LED1)\n\n# start sequence\nstart()\n\n# activate callback for PeckKeys\ncb1 = pi.callback(GPIOA, pigpio.FALLING_EDGE, choice)\ncb2 = pi.callback(GPIOB, pigpio.FALLING_EDGE, choice)\n\n# start trial\nsettings = trial_setting() # choose trial settings\nred(LED0), green(LED1)\nstart_time = time.time()\n\nmax_time = 300 # 5 min max trial time\n\nwhile True:\n current_time = time.time()\n elapsed_time = current_time - start_time\n\n if elapsed_time > max_time:\n print(\"Finished iterating in: \" + str(int(elapsed_time)) + \" seconds\")\n break\n \nend_time = time.time()\n# stop hardware \noff(LED0), off(LED1)\ncb1.cancel(), cb2.cancel()\npi.stop()\n\n ","sub_path":"archive/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"130991346","text":"import json\nfrom random import random\n\nfrom clients.client import Player\nimport numpy as 
np\nfrom sklearn import linear_model\n\nclass MatchMaker(Player):\n def __init__(self):\n super(MatchMaker, self).__init__(name=\"Wildcats matchmaker\", is_player=False)\n game_info = json.loads(self.client.receive_data(size=32368*2))\n print('Matchmaker', game_info)\n self.random_candidates_and_scores = game_info['randomCandidateAndScores']\n self.n = game_info['n']\n self.prev_candidate = {'candidate': [], 'score': 0, 'iter': 0}\n self.time_left = 120\n self.round = 0\n self.prev_cand = []\n self.candidates = []\n for i in self.random_candidates_and_scores:\n score = self.random_candidates_and_scores[i]['Score']\n attr = self.random_candidates_and_scores[i]['Attributes']\n self.prev_cand.append(attr)\n self.candidates.append((attr, score))\n\n def play_game(self):\n\n while True:\n candidate = self.my_candidate()\n self.client.send_data(json.dumps(candidate))\n response = json.loads(self.client.receive_data(32368*2))\n if 'game_over' in response:\n if response['match_found']:\n print(\"Perfect Candidate Found\")\n print(\"Total candidates used = \", response['num_iterations'])\n else:\n print(\"Perfect candidate not found - you have failed the player\")\n print(\"Total candidates used = \", response['total_candidates'])\n exit(0)\n else:\n self.prev_candidate = response['prev_candidate']\n self.time_left = response['time_left']\n\n def OLStrain(self):\n for x in np.arange(-1.00, 1.05, 0.05):\n candidate = np.ones(self.n) * x\n score = 0.0\n self.candidates.append((candidate, score))\n # candidates 20 * n\n X = np.array([cand[0] for cand in self.candidates])\n # score 20 * 1\n Y = np.array([np.array(cand[1]) for cand in self.candidates])\n # regularization\n R = np.identity(X.shape[1]) * 0.05\n # weights n * 1\n #print(X.shape, Y.shape, R.shape)\n estimate = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X) + np.dot(R.T, R)), X.T), Y).reshape(self.n)\n return estimate\n\n def Ridgetrain(self):\n X = np.array([cand[0] for cand in self.candidates])\n y = np.array([np.array(cand[1]) for cand in self.candidates])\n clf = linear_model.Ridge(alpha = 1.0)\n clf.fit(X, y)\n return clf.coef_\n\n def SGDtrain(self):\n X = np.array([cand[0] for cand in self.candidates])\n y = np.array([np.array(cand[1]) for cand in self.candidates])\n clf = linear_model.SGDRegressor(max_iter = 1000)\n clf.fit(X, y)\n return clf.coef_\n\n changed_index = []\n def findmin(self, weight):\n pos_minw = 10.0\n neg_minw = 10.0\n pos_id = -1\n neg_id = -1\n for i in range(len(weight)):\n if i in self.changed_index:\n continue\n if weight[i] > 0:\n if weight[i] < pos_minw:\n pos_minw = weight[i]\n pos_id = i\n else:\n if abs(weight[i]) < neg_minw:\n neg_minw = abs(weight[i])\n neg_id = i\n self.changed_index.append(pos_id)\n self.changed_index.append(neg_id)\n return pos_id, neg_id\n\n prev_score = -1.0\n this_score = 0\n prev_pid = 0\n prev_nid = 0\n prevp = 0.0\n prevn = 0.0\n def my_candidate(self):\n \"\"\"\n PLACE YOUR CANDIDATE GENERATION ALGORITHM HERE\n As the matchmaker, you have access to the number of attributes (self.n),\n initial random candidates and their scores (self.random_candidates_and_scores),\n your clock time left (self.time_left)\n and a dictionary of the previous candidate sent (self.prev_candidate) consisting of\n 'candidate' = previous candidate attributes\n 'score' = previous candidate score\n 'iter' = iteration num of previous candidate\n For this function, you must return an array of values that lie between 0 and 1 inclusive and must have four or\n fewer digits of precision. 
The length of the array should be equal to the number of attributes (self.n)\n \"\"\"\n if self.round > 1:\n self.candidates.append((self.prev_candidate['candidate'], self.prev_candidate['score']))\n self.prev_cand.append(self.prev_candidate['candidate'])\n if self.round > 2:\n self.prev_score = self.this_score\n self.this_score = self.prev_candidate['score']\n print(self.prev_score, self.this_score)\n weights1 = self.OLStrain()\n weights2 = self.Ridgetrain()\n weights3 = self.SGDtrain()\n\n else:\n weights1 = self.OLStrain()\n weights2 = self.Ridgetrain()\n weights3 = self.SGDtrain()\n self.round += 1\n\n candidate = []\n is_pos = []\n is_neg = []\n could_be_pos = []\n could_be_neg = []\n\n avg = []\n for i in range(self.n):\n w1 = weights1[i]\n w2 = weights2[i]\n w3 = weights3[i]\n #avg_w = (w1 + w2 + w3) / 3\n avg_w = w3\n avg.append(avg_w)\n if avg_w > 2.5 / self.n:\n candidate.append(1)\n is_pos.append(i)\n elif avg_w > 0:\n candidate.append(1)\n could_be_pos.append(1)\n elif avg_w < -2.5 / self.n:\n candidate.append(0)\n is_neg.append(i)\n else:\n candidate.append(0)\n could_be_neg.append(i)\n\n\n if candidate in self.prev_cand:\n if self.round > 5 and self.n > 100 and self.this_score < self.prev_score:\n candidate[self.prev_pid] = self.prevp\n candidate[self.prev_nid] = self.prevn\n pid, nid = self.findmin(avg)\n self.prev_pid, self.prev_nid = pid, nid\n self.prevp = candidate[pid]\n self.prevn = candidate[nid]\n candidate[pid] = -candidate[pid]\n candidate[nid] = -candidate[nid]\n if candidate not in self.prev_cand:\n self.prev_cand.append(candidate)\n return candidate\n else:\n self.prev_cand.append(candidate)\n return candidate\n","sub_path":"clients/wildcats_matchmaker.py","file_name":"wildcats_matchmaker.py","file_ext":"py","file_size_in_byte":6578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"237258245","text":"from time import sleep\nfrom pprint import pprint\n\nimport telepot\nfrom telepot.namedtuple import ReplyKeyboardMarkup, KeyboardButton\n\nfrom getCasId import getCasId, parseIdentifierFromURL\nfrom icsDecrypter import decrypt_ics\nfrom icsDownloader import download_ics\n\n# buttons\nBUT_START = '/start'\nBUT_SEARCH = '/configure'\nBUT_NEXT = '/next'\nBUT_TODAY = '/today'\nBUT_TOMORROW = '/tomorrow'\nBUT_WEEK = '/week'\nBUT_CANCEL = '/cancel'\nBUT_HELP = '/help'\nBUT_INSERT = '/insert'\nBUT_CAIO = '/caio'\nBUT_YAN = '/yan'\nBUT_DEDE = '/dede'\n\n# hide functions\nHID_EXPORT_DATA = '/export'\nHID_IMPORT_DATA = '/import'\n\n# states\nST_NORMAL = 0\nST_SEARCH = 1\nST_INSERT = 2\n# initialize global variables\nstate = ST_NORMAL\n\n# keyboards\nKBD_NORMAL = [[BUT_TODAY, BUT_TOMORROW, BUT_WEEK], [BUT_SEARCH, BUT_HELP], [BUT_CAIO, BUT_YAN, BUT_DEDE]]\nKBD_SEARCH = [[BUT_CANCEL, BUT_INSERT, BUT_HELP], ]\nKBD_INSERT = [[BUT_CANCEL, BUT_SEARCH, BUT_HELP], ]\n\n# answers\nASW_START = \"Hello friend, I'm a bot and I'm here to help you to know your schedule.\"\nASW_ID = \"I got it! I'll memorize it so you won't need to do this process again.\"\nASW_CANCEL = \"You canceled the action. 
How can I help you?\"\nASW_ERROR = \"I'm sorry but I couldn't understand your answer.\\n\" \\\n\t\t\t\"Type \" + BUT_HELP + \" to get some instructions.\"\n\ndef getAllUserData():\n\treturn 'not implemented'\n\ndef readUserData(username):\n\tdata = {}\n\tfile = open('./user_data/' + username + '.csv', 'r')\n\tfile = file.read().split(',')\n\tdata['search_str'] = file[0]\n\tdata['id'] = file[1]\n\treturn data\n\ndef writeUserData(username, search_str, id):\n\tdata = search_str + ',' + str(id)\n\tfile = open('./user_data/' + username + '.csv', 'w')\n\tfile.write(data)\n\ndef getIdFromURLorInt(str):\n\ttry:\n\t\tid = int(str)\n\texcept:\n\t\tid = parseIdentifierFromURL(str)\n\treturn id\n\ndef getNextEvent(username):\n\tid = readUserData(username)['id']\n\tfor day in range(10):\n\t\t# download ics schedule and decrypt\n\t\tdownload_ics(id, id, day, day)\n\t\tevents = decrypt_ics('./ics/' + id + '.ics')\n\t\tif len(events)>0:\n\t\t\treturn events[0]\n\t# return None when can't find\n\treturn None\n\ndef getSchedule(start, end, id=0, username=0):\n\tif id == 0:\n\t\tid = readUserData(username)['id']\n\telse:\n\t\tid = str(id)\n\n\t# download ics schedule and decrypt\n\tdownload_ics(id, id, start, end)\n\tevents = decrypt_ics('./ics/' + id + '.ics')\n\n\t# convert data to output string format\n\tsched = ''\n\tfor event in events:\n\t\ttry:\n\t\t\tsched += \"\\n\\n\" + str(event)\n\t\texcept:\n\t\t\tpass\n\tif sched == '':\n\t\tsched = None\n\treturn sched\n\ndef handle(msg):\n\tglobal state\n\t# pprint(msg)\n\n\tcontent_type, chat_type, chat_id = telepot.glance(msg)\n\n\tif content_type is 'text':\n\t\tmsg_text = msg['text']\n\t\tkeyboard_options = []\n\t\tuser_id = str(msg['from']['id'])\n\t\thide_keyboard = False\n\n\t\tprint('user: ' + user_id + ', message: ' + msg_text)\n\n\t\tif HID_IMPORT_DATA in msg_text:\n\t\t\tanswer = getAllUserData()\n\t\telif HID_EXPORT_DATA in msg_text:\n\t\t\tanswer = getAllUserData()\n\n\t\telif state is ST_NORMAL:\n\t\t\tif BUT_START in msg_text: # start\n\t\t\t\tanswer = ASW_START\n\t\t\t\tstate = ST_NORMAL\n\t\t\telif BUT_NEXT in msg_text:\n\t\t\t\tevent = getNextEvent(user_id)\n\t\t\t\tif event:\n\t\t\t\t\tanswer = \"Your next event is:\\n\\n\" + str(event)\n\t\t\t\telse:\n\t\t\t\t\tanswer = \"You don't have any event in the next 10 days.\"\n\t\t\t\tstate = ST_NORMAL\n\t\t\telif BUT_HELP in msg_text:\n\t\t\t\tanswer = BUT_SEARCH + \" to find your student's number.\\n\" + \\\n\t\t\t\t\t\t BUT_TODAY + \" to know your today's schedule.\\n\" + \\\n\t\t\t\t\t\t BUT_WEEK + \" to know your week's schedule\"\n\t\t\t\tstate = ST_NORMAL\n\t\t\telif BUT_SEARCH in msg_text:\n\t\t\t\tanswer = \"I'll find your identifier in CAS. 
\" \\\n\t\t\t\t\t\t \"Which string should I use to search for you in CAS?\"\n\t\t\t\tstate = ST_SEARCH\n\t\t\telif BUT_TODAY in msg_text:\n\t\t\t\tsched = getSchedule(0, 0, username = user_id)\n\t\t\t\tif sched:\n\t\t\t\t\tanswer = \"Your schedule for today is:\" + sched\n\t\t\t\telse:\n\t\t\t\t\tanswer = \"You don't have any classes today\"\n\t\t\t\tstate = ST_NORMAL\n\t\t\telif BUT_TOMORROW in msg_text:\n\t\t\t\tsched = getSchedule(1, 1, username = user_id)\n\t\t\t\tif sched:\n\t\t\t\t\tanswer = \"Your schedule for tomorrow is:\" + sched\n\t\t\t\telse:\n\t\t\t\t\tanswer = \"You don't have any classes tomorrow\"\n\t\t\t\tstate = ST_NORMAL\n\t\t\telif BUT_WEEK in msg_text:\n\t\t\t\tanswer = \"Your schedule for this week is:\" + getSchedule(0, 7, username = user_id)\n\t\t\t\tstate = ST_NORMAL\n\t\t\telif BUT_CAIO in msg_text:\n\t\t\t\tsched = getSchedule(0, 0, id = 7458)\n\t\t\t\tif sched:\n\t\t\t\t\tanswer = \"Your schedule for today is:\" + sched\n\t\t\t\telse:\n\t\t\t\t\tanswer = \"You don't have any classes today\"\n\t\t\t\tstate = ST_NORMAL\n\t\t\telif BUT_YAN in msg_text:\n\t\t\t\tsched = getSchedule(0, 0, id = 5079)\n\t\t\t\tanswer = \"BOA TIME\" + sched\n\t\t\t\tstate = ST_NORMAL\n\t\t\telif BUT_DEDE in msg_text:\n\t\t\t\tsched = getSchedule(0, 0, id = 652)\n\t\t\t\tanswer = \"DEDE VIADO\" + sched\n\t\t\t\tstate = ST_NORMAL\n\t\t\telse:\n\t\t\t\tanswer = ASW_ERROR\n\t\t\t\tstate = ST_NORMAL\n\t\telif state is ST_SEARCH:\n\t\t\tif BUT_CANCEL in msg_text:\n\t\t\t\tanswer = ASW_CANCEL\n\t\t\t\tstate = ST_NORMAL\n\t\t\telif BUT_HELP in msg_text:\n\t\t\t\tanswer = \"Type \" + BUT_CANCEL + \" to cancel the search.\\n\" \\\n\t\t\t\t\t\t \"Type \" + BUT_INSERT + \" to insert identifier manually.\\n\" \\\n\t\t\t\t\t\t \"Type anything else to search for it.\"\n\t\t\t\tstate = ST_SEARCH\n\t\t\telif BUT_INSERT in msg_text:\n\t\t\t\tanswer = \"Do you know your identifier in the CAS \" \\\n\t\t\t\t\t\t \"or the weblink to download your schedule?\"\n\t\t\t\tstate = ST_INSERT\n\t\t\telse:\n\t\t\t\tsearch_name = str(msg_text).replace('/', '')\n\t\t\t\tpre_answer = \"Ok! I'm searching for '\" + search_name + \\\n\t\t\t\t\t\t\t \"'. This may take one minute or two.\"\n\t\t\t\tbot.sendMessage(chat_id, pre_answer)\n\t\t\t\ttry:\n\t\t\t\t\tid = getCasId(search_name, hide=False)\n\t\t\t\t\twriteUserData(user_id, search_name, id)\n\t\t\t\t\tanswer = \"Your id in the CAS is \" + id + \".\\n\" + ASW_ID\n\t\t\t\texcept:\n\t\t\t\t\tanswer = \"I'm sorry but I couldn't find you searching for '\"\\\n\t\t\t\t\t\t\t + msg_text + \"'. Do you have another string to sugest?\"\n\t\t\t\tstate = ST_NORMAL\n\t\telif state is ST_INSERT:\n\t\t\tif BUT_CANCEL in msg_text:\n\t\t\t\tanswer = ASW_CANCEL\n\t\t\t\tstate = ST_NORMAL\n\t\t\telif BUT_HELP in msg_text:\n\t\t\t\tanswer = \"Type \" + BUT_CANCEL + \" to cancel the insertion.\\n\" \\\n\t\t\t\t\t\t \"Type anything else to save it as your id.\"\n\t\t\t\tstate = ST_INSERT\n\t\t\telif BUT_SEARCH in msg_text:\n\t\t\t\tanswer = \"I'll find your identifier in CAS. 
\" \\\n\t\t\t\t\t\t \"Which string should I use to search for you in CAS?\"\n\t\t\t\tstate = ST_SEARCH\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tid = getIdFromURLorInt(msg_text)\n\t\t\t\t\twriteUserData(user_id, msg_text, id)\n\t\t\t\t\tanswer = ASW_ID\n\t\t\t\t\tstate = ST_NORMAL\n\t\t\t\texcept Exception as error:\n\t\t\t\t\tprint(error)\n\t\t\t\t\tanswer = ASW_ERROR\n\t\t\t\t\tstate = ST_INSERT\n\n\t\t# change keyboard according to new state\n\t\tif state is ST_NORMAL:\n\t\t\tkeyboard_options = KBD_NORMAL\n\t\telif state is ST_SEARCH:\n\t\t\tkeyboard_options = KBD_SEARCH\n\t\telif state is ST_INSERT:\n\t\t\tkeyboard_options = KBD_INSERT\n\n\t\tbuttons = []\n\t\tkeyboard = ReplyKeyboardMarkup(keyboard=buttons,\n\t\t\t\t\t\t\t\t\t resize_keyboard = True,\n\t\t\t\t\t\t\t\t\t one_time_keyboard = hide_keyboard)\n\t\tfor line in keyboard_options:\n\t\t\tkeyboard_line = []\n\t\t\tfor row in line:\n\t\t\t\tkeyboard_line.append(KeyboardButton(text=row))\n\t\t\tbuttons.append(keyboard_line)\n\n\t\tbot.sendMessage(chat_id, answer, reply_markup=keyboard)\n\nif __name__ == '__main__':\n\t# instantiate bot\n\tTOKEN = open('token.txt', 'r').read()\n\tbot = telepot.Bot(TOKEN)\n\tpprint(bot.getMe())\n\n\t# handling messages\n\tbot.message_loop({'chat': handle})\n\tpprint('Listening ...')\n\n\t# hanging program execution\n\twhile 1:\n\t\tsleep(10)\n","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"318819792","text":"__author__ = 'josh'\n\n''' Runner for allowance database matching, called by USR_analyse'''\nimport re\nimport miles\n\nfrom CLU_FIXED_VALUES import *\nfrom excel_timehack import excel_timehack\nfrom datetime import datetime\nfrom datetime import timedelta\n\ndef compare(allow_obj, sp_obj, errors):\n # compare allowance database GYH(M) to USR GYH(M)\n if allow_obj.GYH_T_Mileage_To_Nominated_Address in ('N/A', 'N', 'No'):\n pass\n\n elif allow_obj.GYH_T_Mileage_To_Nominated_Address != sp_obj.Perm_GYH_Mileage:\n print(bcolors.FAIL + 'found mismatch between allowance DB GYH_T :{} and USR GYH_T {}'.format\n (allow_obj.GYH_T_Mileage_To_Nominated_Address, sp_obj.Perm_GYH_Mileage) + bcolors.ENDC)\n \n # if division by 10 gives a remainder, the gyh(t) mileage was not rounded\n try:\n if allow_obj.GYH_T_Mileage_To_Nominated_Address%10 != 0:\n print(bcolors.FAIL + 'incorrect rounding: {} for allowance DB GYH_T mileage'.format\n (allow_obj.GYH_T_Mileage_To_Nominated_Address) + bcolors.ENDC)\n except TypeError as e:\n pass\n\n if allow_obj.Live_Onboard == 'No' and sp_obj.Perm_SLA_Charged != '':\n print(bcolors.FAIL + 'does not have live onboard in ALWDB but Perm SLA is {}'.format\n (sp_obj.Perm_SLA_Charged) + bcolors.ENDC)\n\n if allow_obj.Live_Onboard not in ('No', 'N') and allow_obj.Live_Onboard != '' and sp_obj.Perm_SLA_Charged == '':\n print(bcolors.FAIL + 'does live onboard in ALWDB but Perm SLA is {}'.format(sp_obj.Perm_SLA_Charged)\n + bcolors.ENDC)\n\n if allow_obj.Live_Onboard == 'Yes':\n if sp_obj.Grade in FIX_VALUES_JUNIOR_RATES_RANKS and sp_obj.Perm_SLA_Charged != FIX_VALUES_JR['Perm_SLA_Charged']:\n print(bcolors.OKBLUE + 'Perm SLA is {} should be {}'.format(sp_obj.Perm_SLA_Charged, FIX_VALUES_JR['Perm_SLA_Charged'])\n + bcolors.ENDC)\n if sp_obj.Grade in FIX_VALUES_SENIOR_RATES_RANKS and sp_obj.Perm_SLA_Charged != FIX_VALUES_SR['Perm_SLA_Charged']:\n print(bcolors.OKBLUE + 'Perm SLA is {} should be {}'.format(sp_obj.Perm_SLA_Charged, 
FIX_VALUES_SR['Perm_SLA_Charged'])\n + bcolors.ENDC)\n if sp_obj.Grade in FIX_VALUES_JUNIOR_GRUNTERS_RANKS and sp_obj.Perm_SLA_Charged != FIX_VALUES_GRUNTER_JO['Perm_SLA_Charged']:\n print(bcolors.OKBLUE + 'Perm SLA is {} should be {}'.format(sp_obj.Perm_SLA_Charged, FIX_VALUES_GRUNTER_JO['Perm_SLA_Charged'])\n + bcolors.ENDC)\n if sp_obj.Grade in FIX_VALUES_GRUNTER_SO and sp_obj.Perm_SLA_Charged != FIX_VALUES_GRUNTER_SO['Perm_SLA_Charged']:\n print(bcolors.OKBLUE + 'Perm SLA is {} should be G4Z'.format(sp_obj.Perm_SLA_Charged, FIX_VALUES_GRUNTER_SO['Perm_SLA_Charged'],\n + bcolors.ENDC))\n \n if allow_obj.Live_Onboard not in ('Y', 'N'):\n print(bcolors.FAIL + 'Live Onboard Flag MISSING or corrupt/invalid' + bcolors.ENDC)\n # print(allow_obj.__dict__)\n\n try:\n int(allow_obj.Annual_GYH_T_and_HDT_Documention_Check)\n doc_check_anniversary = excel_timehack(allow_obj.Annual_GYH_T_and_HDT_Documention_Check)\n today = datetime.today()\n if doc_check_anniversary < today - timedelta(days=365):\n print(bcolors.FAIL + 'OOD annual GYH/HTD check' + bcolors.ENDC)\n\n except ValueError:\n print(bcolors.FAIL + 'annual GYH/HTD check date not set' + bcolors.ENDC)\n\n postcode_check(allow_obj, errors)\n\ndef postcode_check(allow_obj, errors):\n # pull out postcode from the allowance object, then run it through the Python Miles Module against the global postcode\n # PS is postcode pulled from GYH_T_address line in allowance object\n ps = allow_obj.Full_GYH_T_POSTCODE\n if not str(ps).upper() in ('', 'NA', 'N/A', 'N'):\n try:\n gmaps_distance = round(miles.get_mileage('PL22BG', ps), -1) # return rounded mileage\n print(bcolors.OKGREEN + ' matched GMAPS: {} to ALLOWDB {}'.format(gmaps_distance,\n allow_obj.GYH_T_Mileage_To_Nominated_Address) + bcolors.ENDC)\n if gmaps_distance < 5:\n print(bcolors.FAIL + 'GYH Mileage Rounding FAIL' + bcolors.ENDC)\n else:\n if allow_obj.GYH_T_Mileage_To_Nominated_Address != gmaps_distance:\n print(bcolors.FAIL + 'GMAPS mileage {} != allowance db mileage {}'.format(gmaps_distance,\n allow_obj.GYH_T_Mileage_To_Nominated_Address)+ bcolors.ENDC)\n except TypeError: # TypeError is returned when miles.get_mileage drops out on postcode check\n pass\n\n \n\n\n\n\n","sub_path":"ALW_interpreter.py","file_name":"ALW_interpreter.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"646144522","text":"import numpy as np\nimport cv2\nfrom mtgsdk import Card\nimport urllib\nimport _pickle as pickle\nimport os\nimport scanner\n\ndef dhash(image, hashSize=16):\n # convert the input image to grayscale then\n # resize it, adding a single column (width) so we\n # can compute the horizontal gradient\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n resized = cv2.resize(image, (hashSize + 1, hashSize))\n\n # compute the (relative) horizontal gradient between adjacent\n # column pixels\n diff = resized[:, 1:] > resized[:, :-1]\n\n # convert the difference image to a hash\n return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v])\n\ndef hamming_dist(h1,h2):\n return bin(h1^h2).count('1')\n\nfile_path = \"data/CardDatabase.cdb\"\n\nclass CardDatabase:\n cards = None\n dictionary = None\n di = None\n card_art_max_distance = 100\n empty = None\n empty_max_distance = 50\n\n def __init__(self, app_output=None, text_output=None):\n try:\n self = pickle.load(open(file_path, \"rb\"))\n if self.di is None:\n print(\"loaded card database\")\n else:\n self.download(app=app_output, text=text_output)\n 
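# if unpickling the saved database fails, fall back to rebuilding it from scratch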
except:\n self.save_empty()\n self.download(text=text_output)\n\n def download(self, app=None, text=None):\n # make data folder if it doesn't exist already\n os.makedirs(\"data\", exist_ok=True)\n\n # if dictionary index is None, download Magic the Gathering card data\n if self.di is None:\n if text is not None and app is not None:\n text.value = \"downloading cards...\"\n app.update()\n print(\"downloading cards...\")\n self.cards = Card.all()\n self.di = 0\n self.dictionary = {}\n pickle.dump(self, open(file_path, \"wb\"), -1)\n\n # loop through all cards\n if text is not None and app is not None:\n text.value = \"downloading images and generating dictionary...\"\n app.update()\n print(\"downloading images and generating dictionary...\")\n total = len(self.cards)\n while self.di < total:\n c = self.cards[self.di]\n if text is not None and app is not None:\n text.value = \"{}/{}: {}/{}\".format(self.di, total, c.set, c.name)\n app.update()\n print(\"{}/{}: {}/{}\".format(self.di, total, c.set, c.name))\n\n # make a folder for the current card's set if it doesn't exist already\n filename = \"data/{}/{}{}.jpg\".format(c.set, c.name, c.multiverse_id)\n if not os.path.exists(os.path.dirname(filename)):\n directory = os.path.dirname(filename)\n os.makedirs(directory, exist_ok=True)\n\n # if the card's image has already been downloaded, add it to the dictionary using its art's hash as a key\n if os.path.isfile(filename):\n if text is not None and app is not None:\n text.value += \" : file exists\"\n app.update()\n print(\" file exists\")\n image = cv2.imread(filename)\n self.dictionary[dhash(image[37:172, 20:204])] = self.di\n self.di += 1\n if self.di % 50 is 0:\n pickle.dump(self, open(file_path, \"wb\"), -1)\n continue\n\n # if the card's image is not available, skip it\n if c.image_url is None:\n if text is not None and app is not None:\n text.value += \" : image N/A\"\n app.update()\n print(\" image N/A\")\n self.di += 1\n continue\n\n # keep trying to download the card's image until successful, adding it to the dictionary using its art's\n # hash as a key when done\n print_message = True\n while True:\n try:\n urllib.request.urlretrieve(c.image_url, filename)\n image = cv2.imread(filename)\n self.dictionary[dhash(image[37:172, 20:204])] = self.di\n self.di += 1\n if self.di % 50 is 0:\n pickle.dump(self, open(file_path, \"wb\"), -1)\n break\n except:\n if print_message:\n print_message = False\n if text is not None and app is not None:\n text.value += \" : retrying @ \" + c.image_url\n app.update()\n print(\" \" + c.image_url)\n print(\" retrying...\")\n\n # set di to None to denote completion and then save everything\n self.di = None\n pickle.dump(self, open(file_path, \"wb\"), -1)\n if text is not None and app is not None:\n text.value = \"saved dictionary\"\n app.update()\n print(\"saved dictionary\")\n\n\n def save_empty(self):\n # take a picture of the empty tray and save it, then calculate\n # the hash value\n e = scanner.take_picture()\n cv2.imwrite(\"data/empty.jpg\", e)\n self.empty = dhash(e)\n\n # save the database\n pickle.dump(self, open(file_path, \"wb\"), -1)\n\n def is_empty(self):\n # take a picture, then calculate the hash value, then find\n # the hamming difference between the current hash and the\n # saved hash of the empty tray\n p = scanner.take_picture()\n pic = dhash(p)\n distance = hamming_dist(self.empty, pic)\n\n # the tray is empty if the hamming distance is less than\n # the empty_max_distance\n return distance < self.empty_max_distance\n\n def get_card(self, 
image):\n # calculate the hash value of the card art\n hash = dhash(image)\n matches = []\n\n # search through the dictionary and add cards that have a\n # hamming distance less than the card_art_max_distance to\n # the list of matches\n for key, value in self.dictionary.items():\n d = hamming_dist(hash, key)\n if d <= self.card_art_max_distance:\n matches.append((d, self.cards[value]))\n\n # if there are matches, return the closest one\n if len(matches) > 0:\n matches.sort()\n return matches[0]\n\n return None","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"590895410","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 19 16:53:01 2020\r\n\r\n@author: MAI\r\n\"\"\"\r\nimport numpy as np\r\nfrom vecm import para_vecm\r\nfrom scipy.stats import f , chi2\r\n\r\ndef Where_cross_threshold(trigger_spread, threshold, add_num):\r\n #initialize array\r\n check = np.zeros(trigger_spread.shape)\r\n #put on the condition\r\n check[(trigger_spread - threshold) > 0] = add_num\r\n check[:,0] = check[:,1]\r\n #Open_trigger_array\r\n check = check[:,1:] - check[:,:-1]\r\n return check\r\n\r\ndef Where_threshold(trigger_spread, threshold, add_num, up):\r\n #initialize array\r\n check = np.zeros(trigger_spread.shape)\r\n #put on the condition\r\n if up:\r\n check[(trigger_spread - threshold) > 0] = add_num\r\n else:\r\n check[(trigger_spread - threshold) < 0] = add_num\r\n check[:,0] = 0 \r\n return check\r\n\r\ndef tax(payoff,rate):\r\n tax_price = payoff * (1 - rate * (payoff > 0))\r\n return tax_price\r\n\r\ndef CNN_test(st1,st2,sp,v1,v2,tick,DetPos,table,NowOpen,model_CNN):\r\n if NowOpen:\r\n times = 1\r\n else:\r\n times = len(DetPos)\r\n #Array Initialize\r\n AllSprInput = []\r\n AllCharInput = []\r\n pair_pos = np.zeros([len(DetPos)],dtype = int)\r\n count = 0\r\n \r\n character = ['w1','w2','mu','stdev']\r\n TableChar = np.zeros([len(table),5])\r\n TableChar[:,:4] = np.array(table[character])\r\n\r\n if tick:\r\n s = 50\r\n else:\r\n s = 50\r\n for m in range(times):\r\n SprInput = np.zeros([100,5])\r\n CharInput = np.zeros([5])\r\n if not NowOpen:\r\n CharInput[:4] = TableChar[m,:4]\r\n lenth = len(DetPos[m])\r\n else:\r\n lenth = len(DetPos)\r\n for i in range(lenth):\r\n if NowOpen:\r\n index = DetPos[i]\r\n pair = i\r\n else:\r\n index = DetPos[m][i,0]\r\n pair = m\r\n SprInput[:,0] = st1[pair,(s+index):(s+100+index)]\r\n SprInput[:,1] = st2[pair,(s+index):(s+100+index)]\r\n SprInput[:,2] = sp[pair,(s+index):(s+100+index)]\r\n SprInput[:,3] = v1[pair,(s+index):(s+100+index)]\r\n SprInput[:,4] = v2[pair,(s+index):(s+100+index)]\r\n CharInput[4] = index/60\r\n AllSprInput.append(SprInput.copy())\r\n if NowOpen:\r\n CharInput[:4] = TableChar[i,:4]\r\n AllCharInput.append(CharInput.copy())\r\n count += 1\r\n pair_pos[m] = count\r\n AllSprInput = np.array(AllSprInput)\r\n AllCharInput = np.array(AllCharInput)\r\n #Normalize CNN_SpreadInput\r\n #mu\r\n mu = np.zeros([len(AllSprInput),1,5])\r\n mu[:,0,:2] = np.mean(AllSprInput[:,:,:2], axis=1)\r\n mu[:,0,2] = AllCharInput[:,2]\r\n #std\r\n stock_std = np.std(AllSprInput[:,:,:3], axis=1)\r\n std = np.ones([len(AllSprInput),1,5])\r\n std[:,0,:2] = stock_std[:,:2]\r\n std[:,0,2] = AllCharInput[:,3]\r\n #Normalize\r\n AllSprInput = (AllSprInput - mu)/std\r\n AllCharInput[:,:2] = AllCharInput[:,:2]*stock_std[:,:2] / np.expand_dims(stock_std[:,2],axis = 1)\r\n \r\n #CNN_predict\r\n pre = 
model_CNN.predict([AllSprInput,AllCharInput])\r\n prediction = np.argmax(pre,axis = 1)\r\n \r\n if NowOpen:\r\n return prediction\r\n else:\r\n return [ prediction , pair_pos ]\r\n\r\ndef VAR_model( y , p ): \r\n k = len(y.T) # number of stocks\r\n n = len(y) # data length\r\n \r\n xt = np.ones( ( n-p , (k*p)+1 ) )\r\n for i in range(n-p):\r\n a = 1\r\n for j in range(p):\r\n a = np.hstack( (a,y[i+p-j-1]) )\r\n a = a.reshape([1,(k*p)+1])\r\n xt[i] = a\r\n \r\n zt = np.delete(y,np.s_[0:p],axis=0)\r\n xt = np.mat(xt)\r\n zt = np.mat(zt)\r\n\r\n beta = ( xt.T * xt ).I * xt.T * zt # estimate the VAR parameters\r\n \r\n A = zt - xt * beta # compute the residuals\r\n sigma = ( (A.T) * A ) / (n-p) # covariance matrix of the residuals\r\n \r\n return [ sigma , beta ]\r\n\r\n# Fit a VAR(p) model and select the lag order with BIC--------------------------------------------------------------\r\ndef order_select( y , max_p ):\r\n \r\n k = len(y.T) # number of stocks\r\n n = len(y) # data length\r\n \r\n bic = np.zeros((max_p,1))\r\n for p in range(1,max_p+1):\r\n sigma = VAR_model( y , p )[0]\r\n bic[p-1] = np.log( np.linalg.det(sigma) ) + np.log(n) * p * (k*k) / n\r\n bic_order = int(np.where(bic == np.min(bic))[0] + 1) # the lag p starts from 1, so add 1\r\n \r\n return bic_order\r\n\r\ndef fore_chow(stock1, stock2, model, Flen, give=False, p=0, A=0, ut=0, maxp=5):\r\n \r\n if model == 'model1':\r\n model_name = 'H2'\r\n elif model == 'model2':\r\n model_name = 'H1*'\r\n else:\r\n model_name = 'H1'\r\n \r\n day1 = ( np.vstack( [stock1, stock2] ).T )\r\n day1 = np.log(day1)\r\n h = len(day1) - Flen\r\n k = 2 # number of stocks\r\n n = Flen # formation period data length\r\n\r\n if give == False:\r\n y = ( np.vstack( [stock1[0:Flen], stock2[0:Flen]] ).T ) \r\n y = np.log(y)\r\n p = order_select(y,maxp)\r\n at , A, _ = para_vecm(y,model_name,p) \r\n# at , A = para_vecm(y,model_name,p) \r\n ut = np.dot(at,at.T)/len(at.T) \r\n\r\n Remain_A = A.copy()\r\n Remain_ut = ut.copy()\r\n Remain_p = p\r\n \r\n A = A.T \r\n phi_0 = np.eye(k) \r\n A1 = np.delete(A,0,axis=0).T \r\n phi = np.hstack( (np.zeros([k,2*(p-1)]) , phi_0) )\r\n sigma_t = np.dot( np.dot( phi_0 , ut ) , phi_0.T ) # sigma hat \r\n ut_h = []\r\n\r\n for i in range(1,h+1):\r\n lag_mat = day1[ len(day1)-i-p-1 : len(day1)-i , : ] \r\n lag_mat = np.array(lag_mat[::-1]) \r\n if p == 1:\r\n ut_h.append( lag_mat[0].T - ( A[0].T + np.dot( A[1:k*p+1].T , lag_mat[1:2].T ) ).T ) \r\n else:\r\n ut_h.append( lag_mat[0].T - ( A[0].T + np.dot( A[1:k*p+1].T , lag_mat[1:k*p-1].reshape([k*p,1]) ) ).T ) \r\n\r\n for i in range(h-1): \r\n a = phi[:,i*2:len(phi.T)]\r\n phi_i = np.dot( A1 , a.T )\r\n sigma_t = sigma_t + np.dot( np.dot( phi_i , ut ) , phi_i.T ) \r\n phi = np.hstack( (phi , phi_i) )\r\n phi = phi[: , ((p-1)*k):len(phi.T)]\r\n ut_h = np.array(ut_h).reshape([1,h*2])\r\n e_t = np.dot( phi , ut_h.T )\r\n \r\n # guard: if the inverse of sigma_t diverges, report a structural break.\r\n try: \r\n tau_h = np.dot(np.dot( e_t.T , np.linalg.inv(sigma_t) ) , e_t ) / k \r\n except: \r\n return Remain_p, Remain_A, Remain_ut, 1 \r\n else: \r\n if tau_h > float(f.ppf(0.99,k,n-k*p+1)):#tau_h > float(chi2.ppf(0.99,k)): \r\n return Remain_p, Remain_A, Remain_ut, 1 # structural break detected\r\n else: \r\n return Remain_p, Remain_A, Remain_ut, 0","sub_path":"origin/Matrix_function.py","file_name":"Matrix_function.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"143955122","text":"import torch\nimport torch.nn as nn\nfrom .Layers3D import *\n\ndef help(x):\n print(x.std(dim=2).mean())\n\n\nclass Hourglass3D(nn.Module):\n\t\"\"\"docstring for Hourglass3D\"\"\"\n\tdef 
__init__(self, nChannels = 128, numReductions = 4, nModules = 2, poolKernel = (1,2,2), poolStride = (1,2,2), upSampleKernel = 2, temporal=-1):\n\t\tsuper(Hourglass3D, self).__init__()\n\t\tself.numReductions = numReductions\n\t\tself.nModules = nModules\n\t\tself.nChannels = nChannels\n\t\tself.poolKernel = poolKernel\n\t\tself.poolStride = poolStride\n\t\tself.upSampleKernel = upSampleKernel\n\t\t\"\"\"\n\t\tFor the skip connection, a residual3D module (or sequence of residuaql modules)\n\t\t\"\"\"\n\n\t\t_skip = []\n\t\tfor i in range(self.nModules):\n\t\t\t_skip.append(Residual3D(self.nChannels, self.nChannels, temporal[0][i]))\n\n\t\tself.skip = nn.Sequential(*_skip)\n\t\t\n\t\t\"\"\"\n\t\tFirst pooling to go to smaller dimension then pass input through \n\t\tResidual Module or sequence of Modules then and subsequent cases:\n\t\t\teither pass through Hourglass3D of numReductions-1\n\t\t\tor pass through Residual3D Module or sequence of Modules\n\t\t\"\"\"\n\n\t\tself.mp = nn.MaxPool3d(self.poolKernel, self.poolStride)\n\t\t\n\t\t_afterpool = []\n\t\tfor i in range(self.nModules):\n\t\t\t_afterpool.append(Residual3D(self.nChannels, self.nChannels, temporal[1][i]))\n\n\t\tself.afterpool = nn.Sequential(*_afterpool)\t\n\n\t\tif (numReductions > 1):\n\t\t\tself.hg = Hourglass3D(self.nChannels, self.numReductions-1, self.nModules, self.poolKernel, self.poolStride, self.upSampleKernel, temporal[2])\n\t\telse:\n\t\t\t_num1res = []\n\t\t\tfor i in range(self.nModules):\n\t\t\t\t_num1res.append(Residual3D(self.nChannels,self.nChannels, temporal[2][i]))\n\t\t\t\n\t\t\tself.num1res = nn.Sequential(*_num1res) # doesnt seem that important ?\n\t\t\n\t\t\"\"\"\n\t\tNow another Residual3D Module or sequence of Residual3D Modules\n\t\t\"\"\"\n\t\t\n\t\t_lowres = []\n\t\tfor i in range(self.nModules):\n\t\t\t_lowres.append(Residual3D(self.nChannels,self.nChannels, temporal[3][i]))\n\n\t\tself.lowres = nn.Sequential(*_lowres)\n\n\t\t\"\"\"\n\t\tUpsampling Layer (Can we change this??????) 
\n\t\tAs per Newell's paper upsamping recommended\n\t\t\"\"\"\n\t\tself.up = nn.Upsample(scale_factor = self.upSampleKernel)\n\t\t\n\t\t\"\"\"\n\t\tIf temporal dimension is odd then after upsampling add a dimension temporally\n\t\tdoing this via 2 kernel 1D convolution with 1 padding along the temporal direction\n\t\t\"\"\"\n\t\t#self.addTemporal = nn.ReplicationPad3d((0,0,0,0,0,1))\n\n\tdef forward(self, input):\n\t\tout1 = input\n\t\t#help(out1)\n\t\tout1 = self.skip(out1)\n\t\t#print('skip %d'%(self.numReductions))\n\t\t#help(out1)\n\t\tout2 = input\n\t\t\n\t\tout2 = self.mp(out2)\n\t\t\n\t\tout2 = self.afterpool(out2)\n\t\t#print('out2 %d'%(self.numReductions))\n\t\t#help(out2)\n\t\tif self.numReductions>1:\n\t\t\tout2 = self.hg(out2)\n\t\telse:\n\t\t\tout2 = self.num1res(out2)\n\t\t#help(out2)\n\t\tout2 = self.lowres(out2)\n\t\t#help(out2)\t\n\t\t\n\t\tN,C,D,H,W = out2.size()\n\t\tout2 = out2.transpose(1,2).contiguous().view(N*D,C,H,W).contiguous()\n\t\tout2 = self.up(out2)\n\t\tN1,C1,H1,W1 = out2.size()\n\t\tout2 = out2.view(N,D,C1,H1,W1).contiguous().transpose(1,2).contiguous()\n\t\treturn out2 + out1\t\n","sub_path":"src/model/HourGlass3D.py","file_name":"HourGlass3D.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"284249177","text":"from node import Node\n\nclass OrderedList:\n\tdef __init__(self):\n\t\tself.head = None\n\n\tdef isEmpty(self):\n\t\treturn self.head == None\n\n\t# This method starts at the head of the linked list\n\t# and traverses through each node on the list, while\n\t# doing so it tallies a count each time it goes to another\n\t# node\n\tdef length(self):\n\t\tcurrent = self.head\n\t\tcount = 0\n\t\twhile current != None:\n\t\t\tcount += 1\n\t\t\tcurrent = current.getNext()\n\n\t\treturn count\n\n\tdef search(self, item):\n\t\tcurrent = self.head\n\t\tfound = False\n\t\tstop = False\n\t\twhile current != None and not found and not stop:\n\t\t\tif current.getData() == item:\n\t\t\t\tfound = True\n\t\t\telse:\n\t\t\t\tif current.getData() > item:\n\t\t\t\t\tstop = True\n\t\t\t\telse:\n\t\t\t\t\tcurrent = current.getNext()\n\t\treturn found\n\n\tdef add(self, item):\n\t\tcurrent = self.head\n\t\tprevious = None\n\t\tstop = False\n\t\twhile current != None and not stop:\n\t\t\tif current.getData() > item:\n\t\t\t\tstop = True\n\t\t\telse:\n\t\t\t\tprevious = current.getNext()\n\n\t\ttemp = Node(item)\n\t\tif previous == None:\n\t\t\ttemp = setNext(self.head)\n\t\t\tself.head = temp\n\t\telse:\n\t\t\ttemp.setNext(current)\n\t\t\tprevious.setNext(temp)\n\n","sub_path":"basic/orderedlist.py","file_name":"orderedlist.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"123001829","text":"from modules.interpreter import handle_input\nfrom modules.utils import get_time\n\n\nclass Assistant:\n\n def __init__(self):\n\n # modules/utils/get_time -> returns the actual time, already formated\n time = get_time()\n print(time)\n\n # the whole program operation\n self.main_loop()\n\n # get_input will only take the user's input\n # it doesn't handle it\n def get_input(self):\n\n # takes the raw user input\n user_input = input('$ ')\n\n # all the commands, with parameters, and etc ...\n commands = user_input.split(' ')\n\n # the main command, for identification purpouses\n command = commands[0]\n\n # returns a tuple\n return (command, commands)\n\n def main_loop(self):\n\n # the program 
will run, until the user exit it\n while (True):\n # get_input retuns a tuple\n main_command, commands = self.get_input()\n\n # which then returns a response, already formated\n response = handle_input(main_command, commands)\n\n # the response retuns False, if an exception is raised, or if the program ends\n if (response == False):\n break\n\n\nif __name__ == '__main__':\n Assistant()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"272796822","text":"#####################################################################################\n#\n# Copyright (c) Crossbar.io Technologies GmbH\n#\n# Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g.\n# you have purchased a commercial license), the license terms below apply.\n#\n# Should you enter into a separate license agreement after having received a copy of\n# this software, then the terms of such license agreement replace the terms below at\n# the time at which such license agreement becomes effective.\n#\n# In case a separate license agreement ends, and such agreement ends without being\n# replaced by another separate license agreement, the license terms below apply\n# from the time at which said agreement ends.\n#\n# LICENSE TERMS\n#\n# This program is free software: you can redistribute it and/or modify it under the\n# terms of the GNU Affero General Public License, version 3, as published by the\n# Free Software Foundation. This program is distributed in the hope that it will be\n# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See the GNU Affero General Public License Version 3 for more details.\n#\n# You should have received a copy of the GNU Affero General Public license along\n# with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n#####################################################################################\n\nimport os\n\nfrom setuptools import setup, find_packages\n\n\n# read package description\nwith open('README.rst') as f:\n    long_description = f.read()\n\n# read package version\nwith open('crossbar/_version.py') as f:\n    exec(f.read())  # defines __version__\n\n# we read requirements from requirements*.txt files down below\ninstall_requires = []\nextras_require = {\n    'dev': []\n}\n\n# minimum, open-ended requirements\nreqs = 'requirements-min.txt'\n\n# pinned requirements\n#reqs = 'requirements-pinned.txt'\n\n# pinned & hashed requirements: we cannot use that here sadly,\n# as setuptools doesn't understand hashes ..\n# reqs = 'requirements.txt'\n\nwith open(reqs) as f:\n    for line in f.read().splitlines():\n        line = line.strip()\n        if not line.startswith('#'):\n            parts = line.strip().split(';')\n            if len(parts) > 1:\n                print('Warning: requirements line \"{}\" ignored, as it uses env markers, which are not supported in setuptools'.format(line))\n            else:\n                # append the requirement string itself, not the one-element list\n                install_requires.append(parts[0])\n\nwith open('requirements-dev.txt') as f:\n    for line in f.read().splitlines():\n        extras_require['dev'].append(line.strip())\n\n# enforce use of CFFI for LMDB\nos.environ['LMDB_FORCE_CFFI'] = '1'\n\n# enforce use of bundled libsodium\nos.environ['SODIUM_INSTALL'] = 'bundled'\n\n# enforce use of pure Python py-ubjson (no Cython)\nos.environ['PYUBJSON_NO_EXTENSION'] = '1'\n\n# now actually call into setuptools ..\nsetup(\n    name='crossbar',\n    version=__version__,\n    description='Crossbar.io multi-protocol (WAMP/WebSocket, REST/HTTP, MQTT) application router for microservices.',\n    long_description=long_description,\n    author='Crossbar.io Technologies GmbH',\n    url='http://crossbar.io/',\n    platforms=('Any'),\n    license=\"AGPL3\",\n    install_requires=install_requires,\n    extras_require=extras_require,\n    entry_points={\n        # CLI entry function\n        'console_scripts': [\n            'crossbar = crossbar:run'\n        ]\n    },\n    packages=find_packages(),\n    include_package_data=True,\n    data_files=[('.', ['crossbar/LEGAL', 'crossbar/LICENSE', 'crossbar/LICENSE-FOR-API', 'crossbar/LICENSES-OSS'])],\n    zip_safe=False,\n\n    # http://pypi.python.org/pypi?%3Aaction=list_classifiers\n    classifiers=[\"License :: OSI Approved :: GNU Affero General Public License v3\",\n                 \"Development Status :: 5 - Production/Stable\",\n                 \"Environment :: No Input/Output (Daemon)\",\n                 \"Environment :: Console\",\n                 \"Framework :: Twisted\",\n                 \"Intended Audience :: Developers\",\n                 \"Operating System :: OS Independent\",\n                 \"Programming Language :: Python :: 3.5\",\n                 \"Programming Language :: Python :: 3.6\",\n                 \"Programming Language :: Python :: 3.7\",\n                 \"Programming Language :: Python :: Implementation :: CPython\",\n                 \"Programming Language :: Python :: Implementation :: PyPy\",\n                 \"Topic :: Internet\",\n                 \"Topic :: Internet :: WWW/HTTP :: HTTP Servers\",\n                 \"Topic :: Communications\",\n                 \"Topic :: Database\",\n                 \"Topic :: Home Automation\",\n                 \"Topic :: Software Development :: Libraries\",\n                 \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n                 \"Topic :: Software Development :: Embedded Systems\",\n                 \"Topic :: Software Development :: Object Brokering\",\n                 \"Topic :: System :: Distributed Computing\",\n                 \"Topic :: System :: Networking\"],\n    keywords='crossbar router autobahn autobahn.ws websocket realtime rfc6455 wamp rpc pubsub oracle postgres 
postgresql'\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"641440889","text":"def dna_to_rna(string):\n    # note that this function does not return the rna complement for a dna string\n    # this just transforms all 'T' in the dna to 'U'\n\n    rna = ''\n\n    for i in range(len(string)):\n        if string[i] == 'T':\n            rna = rna + 'U'\n        else:\n            rna = rna + string[i]\n\n    return rna\n","sub_path":"Rosalind/RNA.py","file_name":"RNA.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"327488883","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n   @Time : 2020/06/21\n   @Auth : 晨光\n   @File : 显示等待练习.py\n   @IDE : PyCharm\n   @Email: 624011110@qq.com\n-------------------------------------------------\n\"\"\"\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium import webdriver\n\n'''\n1. Explain the difference between forced waits, implicit waits and explicit waits.\nForced wait: a fixed sleep; whether or not the element has already finished loading, the full\n        fixed time is always spent, and a separate wait statement has to be added at every place that needs one!\nImplicit wait: global; it only needs to be set once and every element gets the same fixed timeout. If an element\n        loads before the timeout, the wait ends early; if the timeout is exceeded, an error is raised. An implicit\n        wait can only be used to wait for an element to be loaded and located, though.\nExplicit wait: supports many wait conditions, such as waiting for an element to be visible, for an element to be\n        clickable, for a window to open, and so on. Its implementation just takes a few more steps than the two above.\n\n'''\n\ndriver = webdriver.Chrome()\n\ndriver.implicitly_wait(30)\n\nurl = 'http://www.baidu.com'\n\ndriver.get(url)\n\n# e = driver.find_element('id', 'kw')\n# wait for the search box to become visible\nwait = WebDriverWait(driver, timeout=5)\nlocator = ('id', 'kw')\ne = wait.until(expected_conditions.visibility_of_element_located(locator))\n\n# type 柠檬班 into the search box\ne.send_keys('柠檬班')\n\n# wait until the search button ('百度一下') is clickable\nwait = WebDriverWait(driver, timeout=5)\nlocator = ('id', 'su')\nf = wait.until(expected_conditions.element_to_be_clickable(locator))\n\n# click the button\nf.click()\n\n# wait for the result link to finish loading\nwait = WebDriverWait(driver,timeout=5)\nlocator = ('partial link text', '柠檬班腾讯课堂')\ne = wait.until(expected_conditions.visibility_of_element_located(locator))\n\n# open the 柠檬班腾讯课堂 hyperlink\ne.click()\n\n\n\n\n\n","sub_path":"WebClass/web_06_复杂元素定位和等待/作业/显示等待练习.py","file_name":"显示等待练习.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"601624780","text":"import math\nfrom PyQt4 import QtGui\nfrom networkAttr import networkAttr\n\n\n# Class for VC information tables\n# Presents buffer information in the form of tables\n# Shows flits in buffer and their relevant values\nclass BuffInfo(QtGui.QWidget):\n    def __init__(self):\n        super(BuffInfo, self).__init__()\n        # Core Information\n        self.current_core = None\n        self.current_vn = None\n        self.top_table = None\n        self.bottom_table = None\n        self.cur_buffer_index = None\n\n    # Sets up the vc table headers and other attributes\n    def setup_vc_tables(self, table, table_loc):\n        if networkAttr.CORE_VCS > 4 and table_loc == \"Top\":\n            self.top_table = table\n            vc_range = math.ceil(networkAttr.CORE_VCS / 2)\n            vc_start = 0\n            vc_end = vc_range\n        elif networkAttr.CORE_VCS > 4 and table_loc == \"Bottom\":\n            self.bottom_table = table\n            vc_range = math.floor(networkAttr.CORE_VCS / 2)\n            vc_start = vc_range\n            vc_end = networkAttr.CORE_VCS\n        else:\n            vc_range = networkAttr.CORE_VCS\n            vc_start = 0\n            vc_end = networkAttr.CORE_VCS\n\n        table.setColumnCount(vc_range)\n        table.setRowCount(5)\n\n        item = QtGui.QTableWidgetItem()\n        table.setVerticalHeaderItem(0, item)\n        item = QtGui.QTableWidgetItem()\n        
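# the same create-and-set pair repeats for each of the five rows below; a\n        # hypothetical loop equivalent (not part of the original widget) would be:\n        #   for row, label in enumerate((\"Flit Id\", \"Flit Type\", \"Flit Route\",\n        #                                \"Flit Outport\", \"Flit Src Delay\")):\n        #       table.setVerticalHeaderItem(row, QtGui.QTableWidgetItem(label))\n        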
table.setVerticalHeaderItem(1, item)\n item = QtGui.QTableWidgetItem()\n table.setVerticalHeaderItem(2, item)\n item = QtGui.QTableWidgetItem()\n table.setVerticalHeaderItem(3, item)\n item = QtGui.QTableWidgetItem()\n table.setVerticalHeaderItem(4, item)\n item = table.verticalHeaderItem(0)\n item.setText(\"Flit Id\")\n item = table.verticalHeaderItem(1)\n item.setText(\"Flit Type\")\n item = table.verticalHeaderItem(2)\n item.setText(\"Flit Route\")\n item = table.verticalHeaderItem(3)\n item.setText(\"Flit Outport\")\n item = table.verticalHeaderItem(4)\n item.setText(\"Flit Src Delay\")\n\n for index, vc_num in enumerate(range(vc_start, vc_end)):\n item = QtGui.QTableWidgetItem()\n table.setHorizontalHeaderItem(index, item)\n item = table.horizontalHeaderItem(index)\n item.setText(\"VC \" + str(vc_num))\n\n # Updates the table for a change in cycle, core, buffer, or vn\n def update_tables(self, vn, core, buffer_index):\n self.current_vn = vn\n self.cur_buffer_index = buffer_index\n self.current_core = core\n # clears the table before each change\n self.clear_tables()\n buffer = self.current_core.buffers[self.cur_buffer_index]\n for flit in buffer.flits:\n flit_table_entries = self.setup_flit_table_items(flit)\n if flit.vc < self.top_table.columnCount():\n for row, entry in enumerate(flit_table_entries):\n self.top_table.setItem(row, flit.vc, entry)\n else:\n for row, entry in enumerate(flit_table_entries):\n self.bottom_table.setItem(row, flit.vc - self.top_table.columnCount(), entry)\n self.update()\n\n # creates table objects for each flit value for input into the table\n @staticmethod\n def setup_flit_table_items(flit):\n flit_qwidgets = []\n # flit Id\n flit_qwidgets.append(QtGui.QTableWidgetItem(str(flit.id)))\n # Flit Type\n flit_qwidgets.append(QtGui.QTableWidgetItem(flit.type))\n # flit Route\n string = str(flit.src) + \" -> \" + str(flit.dest)\n flit_qwidgets.append(QtGui.QTableWidgetItem(string))\n # Flit Outport\n flit_qwidgets.append(QtGui.QTableWidgetItem(flit.outport))\n # Flit Source Delay\n flit_qwidgets.append(QtGui.QTableWidgetItem(str(flit.src_delay)))\n return flit_qwidgets\n\n # sets the top table for the combined table widget\n def set_top_table(self, top_table):\n self.top_table = top_table\n\n # sets the bottom table for the combined table widget\n def set_bottom_table(self, bottom_table):\n self.bottom_table = bottom_table\n\n # clears the table of old information\n def clear_tables(self):\n if self.top_table is not None:\n self.top_table.clearContents()\n if self.bottom_table is not None:\n self.bottom_table.clearContents()\n","sub_path":"BuffInfo.py","file_name":"BuffInfo.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"245110831","text":"from django.core.cache import cache\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom upday.authentication.expire_token_authentication import ExpiringTokenAuthentication\nfrom upday.authentication.student_authentication import StudentAuthentication\nfrom upday.modules.common.service.cache_service import CacheService\nfrom upday.modules.common.service.encrypt_service import EncryptService\nfrom upday.modules.common.service.key_service import KeyService\nfrom upday.modules.common.service.sort_service import SortService\nfrom upday.modules.component.serializer.component_serializer import ComponentSerializer\nfrom upday.modules.component.service.component_service import 
ComponentService\nfrom upday.modules.question.serializer.question_serializer import PreviewValidator, DeleteSerializer, \\\n    ChoiceSerializer, BackwardSerializer, ListValidator, ListSerializer, InsertValidator, \\\n    InsertComponentValidator, ShowValidator, QuestionMaterialSerializer, PreemptionValidator, AnswerSerializer, \\\n    SaveValidator\nfrom upday.modules.question.service.question_service import QuestionService\nfrom upday.permission.basic_permission import PermissionHub, BasicPermission\nfrom upday.permission.team_permission import TeamPermission\n\nquestion_handler = QuestionService()\n\n\nclass InsertView(APIView):\n    \"\"\"\n    Insert an exercise question\n    \"\"\"\n    authentication_classes = (ExpiringTokenAuthentication,)\n    permission_classes = (TeamPermission,)\n    permission_code = PermissionHub.PERMISSION_ALTER_PROJECT_CONTENT\n\n    def post(self, request, *args, **kwargs):\n        validator = InsertValidator(data=request.data, context={'request': request})\n        validator.is_valid(raise_exception=True)\n        practice = validator.validated_data['practice']\n        # fetch all questions under this practice\n        question_set = question_handler.get_question_set(practice)\n        # create the question\n        question = question_handler.create(validator.validated_data)\n        # the position at which the question should be inserted\n        index = validator.validated_data['index']\n        question = SortService().insert(question_set, question, index)\n        # read back the question's position after insertion\n        index = question.position\n        encrypt_handler = EncryptService()\n        question_id = encrypt_handler.encrypt_id('HASH_KEY_QUESTION_ID', question.id)\n        # create the question's standard answer together with the question itself\n        answer = question_handler.create_standard_answer(question)\n        answer_id = encrypt_handler.encrypt_id('HASH_KEY_ANSWER_ID', answer.id)\n        # clear the cache\n        CacheService().clear_pratice_cache(practice.id)\n\n        return Response(\n            data={'result': 'Success', 'index': index, 'question_id': question_id, 'answer_id': answer_id},\n            content_type='application/json'\n        )\n\n\nclass PreviewListView(APIView):\n    \"\"\"\n    (Backend) Get the list of questions and their corresponding explanation IDs\n    \"\"\"\n    authentication_classes = (ExpiringTokenAuthentication,)\n    permission_classes = (TeamPermission,)\n    permission_code = PermissionHub.PERMISSION_ALTER_PROJECT_CONTENT\n\n    def post(self, request, *args, **kwargs):\n        validator = ListValidator(data=request.data, context={'request': request})\n        validator.is_valid(raise_exception=True)\n        practice = validator.validated_data['practice']\n        question_set = question_handler.get_question_set(practice)\n        # serialize and expose question_count, question_id, position, tip_id\n        serializer = ListSerializer(question_set, many=True)\n\n        return Response(\n            data={'result': 'Success', 'question_list': serializer.data},\n            content_type='application/json'\n        )\n\n\nclass PreviewView(APIView):\n    \"\"\"\n    (Backend) Question detail\n    \"\"\"\n    authentication_classes = (ExpiringTokenAuthentication,)\n    permission_classes = (TeamPermission,)\n    permission_code = PermissionHub.PERMISSION_ALTER_PROJECT_CONTENT\n\n    def post(self, request, *args, **kwargs):\n        validator = PreviewValidator(data=request.data, context={'request': request})\n        validator.is_valid(raise_exception=True)\n        question = validator.validated_data['question']\n        # the position of this question within the practice\n        index = validator.validated_data['index']\n        # for a multiple-choice question this yields the list of choices, otherwise an empty string\n        choice_set = validator.validated_data['choice_set']\n        choice_serializer = ChoiceSerializer(choice_set, many=True)\n        # fetch all of the question's component content, sorted\n        component_set = question_handler.get_component_set(question)\n        question_component_serializer = ComponentSerializer(component_set, many=True)\n        # fetch the question's standard answer\n        standard_answer = question.answer_set.filter(standard=1)[0]\n        standard_answer_serializer = 
AnswerSerializer(standard_answer)\n        return Response(\n            data={'result': 'Success', 'index': index, 'component_list': question_component_serializer.data,\n                  'choice_list': choice_serializer.data, 'standard_answer': standard_answer_serializer.data},\n            content_type='application/json'\n        )\n\n\nclass ShowView(APIView):\n    \"\"\"\n    (Mobile) Get all of today's question material\n    \"\"\"\n    authentication_classes = (StudentAuthentication,)\n    permission_classes = (BasicPermission,)\n\n    def post(self, request, *args, **kwargs):\n        validator = ShowValidator(data=request.data, context={'request': request})\n        validator.is_valid(raise_exception=True)\n        practice = validator.validated_data['practice']\n        key_handler = KeyService()\n        key = key_handler.get_pratice_material_key(practice.id)\n        value = cache.get(key)\n        if value:\n            question_array = value\n        else:\n            question_set = question_handler.get_question_set(practice)\n            serializer = QuestionMaterialSerializer(question_set, many=True)\n            question_array = serializer.data\n            cache.set(key, question_array, timeout=3600)\n\n        return Response(\n            data={'result': 'Success', 'question_array': question_array},\n            content_type='application/json'\n        )\n\n\nclass PreemptionView(APIView):\n    \"\"\"\n    Early-access preview\n    \"\"\"\n    authentication_classes = (StudentAuthentication,)\n    permission_classes = (BasicPermission,)\n\n    def post(self, request, *args, **kwargs):\n        validator = PreemptionValidator(data=request.data, context={'request': request})\n        validator.is_valid(raise_exception=True)\n        practice = validator.validated_data['practice']\n        key_handler = KeyService()\n        key = key_handler.get_pratice_material_key(practice.id)\n        value = cache.get(key)\n        if value:\n            question_array = value\n        else:\n            question_set = question_handler.get_question_set(practice)\n            serializer = QuestionMaterialSerializer(question_set, many=True)\n            question_array = serializer.data\n            cache.set(key, question_array, timeout=3600)\n\n        return Response(\n            data={'result': 'Success', 'question_array': question_array},\n            content_type='application/json'\n        )\n\n\nclass DeleteView(APIView):\n    \"\"\"\n    Delete a question\n    \"\"\"\n    authentication_classes = (ExpiringTokenAuthentication,)\n    permission_classes = (TeamPermission,)\n    permission_code = PermissionHub.PERMISSION_ALTER_PROJECT_CONTENT\n\n    def post(self, request, *args, **kwargs):\n        serializer = DeleteSerializer(data=request.data, context={'request': request})\n        serializer.is_valid(raise_exception=True)\n        practice = serializer.validated_data['practice']\n        # fetch all questions under this practice\n        question_set = question_handler.get_question_set(practice)\n        # the question to be deleted\n        question = serializer.validated_data['question']\n        SortService().delete(question_set, question)\n        # clear the cache\n        CacheService().clear_pratice_cache(practice.id)\n\n        return Response(\n            data={'result': 'Success'},\n            content_type='application/json'\n        )\n\n\nclass SaveView(APIView):\n    \"\"\"\n    Save a question\n    \"\"\"\n    authentication_classes = (ExpiringTokenAuthentication,)\n    permission_classes = (TeamPermission,)\n    permission_code = PermissionHub.PERMISSION_ALTER_PROJECT_CONTENT\n\n    def post(self, request, *args, **kwargs):\n        validator = SaveValidator(data=request.data, context={'request': request})\n        validator.is_valid(raise_exception=True)\n        question = validator.validated_data['question']\n        question.status = 1\n        question.save()\n\n        return Response(\n            data={'result': 'Success'},\n            content_type='application/json'\n        )\n\n\n###############################################################################################################\n# 
'Question' ordering\n###############################################################################################################\nclass MoveQuestionBackwardView(APIView):\n    \"\"\"\n    Move a question backward\n    \"\"\"\n    authentication_classes = (ExpiringTokenAuthentication,)\n    permission_classes = (TeamPermission,)\n    permission_code = PermissionHub.PERMISSION_ALTER_PROJECT_CONTENT\n\n    def post(self, request, *args, **kwargs):\n        serializer = BackwardSerializer(data=request.data, context={'request': request})\n        serializer.is_valid(raise_exception=True)\n        practice = serializer.validated_data['practice']\n        question_set = question_handler.get_question_set(practice)\n        question = serializer.validated_data['question']\n        # shift the question right (backward)\n        sort_handler = SortService()\n        question = sort_handler.move_backward(question_set, question)\n        index = question.position\n        # clear the cache\n        CacheService().clear_pratice_cache(practice.id)\n\n        return Response(\n            data={'result': 'Success', 'index': index},\n            content_type='application/json'\n        )\n\n\nclass MoveQuestionForwardView(APIView):\n    \"\"\"\n    Move a question left, i.e. shift it forward\n    \"\"\"\n    authentication_classes = (ExpiringTokenAuthentication,)\n    permission_classes = (TeamPermission,)\n    permission_code = PermissionHub.PERMISSION_ALTER_PROJECT_CONTENT\n\n    def post(self, request, *args, **kwargs):\n        serializer = BackwardSerializer(data=request.data, context={'request': request})\n        serializer.is_valid(raise_exception=True)\n        practice = serializer.validated_data['practice']\n        question_set = question_handler.get_question_set(practice)\n        question = serializer.validated_data['question']\n        # shift the question left (forward)\n        sort_handler = SortService()\n        question = sort_handler.move_forward(question_set, question)\n        index = question.position\n        # clear the cache\n        CacheService().clear_pratice_cache(practice.id)\n\n        return Response(\n            data={'result': 'Success', 'index': index},\n            content_type='application/json'\n        )\n\n\n###############################################################################################################\n# 'Components'\n###############################################################################################################\n\n\nclass InsertComponentView(APIView):\n    \"\"\"\n    Insert a component\n    \"\"\"\n    authentication_classes = (ExpiringTokenAuthentication,)\n    permission_classes = (TeamPermission,)\n    permission_code = PermissionHub.PERMISSION_ALTER_PROJECT_CONTENT\n\n    def post(self, request, *args, **kwargs):\n        validator = InsertComponentValidator(data=request.data, context={'request': request})\n        validator.is_valid(raise_exception=True)\n        question = validator.validated_data['question']\n        index = validator.validated_data['index']\n        type = validator.validated_data['type']\n        component_handler = ComponentService()\n        # insert the component\n        component = component_handler.insert_component(question=question, index=index, type=type)\n        component_id = EncryptService().encrypt_id('HASH_KEY_COMPONENT_ID', component.id)\n        index = component.position\n        # clear the cache\n        practice = question.practice\n        CacheService().clear_pratice_cache(practice.id)\n\n        return Response(\n            data={'result': 'Success', 'index': index, 'component_id': component_id},\n            content_type='application/json'\n        )\n","sub_path":"upday/modules/question/views/question_views.py","file_name":"question_views.py","file_ext":"py","file_size_in_byte":12526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"284078161","text":"# Solution 1 - Working - fast rank - #1\n\n# def twoSum(nums: [], target: int): \n    # r = dict()\n    # for i in range(len(nums)):\n        # if target-nums[i] in r:\n            
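# r maps each value seen so far to its index, so this membership test\n            # is an O(1) dict lookup and the whole scan stays O(n)\n            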
# return [r[target-nums[i]],i]\n # else:\n # r[nums[i]]=i\n # return r\n# print(twoSum(nums=[0, 4, 3, 0], target=0))\n\n# Solution 2 - Working - fast rank - #2\ndef twoSum(nums: [], target: int): \n r = []\n for i in range(len(nums)):\n temp = target - nums[i]\n if temp in r:\n return [nums.index(temp), i]\n r.append(nums[i])\n\nprint(twoSum(nums=[0, 4, 3, 0], target=0))\n\n# Solution 3 - Not Working\n\n# def twoSum(nums: [], target: int): \n# value=0\n# index=0\n# for i in range(len(nums)):\n# if(nums[i]<=target):\n# value=target-nums[i]\n# return value\n# if value in nums[i+1:]:\n# index= nums[i+1:].index(value)\n# return index\n# if(index>=0) and (index!=i):\n# return [i,index]\n\n# print(twoSum(nums=[0, 4, 3, 0], target=0))\n","sub_path":"Leetcode/Easy/2Sum.py","file_name":"2Sum.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"179419428","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render, render_to_response, redirect\nfrom .models import BlogPost, BlogComment\nfrom forms import BlogCommentForm\n\ndef PostList (request):\n list = {}\n list['all'] = BlogPost.objects.all()\n return render_to_response('blog/list.html', list)\n\n\ndef PostDetail (request, post_id):\n if request.method == 'POST':\n form = BlogCommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = BlogPost.objects.get(id = post_id)\n comment.save()\n return redirect('/blog/%s' % post_id)\n else:\n form = BlogCommentForm()\n blogpost = BlogPost.objects.get(id = post_id)\n blogcomment = BlogComment.objects.filter(post_id = post_id)\n return render (request, 'blog/post.html', {'blogpost' : BlogPost.objects.get(id = post_id), 'blogcomment':BlogComment.objects.filter(post_id = post_id), 'form': form})\n\n\n\n\n","sub_path":"project_blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"443756555","text":"from .functions.download.local import local_downloader\nfrom .functions.download.googledisk import googledisk_download\nfrom .functions.preprocessing.preprocessing import preprocessing\nfrom .functions.attributes.attributes import attributes\nfrom .functions.combinations.combinations import combinations\nfrom .functions.gridsearch.gridsearch import grid_search\n\n\ndef pipeline():\n df = local_downloader()\n print(\"Downloader from local directory was finished\")\n df = googledisk_download()\n print(\"Downloader from googledisk was finished\")\n df = preprocessing()\n print(\"Preprocessing was finished\")\n df = attributes()\n print(\"Adding attributes was finished\")\n arr = combinations()\n print(\"Creation of combinations was finished\")\n res = grid_search()\n print(\"Gridsearch was finished\")\n\n\nif __name__ == '__main__':\n pipeline()\n","sub_path":"src/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"534686050","text":"\"\"\"Define tests for authentication.\"\"\"\nimport json\n\nimport pytest\n\nimport pyatmo\n\n\ndef test_ClientAuth(auth):\n assert auth.accessToken == (\n \"91763b24c43d3e344f424e8b|880b55a08c758e87ff8755a00c6b8a12\"\n )\n\n\ndef test_ClientAuth_invalid(requests_mock):\n with open(\"fixtures/invalid_grant.json\") as f:\n json_fixture = json.load(f)\n requests_mock.post(\n 
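# stub the library's auth endpoint (the pyatmo._AUTH_REQ constant) so the\n        # token POST inside ClientAuth is answered with the invalid_grant\n        # fixture instead of reaching the real Netatmo API\n        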
pyatmo._AUTH_REQ,\n json=json_fixture,\n headers={\"content-type\": \"application/json\"},\n )\n with pytest.raises(pyatmo.NoDevice):\n pyatmo.ClientAuth(\n clientId=\"CLIENT_ID\",\n clientSecret=\"CLIENT_SECRET\",\n username=\"USERNAME\",\n password=\"PASSWORD\",\n )\n","sub_path":"tests/test_pyatmo.py","file_name":"test_pyatmo.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"569615063","text":"from asn1crypto._ffi import null\nfrom flask import Flask, render_template, request, session, redirect, url_for, flash\nfrom pymongo import MongoClient\nfrom flask_paginate import Pagination, get_page_args\nfrom bson.objectid import ObjectId\nfrom bson.json_util import dumps,loads\n\napp = Flask(__name__)\nclient = MongoClient('mongodb+srv://nambn007:nambn007@cluster0.oki5a.mongodb.net/RealEstate?retryWrites=true&w=majority')\napp.secret_key = 'super secret key'\ndb = client.RealEstate\nbatdongsan = db.RealEstateRaw\nusers = db.users\nwishlist = db.wishlist\n\n# def get_users(offset=0, per_page=10):\n# return users[offset: offset + per_page]\n\n@app.route('/')\ndef home():\n if 'user' in session:\n return render_template('home.html', user = loads(session['user']))\n else:\n return render_template('home.html')\n@app.route('/testData', methods = ['GET'])\ndef test():\n items = batdongsan.find({})\n return render_template('1.html', items = items)\n\n@app.route('/dangki', methods = ['POST', 'GET'])\ndef register():\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n password = request.form['password']\n user = {\n \"name\" : name,\n \"email\" : email,\n \"password\" : password\n }\n users.insert_one(user)\n flash('Đăng kí thành công.Vui lòng đăng nhập')\n return redirect(url_for('signIn'))\n else:\n return render_template('register.html')\n\n@app.route('/dangnhap', methods = ['POST', 'GET'])\ndef signIn():\n if request.method == 'POST' :\n email = request.form['email']\n password = request.form['password']\n allUsers = users.find()\n for user in allUsers:\n if (user['email'] == email and user['password'] == password):\n session['user'] = dumps(user)\n break\n if ('user' in session):\n return render_template('home.html', user = loads(session['user']))\n else :\n flash('Email hoặc password không chính xác')\n return render_template('signin.html')\n else :\n return render_template('signin.html')\n\n@app.route('/dangxuat', methods = ['GET'])\ndef logOut():\n session.pop('user')\n return render_template('home.html')\n\n# @app.route('/xemTin', methods = ['GET', 'POST'])\n# def viewPost():\n# if 'user' in session:\n# sq_from = 0\n# sq_to = 10000\n# pr_from = 0\n# pr_to = 10000\n# if request.args.get('sq_from'):\n# sq_from = request.args.get('sq_from')\n# if request.args.get('sq_to'):\n# sq_to = request.args.get('sq_to')\n# if request.args.get('pr_from'):\n# pr_from = request.args.get('pr_from')\n# if request.args.get('pr_to'):\n# pr_to = request.args.get('pr_to')\n# print(sq_from)\n# print(sq_to)\n# print(pr_from)\n# print(pr_to)\n# bds = batdongsan.find({\"$and\" : [{\"square\" : {\"$lt\": float(sq_to)}}, {\"square\" : {\"$gt\": float(sq_from)}}, {\"price\" : {\"$lt\": float(pr_to)}}, {\"price\" : {\"$gt\": float(pr_from)}}]}).limit(20)\n# listPostWish = wishlist.find({\"user_id\": str(loads(session['user'])['_id'])})\n# idPostWish = []\n# for l in listPostWish:\n# idPostWish.append(ObjectId(l['post_id']))\n# return render_template('listHouse.html', bds = bds, 
user=loads(session['user']), idPostWish=idPostWish)\n# else :\n# return render_template('signin.html')\n\n@app.route('/xemTin', methods = ['GET', 'POST'])\ndef viewPost():\n if 'user' in session:\n sq_from = 0\n sq_to = 10000\n pr_from = 0\n pr_to = 10000\n if request.args.get('sq_from'):\n sq_from = request.args.get('sq_from')\n if request.args.get('sq_to'):\n sq_to = request.args.get('sq_to')\n if request.args.get('pr_from'):\n pr_from = request.args.get('pr_from')\n if request.args.get('pr_to'):\n pr_to = request.args.get('pr_to')\n page, per_page, offset = get_page_args(page_parameter='page',\n per_page_parameter='per_page')\n bds = batdongsan.find({\"$and\" : [{\"square\" : {\"$lt\": float(sq_to)}}, {\"square\" : {\"$gt\": float(sq_from)}}, {\"price\" : {\"$lt\": float(pr_to)}}, {\"price\" : {\"$gt\": float(pr_from)}}]})\n bds = list(bds)\n total = len(bds)\n bds = bds[offset : offset + per_page]\n pagination = Pagination(page=page, per_page=per_page, total=total,\n css_framework='bootstrap4')\n listPostWish = wishlist.find({\"user_id\": str(loads(session['user'])['_id'])})\n idPostWish = []\n for l in listPostWish:\n idPostWish.append(ObjectId(l['post_id']))\n return render_template('listHouse.html',user=loads(session['user']), idPostWish=idPostWish, bds=bds,\n page=page,\n per_page=per_page,\n pagination=pagination)\n else :\n return render_template('signin.html')\n\n\n@app.route('/add-wishlist/', methods = ['POST', 'GET'])\ndef addWishList(post_id) :\n user_id = str(loads(session['user'])['_id'])\n item = {\n \"post_id\" : post_id,\n \"user_id\" : user_id\n }\n wishlist.insert_one(item)\n flash('Thêm thành công')\n return redirect(url_for('viewPost'))\n\n@app.route('/delete-wishlist/', methods = ['POST', 'GET'])\ndef deleteWishList(post_id) :\n x = wishlist.delete_one({\"post_id\": post_id} and {\"user_id\": str(loads(session['user'])['_id'])})\n flash('Xóa thành công')\n return redirect(url_for('viewWishList'))\n\n@app.route('/danh-sach-yeu-thich')\ndef viewWishList():\n if 'user' in session :\n list = wishlist.find({ 'user_id': str(loads(session['user'])['_id'])})\n posts = []\n id = []\n for l in list:\n post = batdongsan.find_one({\"_id\" : ObjectId(l['post_id'])})\n posts.append(post)\n id.append(str(ObjectId(l['post_id'])))\n return render_template('wishList.html', user=loads(session['user']), list = posts,ids = id, index=0 )\n else :\n return render_template('signin.html')\n\n@app.route('/xem-chi-tiet/')\ndef viewDetail(id):\n if 'user' in session:\n data = batdongsan.find_one( { '_id' : ObjectId(id)} )\n # api get tin lien quan\n return render_template('detail.html', user=loads(session['user']), data = data)\n else:\n return render_template('signin.html')\n\n# search text\n@app.route('/search', methods=['GET'])\ndef searchPost():\n if 'user' in session:\n keyword = ''\n if request.args.get('keyword'):\n keyword = request.args.get('keyword')\n bds = {}\n listPostWish = wishlist.find({\"user_id\": str(loads(session['user'])['_id'])})\n idPostWish = []\n for l in listPostWish:\n idPostWish.append(ObjectId(l['post_id']))\n return render_template('listHouse.html', bds=bds, user=loads(session['user']), idPostWish=idPostWish)\n else:\n return render_template('signin.html')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"624562739","text":"import pandas as 
pd\n\"\"\"vilbert=pd.read_csv('st_surrey_vilbert_big_segments.csv',names=['index','scores'])\n\n\n\n\n\ntext=pd.read_csv('new_surrey_results_ismail.csv')\n\n\n\npicsom=pd.read_csv('picsom_big_segments.csv',names=['index','scores'])\nprint(picsom['scores'])\n\n\n\nensemble_score=0.5*picsom['scores']+ 0.2* vilbert['scores']+0.3*text['results_st']\n\"\"\"\ndf=pd.read_csv('Surrey_bigsegments_notsorted _SU.csv')\n#df['ensemble_score']=ensemble_score\n#df['vilbert_score']=vilbert['scores']\n#df['text_score']=text['results_st']\n#df['picsom_score']=picsom['scores']\n#df.to_csv('captions_clean.csv')\ndf=df.sort_values(by='ensemble_score', ascending=False)\ndf.to_csv('Surrey_bigsegments_sorted_SU.csv')\nprint(df)\n","sub_path":"Surrey/ensemble_score.py","file_name":"ensemble_score.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"502704338","text":"# A constans for our font types\nLARGE_FONT= (\"Verdana\", 12)\nEXTRA_LARGE_FONT =(\"Verdana\",20)\n# end\n\n# Import tkinter library to show MessageBox\nfrom tkinter import messagebox\n# end\n\n# Imported library for making GUI\nimport tkinter as eMenu\nfrom tkinter import *\n# end\n\n# Importing library to connect to serwer to send orders\nimport socket\nimport time\n# end \n\ndef sendOrder(self,textSummaryOrder,bill,nextMealsNames,nextMealsPrices,s):\n\n billInt = int (bill[0])\n\n if billInt == 0:\n\n textSummaryOrder.delete(1.0,END)\n messagebox.showinfo(\"Powiadomienie\",\"Nie wysłano pustego zamówienia !\")\n\n print(\"\\n Nie wysłano zamównienia, bowiem jest puste \\n\")\n \n else:\n textSummaryOrder.delete(1.0,END)\n\n message = \"\"\n \n for i in range(0,len(nextMealsNames)):\n message += \";\"\n message += nextMealsNames[i-1]\n message += \",\"\n message += nextMealsPrices[i-1]\n \n mainMessage = \"\\u0044\\u0001PCAPPK1\"\n mainMessage += message\n\n print(mainMessage)\n \n lengthOfMessage = len(mainMessage.encode('utf-8'))\n lengthOfMessageInt = int(lengthOfMessage)\n\n\n print(\"Dlugosc wiadomosci : \",lengthOfMessageInt)\n\n bytes1 = bytes( [lengthOfMessageInt] )\n bytes2 = mainMessage.encode('utf-8')\n allbytes = bytes1 + bytes2\n\n time.sleep(2)\n\n s.send(allbytes)\n from_server = s.recv(2)\n print(\"\\nOdpowiedz serwera :\",from_server,\"\\n\")\n\n print(\"Wysłano zamówienie \\n\")\n \n messagebox.showinfo(\"Powiadomienie\",\"Udało się wysłać zamówienie !\")\n\n bill[0]=0.00\n nextMealsNames.clear()\n nextMealsPrices.clear()\n","sub_path":"functions/sendOrder.py","file_name":"sendOrder.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"156510335","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport pytest\nimport subprocess\nfrom application.cache import CacheMgr\nfrom application.fibonacci import Fibonacci\nimport time\n\n#sys.path.append(os.path.join(os.path.dirname(__file__), '../app'))\n#sys.path.append('../app')\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef startmemcache(request):\n proc = subprocess.Popen(\"/usr/bin/memcached\")\n request.addfinalizer(proc.kill)\n time.sleep(3)\n\n@pytest.fixture(scope=\"function\")\ndef cache_mgr(request):\n cache_mgr = CacheMgr()\n def teardown():\n cache_mgr.tearDown() \n request.addfinalizer(teardown)\n\n return cache_mgr\n\ndef test_CacheMgr(cache_mgr):\n \"\"\" Test the get and set funcion \"\"\"\n cache_mgr.set('key1', 'value1')\n val = 
cache_mgr.get('key1')\n    assert val == 'value1'\n\n    cache_mgr.set('key1', 'hello_world')\n    val = cache_mgr.get('key1')\n    assert val == 'hello_world'\n\n    \"\"\" Test a key that is not in the cache \"\"\"\n    val = cache_mgr.get('miss_key')\n    assert val == None\n\ndef test_Fibonacci_get_number_from_cache(cache_mgr):\n    \"\"\" Test the get_number function with valid parameter \"\"\"\n    cache_mgr.set(str(0), '0')\n    cache_mgr.set(str(1), '1')\n    cache_mgr.set(str(2), '1')\n    cache_mgr.set(str(3), '2')\n    cache_mgr.set(str(4), '3')\n    cache_mgr.set(str(5), '5')\n    cache_mgr.set(str(6), '8')\n\n    fibo = Fibonacci(cache_mgr, 6)\n    # generate the fibonacci number for the first time\n    y = fibo.get_number(4)\n    assert y == 3\n\n    # get the fibonacci number from cache\n    y = fibo.get_number(7)\n    assert y == 13\n","sub_path":"test/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"352740610","text":"import requests\n\n# References on using requests:\n#\n# Making requests:\n# https://docs.python-requests.org/en/master/user/quickstart/#make-a-request\n# Using the returned JSON:\n# https://docs.python-requests.org/en/master/user/quickstart/#json-response-content\n\ndef version_exists(package_name, version):\n    try:\n        url = f'https://pypi.org/pypi/{package_name}/{version}/json'\n        return requests.get(url).status_code == requests.codes.ok\n    except:\n        return False\n\ndef latest_version(package_name):\n    try:\n        url = f'https://pypi.org/pypi/{package_name}/json'\n        r = requests.get(url)\n        if(r.status_code == requests.codes.ok):\n            return list(r.json()[\"releases\"].keys())[-1]\n        else:\n            return None\n    except:\n        return None\n","sub_path":"api/pypi.py","file_name":"pypi.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"505995282","text":"# by Stefan Maibücher, Handelsblatt Fachmedien\n# Script that locates all cover images in an ONIX file, downloads them and stores them.\n# The images are stored in the cover subfolder under the name <>.png\n\n\n'''\nUSAGE: python onix-png-download.py <>.xml\n\nIf connection errors occur, the proxy has to be set in the console:\n\nset http_proxy=proxy.vhb.de:80\nset https_proxy=proxy.vhb.de:80\n\n'''\n\nimport os\nimport sys\nfrom builtins import len\nfrom lxml import etree\nimport requests\n\n\n# The script expects the ESV XML file as a command-line parameter\nfilename = sys.argv[1]\n\nprint(filename)\n\nesv_doc = etree.parse(filename)\n\nnamespaces={'ns': 'http://www.editeur.org/onix/2.1/short'}\n\n# XPath expression that locates all PNG images:\nproducts_images = esv_doc.xpath('/*/ns:product/ns:mediafile/ns:f117/text()', namespaces=namespaces)\n\n#print(products_images, len(products_images)) # for testing\n\n# Create the image directory if it does not exist yet:\ndirectory = 'cover'\nif not os.path.exists(directory):\n    os.makedirs(directory)\n\nfor pi in products_images:\n    # The ISBN is always found at the same position in the URL:\n    image_name= 'cover/'+pi.split(\"/\")[-3]+\".png\"\n    r = requests.get(pi, stream=True)\n    with open(image_name, 'wb') as f:\n        for chunk in r.iter_content():\n            f.write(chunk)\n    print(\"Datei: \" + image_name + \" geschrieben.\")\n\nprint(\"Alle Bilder runtergeladen und 
abgelegt.\")\n\n\n","sub_path":"onix-png-download.py","file_name":"onix-png-download.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"514738798","text":"\"\"\"\n*************基于Flask快速构建RestFul API*************\nGithub: https://github.com/connonhc/MiniFlask\n\"\"\"\nfrom flask import request\nfrom flask import g\nfrom config.flask_config import FlaskConfig\nfrom utils.app_manager import AppManager\nfrom utils.response_manager import ResponseManager\nfrom utils.response_manager import ResponseCode\nfrom utils.request_manager import RequestManager\n\n\napp = AppManager.create_app()\n\n\n@app.before_request\ndef app_before_request():\n g.response_manager = ResponseManager()\n g.request_manager = RequestManager(request)\n\n\n@app.teardown_request\ndef app_teardown_request(error):\n del g.response_manager\n if error is not None:\n print(\"Error {}\".format(error))\n\n\n@app.errorhandler(Exception)\ndef app_error_handler(error):\n g.response_manager.set_status_code(ResponseCode.INTERNAL_ERROR)\n if FlaskConfig.DEBUG:\n print(error)\n return g.response_manager.data_response(error=error)\n\n\nif __name__ == '__main__':\n # if platform.node() == \"LXY\":\n app.run(\n host='0.0.0.0',\n port=80,\n debug=True\n )\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"266957679","text":"import numpy as np\nimport scipy.linalg\n\n\n# Using Scipy lib for the QR factorization\ndef qr_scipy_example():\n A = np.array([[12, -51, 4], [6, 167, -68], [-4, 24, -41]]) # From the Wikipedia Article on QR Decomposition\n Q, R = scipy.linalg.qr(A)\n print('scipy q: \\n', Q)\n print('scipy r: \\n', R)\n\n\n# My implementation of some methods, just to get used to NumPy\ndef transpose_matrix(M):\n return np.transpose(M)\n\n\ndef normalize_vector(u):\n return np.linalg.norm(u)\n\n\ndef multiply_matrix(M, N):\n m, n = M.shape\n m1, n1 = N.shape\n if (n != m1):\n return \"Can't multiply\"\n else:\n return np.matmul(M, N)\n return 0\n\n\n\"\"\"\nQR factorization\nWhere Qn..Q1 * A = R\nAnd A = Q1'...Qn' * R\nAnd A = Q * R\n\nI didnt create a matrix R, instead I used H * A, changing A\n\"\"\"\ndef qr(A):\n m, n = A.shape\n Q = np.eye(m)\n for i in range(n - (m == n)):\n H = np.eye(m)\n H[i:, i:] = householder(A[i:, i])\n Q = np.dot(Q, H)\n A = np.dot(H, A)\n return Q, A\n\n\n\"\"\"\nImplementation of Householder reflection\na - column vector\nv - unit vector obtained by a\nreturn h, a householder reflection of the vector h,\n that preserves length\n\"\"\"\ndef householder(a):\n v = a / (a[0] + np.copysign(np.linalg.norm(a), a[0]))\n v[0] = 1\n h = np.eye(a.shape[0])\n h -= (2 / np.dot(v[None, :], v[:, None])) * np.dot(v[:, None], v[None, :])\n return h\n\n\"\"\"\nMain method\n\"\"\"\ndef main():\n A = np.array([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])\n Q, R = qr(A)\n print('q:\\n', Q.round(6))\n print('r:\\n', R.round(6))\n print('-----\\n')\n qr_scipy_example()\n\n\nmain()\n","sub_path":"dec_fac/qrfac.py","file_name":"qrfac.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"457410448","text":"import argparse\nfrom datetime import datetime, timedelta\nimport hashlib\nimport logging\nimport os\nimport sys\n\nimport apache_beam as beam\nfrom apache_beam.options.pipeline_options import 
PipelineOptions\nfrom apache_beam.options.pipeline_options import SetupOptions\n\nfrom utils.filesource import CsvFileSource\nfrom utils.filesink import CsvFileSink\n\n\nCONST_LOADDTM_FIELD = 'dv__load_dtm'\nCONST_CKSUM_FIELD = '__row_cksum'\nCONST_SOURCE_FIELD = 'dv__rec_source'\nCONST_BK_FIELD = 'dv__bk'\nLINK_KEY = 'dv__link_key'\nCONST_STATUS_FIELD = 'dv__status'\n\n\ndef print_line(record):\n print(record)\n\n\ndef print_index(record):\n # index = record[1]['index']\n # data = record[1]['data']\n print(record)\n # print(record[0], len(index), len(data))\n\n\n# Helper: read a tab-separated key-value mapping from a text file,\n# escape all quotes/backslashes, and convert it a PCollection of\n# (key, record) pairs.\ndef read_file(p, label, file_pattern, pk=None):\n data = p | 'Read: %s' % label >> beam.io.Read(CsvFileSource(file_pattern,\n add_source=False,\n dictionary_output=True))\n if pk:\n data = data | 'Key: %s' % label >> beam.Map(lambda x: (x[pk], x))\n return data\n\n\ndef get_business_key(record, bkey_list):\n s = ''\n first = True\n for key in bkey_list:\n if not first:\n s += '|'\n val = record.get(key, '')\n s += str(val).strip().upper()\n first = False\n return s\n\n\ndef calc_cksum(record):\n m = hashlib.md5()\n c = {k:v for k, v in record.items() if k != CONST_LOADDTM_FIELD and k != CONST_STATUS_FIELD}\n m.update(repr(sorted(c.items())))\n return m.hexdigest().upper()\n\n\ndef add_hub_dv_details(record, bkey_list, source):\n rec = record[1]\n rec[CONST_CKSUM_FIELD] = calc_cksum(rec)\n rec[CONST_SOURCE_FIELD] = source\n bk = get_business_key(rec, bkey_list)\n m = hashlib.md5()\n m.update(bk)\n rec[CONST_BK_FIELD] = m.hexdigest().upper()\n return (record[0], rec)\n\n\ndef add_link_dv_details(record, pk_keys, source):\n pk = [record.get(key[1], '') for key in pk_keys]\n pk = '|'.join(pk)\n record[CONST_CKSUM_FIELD] = calc_cksum(record)\n record[LINK_KEY] = pk\n record[CONST_SOURCE_FIELD] = source\n return (pk, record)\n\n\ndef hub_select_index_or_data(record, pk):\n index = record[1]['index']\n data = record[1]['data']\n if len(data) == 1:\n data_rec = data[0]\n return {CONST_BK_FIELD: data_rec[CONST_BK_FIELD],\n CONST_CKSUM_FIELD: data_rec[CONST_CKSUM_FIELD],\n pk: data_rec[pk]}\n\n if len(index) == 1 and len(data) == 0:\n index_rec = index[0]\n return {CONST_BK_FIELD: index_rec[CONST_BK_FIELD],\n CONST_CKSUM_FIELD: index_rec[CONST_CKSUM_FIELD],\n pk: index_rec[pk]}\n raise Exception(\"No valid record found\")\n\n\ndef link_select_index_or_data(record, pk):\n index = record[1]['index']\n data = record[1]['data']\n if len(data) == 1:\n data_rec = data[0]\n return {CONST_CKSUM_FIELD: data_rec[CONST_CKSUM_FIELD],\n pk: data_rec[pk]}\n\n if len(index) == 1 and len(data) == 0:\n index_rec = index[0]\n return {CONST_CKSUM_FIELD: index_rec[CONST_CKSUM_FIELD],\n pk: index_rec[pk]}\n\n raise Exception(\"No valid record found\")\n\n\ndef filter_data_rows(record):\n index = record[1]['index']\n data = record[1]['data']\n if len(index) > 1 or len(data) > 1:\n raise Exception(\"Primary key is not unique\")\n\n if len(data) == 1:\n # always pick up new rows\n return True\n\n # all other cases, filter out the row\n return False\n\ndef extract_data(record):\n index = record[1]['index']\n data = record[1]['data']\n if len(index) > 1 or len(data) > 1:\n raise Exception(\"Primary key is not unique\")\n\n return data[0]\n\n\ndef apply_business_key(record, field_name):\n index = record[1]['index']\n data = record[1]['data']\n for rec in data:\n if len(index) > 0:\n bk = 
index[0][CONST_BK_FIELD]\n rec[field_name] = bk\n else:\n rec[field_name] = None\n return data\n\n\nclass DvdRentalsPipeline(object):\n def __init__(self, source, *args, **kwargs):\n self.source = source\n\n def parse(self, argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('--root',\n required=True,\n help=('Where root of processing area is'))\n parser.add_argument('--execution_dtm',\n required=True,\n help=('Day for which to execute the data flow'))\n known_args, self.pipeline_args = parser.parse_known_args(argv)\n print(known_args)\n\n parsed_dtm = datetime.strptime(known_args.execution_dtm, '%Y-%m-%dT%H:%M:%S')\n self.parsed_dtm = parsed_dtm\n self.year = str(parsed_dtm.year)\n self.month = '{0:02d}'.format(parsed_dtm.month)\n self.day = '{0:02d}'.format(parsed_dtm.day)\n self.yesterday_dtm = parsed_dtm - timedelta(days=1)\n self.psa = os.path.join(known_args.root, 'psa', self.source)\n self.index = os.path.join(known_args.root, 'index', self.source)\n self.loading = os.path.join(known_args.root, 'full-load', self.source)\n\n def get_psa(self, loc):\n return os.path.join(self.psa, loc)\n\n def get_psa_location(self, loc):\n return os.path.join(self.psa, loc, '*', '*', '*', loc)\n\n def get_loading_location(self, loc):\n return os.path.join(self.loading, loc, self.year, self.month, self.day, loc)\n\n def get_source_index(self, loc):\n return os.path.join(self.index, self.yesterday_dtm.strftime('%Y-%m-%d_') + loc)\n\n def get_target_index(self, loc):\n return os.path.join(self.index, self.parsed_dtm.strftime('%Y-%m-%d_') + loc)\n\n def resolve_foreign_keys(self, hub_name, pk, data, foreign_keys, pipeline):\n data = data | 'Unkey_{0}'.format(hub_name) >> \\\n beam.Map(lambda x: x[1])\n\n # Resolve foreign keys first\n for fk in foreign_keys:\n fk_table = fk[0]\n fk_key = fk[1]\n\n fk_index = None\n try:\n # Also set up a stream for the index\n fk_index = read_file(\n pipeline,\n '{0}index'.format(fk_table),\n self.get_target_index('hub_{0}*'.format(fk_table)),\n fk_key)\n except IOError:\n logging.info(\"Could not open index, incorrect load order\")\n raise\n\n data = data | 'Rekey_{0}_{1}'.format(hub_name, fk_table) >> \\\n beam.Map(lambda x: (x[fk_key], x))\n merge = ({'data': data, 'index': fk_index}) | \\\n 'resolve_{0}_{1}'.format(hub_name, fk_table) >> \\\n beam.CoGroupByKey()\n # merge | 'print_{0}'.format(fk_table) >> beam.Map(print_index)\n data = merge | 'convert_{0}_{1}'.format(hub_name, fk_table) >> \\\n beam.FlatMap(apply_business_key, '{0}_bk'.format(fk_table))\n data = data | 'Rekey_{0}'.format(hub_name) >> \\\n beam.Map(lambda x: (x[pk], x))\n\n return data\n\n def run(self):\n self.pipeline_options = PipelineOptions(self.pipeline_args)\n self.pipeline_options.view_as(SetupOptions).save_main_session = True\n\n # We consider the city a reference table and the design decision here\n # is that the address table will contain city+country into the satellite,\n # ditching the over normalization. 
This is much easier to do in hive later on\n # by joining on the id's in staging.\n self.process_hub(\n hub_name='address',\n pk='address_id',\n bkey_list=['postal_code', 'address'],\n field_list=['address', 'address2', 'district', 'city_id',\n 'postal_code', 'phone', 'last_update'])\n self.process_hub(\n hub_name='customer',\n pk='customer_id',\n bkey_list=['email'],\n field_list=['first_name', 'last_name', 'email', 'activebool',\n 'create_date', 'last_update', 'active', 'address_bk'],\n foreign_keys=[('address', 'address_id')])\n\n # Store/staff have bi-directional references, so we have to resolve the manager\n # link later on.\n self.process_hub(\n hub_name='store',\n pk='store_id',\n bkey_list=['store_id'],\n field_list=['last_update', 'manager_staff_id', 'address_bk'],\n foreign_keys=[('address', 'address_id')])\n self.process_hub(\n hub_name='staff',\n pk='staff_id',\n bkey_list=['first_name', 'last_name'],\n field_list=['staff_id', 'first_name' ,'last_name', 'address_bk', 'email', 'store_bk', 'active',\n 'username', 'password', 'last_update'],\n foreign_keys=[('address', 'address_id'), ('store', 'store_id')])\n self.process_hub(\n hub_name='city',\n pk='city_id',\n bkey_list=['city'],\n field_list=['city_id', 'city', 'country_id', 'last_update'])\n self.process_hub(\n hub_name='country',\n pk='country_id',\n bkey_list=['country_id'],\n field_list=['country_id', 'country', 'last_update'])\n\n self.process_hub(\n hub_name='actor',\n pk='actor_id',\n bkey_list=['first_name', 'last_name'],\n field_list=['first_name', 'last_name', 'last_update'])\n\n self.process_hub(\n hub_name='language',\n pk='language_id',\n bkey_list=['name'],\n field_list=['name', 'last_update'])\n self.process_hub(\n hub_name='category',\n pk='category_id',\n bkey_list=['name'],\n field_list=['name', 'last_update'])\n self.process_hub(\n hub_name='film',\n pk='film_id',\n bkey_list=['title', 'release_year'],\n field_list=['title', 'description', 'release_year', 'rental_duration', 'rental_rate',\n 'length', 'replacement_cost', 'rating', 'last_update', 'special_features', 'fulltext', 'language_bk'],\n foreign_keys=[('language', 'language_id')])\n\n # We process inventory as if it were a hub table, because we need the inventory_bk\n self.process_hub(\n hub_name='inventory',\n pk='inventory_id',\n bkey_list=['inventory_id'],\n field_list=['film_id', 'store_id', 'last_update'],\n foreign_keys=[('film', 'film_id'), ('store', 'store_id')])\n\n # Rental could be a hub, could be a link.\n # We process it as a hub for that reason, because it can be converted\n # to any type later on (or produce multiple links)\n self.process_hub(\n hub_name='rental',\n pk='rental_id',\n bkey_list=['rental_id'],\n field_list=['rental_date', 'return_date', 'last_update', 'inventory_bk', 'customer_bk'],\n foreign_keys=[('inventory', 'inventory_id'), ('customer', 'customer_id')])\n self.process_hub(\n hub_name='payment',\n pk='payment_id',\n bkey_list=['payment_id'],\n field_list=['payment_date', 'amount', 'customer_bk', 'staff_bk', 'rental_bk'],\n foreign_keys=[('customer', 'customer_id'), ('staff', 'staff_id'), ('rental', 'rental_id')])\n\n # Links follow a different processing:\n self.process_link(\n link_name='film_category',\n foreign_keys=[('film', 'film_id'), ('category', 'category_id')],\n field_list=['film_bk', 'category_bk'],\n bkey_list=['film_bk', 'category_bk'])\n self.process_link(\n link_name='film_actor',\n foreign_keys=[('film', 'film_id'), ('actor', 'actor_id')],\n field_list=['film_bk', 'actor_bk'],\n bkey_list=['film_bk', 
'actor_bk'])\n\n def process_hub(self,\n hub_name,\n pk,\n bkey_list,\n field_list,\n foreign_keys=None):\n ext_field_list = \\\n field_list + [CONST_BK_FIELD, CONST_SOURCE_FIELD, CONST_LOADDTM_FIELD, CONST_STATUS_FIELD]\n\n with beam.Pipeline(options=self.pipeline_options) as p:\n # First set up a stream for the data\n data = read_file(\n p,\n hub_name,\n self.get_psa_location('public.{0}'.format(hub_name)) + '*',\n pk)\n\n index = None\n try:\n # Also set up a stream for the index\n index = read_file(\n p,\n '{0}index'.format(hub_name),\n self.get_source_index('hub_{0}*'.format(hub_name)),\n pk)\n except IOError:\n logging.info(\"Could not open index, maybe doesn't exist\")\n # create an empty pcollection, so we can at least run\n index = p | beam.Create([])\n\n # Generate business keys, checksum, dv_source, load_dtm\n preproc_data = data | 'preprocess_' + hub_name >> \\\n beam.Map(add_hub_dv_details, bkey_list, self.source)\n\n if foreign_keys:\n data = self.resolve_foreign_keys(\n hub_name=hub_name,\n pk=pk,\n data=data,\n foreign_keys=foreign_keys,\n pipeline=p)\n\n # Group with index to be able to identify new, updated, deleted\n merge = ({'data': preproc_data, 'index': index}) | 'grouped_by_' + pk >> beam.CoGroupByKey()\n\n # Extract the data out of the records (still has index/data dict in there)\n extract = merge \\\n | 'filter_' + hub_name >> beam.Filter(filter_data_rows) \\\n | 'extract_' + hub_name >> beam.Map(extract_data)\n\n # Write them out to disk in loading area\n extract | 'Write_' + hub_name >> beam.io.Write(\n CsvFileSink(\n self.get_loading_location('public.{0}'.format(hub_name)),\n header=ext_field_list))\n\n # Update the index\n updated_index = merge | 'updated_index_' + hub_name >> beam.Map(hub_select_index_or_data, pk)\n updated_index | 'Write_index_' + hub_name >> beam.io.Write(\n CsvFileSink(\n self.get_target_index('hub_{0}'.format(hub_name)),\n header=[CONST_BK_FIELD, CONST_CKSUM_FIELD, pk]))\n\n def process_link(self,\n link_name,\n bkey_list,\n field_list,\n foreign_keys):\n ext_field_list = \\\n [CONST_BK_FIELD, CONST_SOURCE_FIELD, CONST_LOADDTM_FIELD, CONST_STATUS_FIELD] + \\\n field_list\n\n keys = [t[1] for t in foreign_keys]\n generated_pk_name = '|'.join(keys)\n\n with beam.Pipeline(options=self.pipeline_options) as p:\n data = read_file(\n p,\n link_name,\n self.get_psa_location('public.{0}'.format(link_name)) + '*')\n\n index = None\n try:\n # Also set up a stream for the index\n index = read_file(\n p,\n '{0}index'.format(link_name),\n self.get_source_index('link_{0}*'.format(link_name)),\n LINK_KEY)\n except IOError:\n logging.info(\"Could not open index, maybe doesn't exist\")\n # create an empty pcollection, so we can at least run\n index = p | beam.Create([])\n\n preproc_data = data | 'preprocess_' + link_name >> \\\n beam.Map(add_link_dv_details, foreign_keys, self.source)\n\n preproc_data = self.resolve_foreign_keys(\n hub_name=link_name,\n pk=LINK_KEY,\n data=preproc_data,\n foreign_keys=foreign_keys,\n pipeline=p)\n\n # Group with index to be able to identify new, updated, deleted\n merge = ({'data': preproc_data, 'index': index}) | 'grouped_by_' + generated_pk_name >> beam.CoGroupByKey()\n\n # Extract the data out of the records (still has index/data dict in there)\n extract = merge \\\n | 'filter_' + link_name >> beam.Filter(filter_data_rows) \\\n | 'extract_' + link_name >> beam.Map(extract_data)\n\n # Write them out to disk in staging\n extract | 'Write_' + link_name >> beam.io.Write(\n CsvFileSink(\n 
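# per get_loading_location(), this path resolves to\n                    # <root>/full-load/<source>/public.<link_name>/<year>/<month>/<day>/public.<link_name>\n                    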
self.get_loading_location('public.{0}'.format(link_name)),\n header=ext_field_list))\n\n # Update the index\n updated_index = merge | 'updated_index_' + link_name >> beam.Map(link_select_index_or_data, LINK_KEY)\n updated_index | 'Write_index_' + link_name >> beam.io.Write(\n CsvFileSink(\n self.get_target_index('link_{0}'.format(link_name)),\n header=[CONST_CKSUM_FIELD, LINK_KEY]))\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n p = DvdRentalsPipeline('dvdrentals')\n p.parse(sys.argv)\n p.run()\n","sub_path":"examples/datavault2-bigdata-example/dataflow/full_dv_rebuild.py","file_name":"full_dv_rebuild.py","file_ext":"py","file_size_in_byte":17481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"161602408","text":"# Import modules\r\nimport math\r\nimport random\r\nfrom utils import *\r\nfrom player import Player, DeadPlayer, player_max_rtspd, player_size\r\nfrom saucer import Saucer, Bullet\r\nfrom asteroids import Asteroid\r\n\r\ndef gameLoop(startingState):\r\n # Init variables\r\n gameState = startingState\r\n player_state = \"Alive\"\r\n player_blink = 0\r\n player_pieces = []\r\n player_dying_delay = 0\r\n player_invi_dur = 0\r\n hyperspace = 0\r\n next_level_delay = 0\r\n bullet_capacity = 4\r\n bullets = []\r\n asteroids = []\r\n stage = 3\r\n score = 0\r\n live = 2\r\n oneUp_multiplier = 1\r\n playOneUpSFX = 0\r\n intensity = 0\r\n player = Player(display_width / 2, display_height / 2)\r\n saucer = Saucer()\r\n\r\n # Main loop\r\n while gameState != \"Exit\":\r\n # Game menu\r\n while gameState == \"Menu\":\r\n gameDisplay.fill(black)\r\n drawText(\"ASTEROIDS\", white, display_width / 2, display_height / 2, 100)\r\n drawText(\"Press any key to START\", white, display_width / 2, display_height / 2 + 100, 50)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n gameState = \"Exit\"\r\n if event.type == pygame.KEYDOWN:\r\n gameState = \"Playing\"\r\n pygame.display.update()\r\n timer.tick(5)\r\n\r\n # User inputs\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n gameState = \"Exit\"\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_UP:\r\n player.thrust = True\r\n if event.key == pygame.K_LEFT:\r\n player.rtspd = -player_max_rtspd\r\n if event.key == pygame.K_RIGHT:\r\n player.rtspd = player_max_rtspd\r\n if event.key == pygame.K_SPACE and player_dying_delay == 0 and len(bullets) < bullet_capacity:\r\n bullets.append(Bullet(player.x, player.y, player.dir))\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_fire)\r\n if gameState == \"Game Over\":\r\n if event.key == pygame.K_r:\r\n gameState = \"Exit\"\r\n gameLoop(\"Playing\")\r\n if event.key == pygame.K_LSHIFT:\r\n hyperspace = 30\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_UP:\r\n player.thrust = False\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n player.rtspd = 0\r\n\r\n # Update player\r\n player.updatePlayer()\r\n\r\n # Checking player invincible time\r\n if player_invi_dur != 0:\r\n player_invi_dur -= 1\r\n elif hyperspace == 0:\r\n player_state = \"Alive\"\r\n\r\n # Reset display\r\n gameDisplay.fill(black)\r\n\r\n # Hyperspace\r\n if hyperspace != 0:\r\n player_state = \"Died\"\r\n hyperspace -= 1\r\n if hyperspace == 1:\r\n player.x = random.randrange(0, display_width)\r\n player.y = random.randrange(0, display_height)\r\n\r\n # Check for collision w/ asteroid\r\n for a in asteroids:\r\n a.updateAsteroid()\r\n if player_state != 
\"Died\":\r\n if isColliding(player.x, player.y, a.x, a.y, a.size):\r\n # Create ship fragments\r\n player_pieces.append(DeadPlayer(player.x, player.y, 5 * player_size / (2 * math.cos(math.atan(1 / 3)))))\r\n player_pieces.append(DeadPlayer(player.x, player.y, 5 * player_size / (2 * math.cos(math.atan(1 / 3)))))\r\n player_pieces.append(DeadPlayer(player.x, player.y, player_size))\r\n\r\n # Kill player\r\n player_state = \"Died\"\r\n player_dying_delay = 30\r\n player_invi_dur = 120\r\n player.killPlayer()\r\n\r\n if live != 0:\r\n live -= 1\r\n else:\r\n gameState = \"Game Over\"\r\n\r\n # Split asteroid\r\n if a.t == \"Large\":\r\n asteroids.append(Asteroid(a.x, a.y, \"Normal\"))\r\n asteroids.append(Asteroid(a.x, a.y, \"Normal\"))\r\n score += 20\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangL)\r\n elif a.t == \"Normal\":\r\n asteroids.append(Asteroid(a.x, a.y, \"Small\"))\r\n asteroids.append(Asteroid(a.x, a.y, \"Small\"))\r\n score += 50\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangM)\r\n else:\r\n score += 100\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangS)\r\n asteroids.remove(a)\r\n\r\n # Update ship fragments\r\n for f in player_pieces:\r\n f.updateDeadPlayer()\r\n if f.x > display_width or f.x < 0 or f.y > display_height or f.y < 0:\r\n player_pieces.remove(f)\r\n\r\n # Check for end of stage\r\n if len(asteroids) == 0 and saucer.state == \"Dead\":\r\n if next_level_delay < 30:\r\n next_level_delay += 1\r\n else:\r\n stage += 1\r\n intensity = 0\r\n # Spawn asteroid away of center\r\n for i in range(stage):\r\n xTo = display_width / 2\r\n yTo = display_height / 2\r\n while xTo - display_width / 2 < display_width / 4 and yTo - display_height / 2 < display_height / 4:\r\n xTo = random.randrange(0, display_width)\r\n yTo = random.randrange(0, display_height)\r\n asteroids.append(Asteroid(xTo, yTo, \"Large\"))\r\n next_level_delay = 0\r\n\r\n # Update intensity\r\n if intensity < stage * 450:\r\n intensity += 1\r\n\r\n # Saucer\r\n if saucer.state == \"Dead\":\r\n if random.randint(0, 6000) <= (intensity * 2) / (stage * 9) and next_level_delay == 0:\r\n saucer.createSaucer()\r\n # Only small saucers >40000\r\n if score >= 40000:\r\n saucer.type = \"Small\"\r\n else:\r\n # Set saucer targer dir\r\n acc = small_saucer_accuracy * 4 / stage\r\n saucer.bdir = math.degrees(math.atan2(-saucer.y + player.y, -saucer.x + player.x) + math.radians(random.uniform(acc, -acc)))\r\n\r\n saucer.updateSaucer()\r\n saucer.drawSaucer()\r\n\r\n # Check for collision w/ asteroid\r\n for a in asteroids:\r\n if isColliding(saucer.x, saucer.y, a.x, a.y, a.size + saucer.size):\r\n # Set saucer state\r\n saucer.state = \"Dead\"\r\n\r\n # Split asteroid\r\n if a.t == \"Large\":\r\n asteroids.append(Asteroid(a.x, a.y, \"Normal\"))\r\n asteroids.append(Asteroid(a.x, a.y, \"Normal\"))\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangL)\r\n elif a.t == \"Normal\":\r\n asteroids.append(Asteroid(a.x, a.y, \"Small\"))\r\n asteroids.append(Asteroid(a.x, a.y, \"Small\"))\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangM)\r\n else:\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangS)\r\n asteroids.remove(a)\r\n\r\n # Check for collision w/ bullet\r\n for b in bullets:\r\n if isColliding(b.x, b.y, saucer.x, saucer.y, saucer.size):\r\n # Add points\r\n if saucer.type == \"Large\":\r\n score += 200\r\n else:\r\n score += 1000\r\n\r\n # Set saucer state\r\n saucer.state = \"Dead\"\r\n\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangL)\r\n\r\n # Remove bullet\r\n bullets.remove(b)\r\n\r\n # Check 
collision w/ player\r\n if isColliding(saucer.x, saucer.y, player.x, player.y, saucer.size):\r\n if player_state != \"Died\":\r\n # Create ship fragments\r\n player_pieces.append(DeadPlayer(player.x, player.y, 5 * player_size / (2 * math.cos(math.atan(1 / 3)))))\r\n player_pieces.append(DeadPlayer(player.x, player.y, 5 * player_size / (2 * math.cos(math.atan(1 / 3)))))\r\n player_pieces.append(DeadPlayer(player.x, player.y, player_size))\r\n\r\n # Kill player\r\n player_state = \"Died\"\r\n player_dying_delay = 30\r\n player_invi_dur = 120\r\n player.killPlayer()\r\n\r\n if live != 0:\r\n live -= 1\r\n else:\r\n gameState = \"Game Over\"\r\n\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangL)\r\n\r\n # Saucer's bullets\r\n for b in saucer.bullets:\r\n # Update bullets\r\n b.updateBullet()\r\n\r\n # Check for collision w/ asteroids\r\n for a in asteroids:\r\n if isColliding(b.x, b.y, a.x, a.y, a.size):\r\n # Split asteroid\r\n if a.t == \"Large\":\r\n asteroids.append(Asteroid(a.x, a.y, \"Normal\"))\r\n asteroids.append(Asteroid(a.x, a.y, \"Normal\"))\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangL)\r\n elif a.t == \"Normal\":\r\n asteroids.append(Asteroid(a.x, a.y, \"Small\"))\r\n asteroids.append(Asteroid(a.x, a.y, \"Small\"))\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangL)\r\n else:\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangL)\r\n\r\n # Remove asteroid and bullet\r\n asteroids.remove(a)\r\n saucer.bullets.remove(b)\r\n\r\n break\r\n\r\n # Check for collision w/ player\r\n if isColliding(player.x, player.y, b.x, b.y, 5):\r\n if player_state != \"Died\":\r\n # Create ship fragments\r\n player_pieces.append(DeadPlayer(player.x, player.y, 5 * player_size / (2 * math.cos(math.atan(1 / 3)))))\r\n player_pieces.append(DeadPlayer(player.x, player.y, 5 * player_size / (2 * math.cos(math.atan(1 / 3)))))\r\n player_pieces.append(DeadPlayer(player.x, player.y, player_size))\r\n\r\n # Kill player\r\n player_state = \"Died\"\r\n player_dying_delay = 30\r\n player_invi_dur = 120\r\n player.killPlayer()\r\n\r\n if live != 0:\r\n live -= 1\r\n else:\r\n gameState = \"Game Over\"\r\n\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangL)\r\n\r\n # Remove bullet\r\n saucer.bullets.remove(b)\r\n\r\n if b.life <= 0:\r\n try:\r\n saucer.bullets.remove(b)\r\n except ValueError:\r\n continue\r\n\r\n # Bullets\r\n for b in bullets:\r\n # Update bullets\r\n b.updateBullet()\r\n\r\n # Check for bullets collide w/ asteroid\r\n for a in asteroids:\r\n if b.x > a.x - a.size and b.x < a.x + a.size and b.y > a.y - a.size and b.y < a.y + a.size:\r\n # Split asteroid\r\n if a.t == \"Large\":\r\n asteroids.append(Asteroid(a.x, a.y, \"Normal\"))\r\n asteroids.append(Asteroid(a.x, a.y, \"Normal\"))\r\n score += 20\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangL)\r\n elif a.t == \"Normal\":\r\n asteroids.append(Asteroid(a.x, a.y, \"Small\"))\r\n asteroids.append(Asteroid(a.x, a.y, \"Small\"))\r\n score += 50\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangM)\r\n else:\r\n score += 100\r\n # Play SFX\r\n pygame.mixer.Sound.play(snd_bangS)\r\n asteroids.remove(a)\r\n bullets.remove(b)\r\n\r\n break\r\n\r\n # Destroying bullets\r\n if b.life <= 0:\r\n try:\r\n bullets.remove(b)\r\n except ValueError:\r\n continue\r\n\r\n # Extra live\r\n if score > oneUp_multiplier * 10000:\r\n oneUp_multiplier += 1\r\n live += 1\r\n playOneUpSFX = 60\r\n # Play sfx\r\n if playOneUpSFX > 0:\r\n playOneUpSFX -= 1\r\n pygame.mixer.Sound.play(snd_extra, 60)\r\n\r\n # Draw player\r\n if gameState != \"Game Over\":\r\n 
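# Death/respawn rendering: nothing is drawn while hyperspace or player_dying_delay is counting down; after that the ship flashes during invincibility -- drawPlayer() runs only while player_blink is 5 or more, and the counter wraps back to 10 when it reaches 0, giving an on/off blink.\r\n            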
if player_state == \"Died\":\r\n if hyperspace == 0:\r\n if player_dying_delay == 0:\r\n if player_blink < 5:\r\n if player_blink == 0:\r\n player_blink = 10\r\n else:\r\n player.drawPlayer()\r\n player_blink -= 1\r\n else:\r\n player_dying_delay -= 1\r\n else:\r\n player.drawPlayer()\r\n else:\r\n drawText(\"Game Over\", white, display_width / 2, display_height / 2, 100)\r\n drawText(\"Press \\\"R\\\" to restart!\", white, display_width / 2, display_height / 2 + 100, 50)\r\n live = -1\r\n\r\n # Draw score\r\n drawText(str(score), white, 60, 20, 40, False)\r\n\r\n # Draw Lives\r\n for l in range(live + 1):\r\n Player(75 + l * 25, 75).drawPlayer()\r\n\r\n # Update screen\r\n pygame.display.update()\r\n\r\n # Tick fps\r\n timer.tick(30)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Start game\r\n gameLoop(\"Menu\")\r\n\r\n # End game\r\n pygame.quit()\r\n quit()\r\n","sub_path":"astermax/astermax.py","file_name":"astermax.py","file_ext":"py","file_size_in_byte":14956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"518256181","text":"# -*- coding: utf-8 -*-\nimport argparse\n\nfrom Only_Python.Bowling.bowling import bowling_main\n\n\ndef main():\n \"\"\"\n Из текущего файла сделана консольная утилита для определения количества очков,\n с помощью пакета argparse.\n Скрипт принимает параметр --result и печататает на консоль\n Количество очков для результатов ХХХ - УУУ.\n \"\"\"\n parser = argparse.ArgumentParser(description='bowling')\n parser.add_argument('--result', type=str)\n args = parser.parse_args()\n input_data = args.result\n bowling_main.process_game(input_data=input_data, total_frames=10)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"Only_Python/Bowling/01_score.py","file_name":"01_score.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"320396076","text":"#Q. 
-(d^2u/dx^2 + d^2u/dy^2) + 0.1u = 1 , R = 0 ' )\r\n ax.set_zlabel( ' <--- Values of f(x,y) ' )\r\n ax.view_init(60,35)\r\n fig\r\n '''\r\n # 2nd plot type\r\n ax.plot_wireframe(X, Y, Z, color='green')\r\n ax.set_xlabel( ' Values of x ---> ' )\r\n ax.set_ylabel( ' Values of y ---> ' )\r\n ax.set_zlabel( ' Values of f(x,y) ---> ' )\r\n plt.show()\r\n ax = plt.axes(projection='3d')\r\n ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='winter', edgecolor='none')\r\n\r\n\r\n\r\ndef GaussSeidel(XY,dx,dy,N,M):\r\n \r\n # w -> relaxation parameter\r\n # if w = 1 -> gauss seidel\r\n w = 1.5\r\n \r\n h = dx #(h = dx = dy)\r\n \r\n XY_ = []\r\n # BC at x=x0\r\n x = [0]*(M+1)\r\n XY_.append(x)\r\n # set internal values\r\n for i in range(1,N):\r\n y = []\r\n # BC at y=y0\r\n y.append(0)\r\n for j in range(1,M):\r\n a = (1 + XY[i+1][j]/(h*h) + XY_[i-1][j]/(h*h) + y[j-1]/(h*h) + XY[i][j+1]/(h*h) )/(4/(h*h)+0.1)\r\n #successive over relaxation\r\n b = XY[i][j] + w*(a - XY[i][j])\r\n y.append(b)\r\n # BC at y=yn\r\n if(len(y)>1):\r\n y.append(y[-2])\r\n else:\r\n y.append(y[-1])\r\n \r\n XY_.append(y)\r\n # BC at x=xn\r\n x = []\r\n for j in range(M+1):\r\n if(N-2>=0):\r\n x.append(XY_[N-2][j])\r\n else:\r\n x.append(XY_[N-1][j])\r\n XY_.append(x)\r\n \r\n # find maximum error\r\n error = 0.0000\r\n for i in range(N+1):\r\n for j in range(M+1):\r\n if(error < abs(XY_[i][j] - XY[i][j])):\r\n error = abs(XY_[i][j] - XY[i][j])\r\n \r\n # update the values\r\n for i in range(N+1):\r\n for j in range(M+1):\r\n XY[i][j] = XY_[i][j] \r\n \r\n return error\r\n \r\n \r\n\r\n\r\n# dx = grid size in x axis\r\ndx = 1/4\r\n\r\n# dy = grid size in x axis\r\ndy = 1/4\r\n\r\n# st -> start point, en -> end point for x and y axis\r\nstx = 0\r\nenx = 1\r\nsty = 0\r\neny = 1\r\n\r\n# N = number of x-axis points\r\nN = (int)((enx - stx)/dx)\r\n\r\n# M = number of y-axis points\r\nM = (int)((eny - sty)/dy)\r\n\r\n# BC is du/dn = 0 => u(i-1,j)=u(i+1,j) and u(i,j-1)=u(i,j+1) on x=1,y=1\r\n\r\n# set the initial matrix\r\nXY = []\r\n#set the initial values according to initial guess\r\n# guess -> u = 10\r\n\r\n# BC at x=x0\r\nx = [0]*(M+1)\r\nXY.append(x)\r\n# set internal values acc to the guess\r\nfor i in range(1,N):\r\n y = []\r\n # BC at y=y0\r\n y.append(0)\r\n for j in range(1,M):\r\n y.append(10)\r\n # BC at y=yn\r\n if(len(y)>1):\r\n y.append(y[-2])\r\n else:\r\n y.append(y[-1])\r\n\r\n XY.append(y)\r\n# BC at x=xn\r\nx = []\r\nfor j in range(M+1):\r\n if(N-2>=0):\r\n x.append(XY[N-2][j])\r\n else:\r\n x.append(XY[N-1][j])\r\nXY.append(x)\r\n\r\n\r\nprint(\"Initial Guess:\")\r\nshowResult(XY,stx,sty,dx,dy,N,M)\r\n\r\n# set the error limit\r\nepsilon = 0.000001\r\n\r\n# set err variable to keep track of convergence\r\nerr = 10000000000\r\n\r\niteration_num = 0\r\nlimit = 200\r\n# limit sets the number of iterations\r\n\r\nwhile(iteration_num < limit and err > epsilon):\r\n iteration_num+=1\r\n print(\"Iteration = \",iteration_num)\r\n err = GaussSeidel(XY,dx,dy,N,M)\r\n showResult(XY,stx,sty,dx,dy,N,M)\r\n\r\nif(iteration_num==limit):\r\n print(\"Doesn't converge\")\r\n","sub_path":"GaussSeidel_.py","file_name":"GaussSeidel_.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"354876058","text":"from flask import Flask, jsonify, request\nfrom flask_jwt_extended import (\n JWTManager, jwt_required, create_access_token,\n get_jwt_identity\n)\n\nfrom .models import MyDiary, Entries\nfrom mydiary import app, app_db, 
now_time\n\n\nmy_diary_object = MyDiary()\nmy_diary_object.user_entries = Entries()\n\n\n\"\"\" returns a single diary entry \"\"\"\n@app.route('/api/v1/entries/<diary_entry_id>', methods=['GET'])\n@jwt_required\ndef get_entry(diary_entry_id):\n    \"\"\" outputs one user entry specified by the id in the url \"\"\"\n    user_id = get_jwt_identity()\n    get_entry = my_diary_object.user_entries.getOneEntry(\n        user_id,\n        diary_entry_id\n    )\n    if get_entry == 'The specified entry cannot be found':\n        return jsonify({'error': get_entry}), 400\n    return jsonify({'getEntry': get_entry}), 200\n\n\"\"\" returns all diary entries \"\"\"\n@app.route('/api/v1/entries', methods=['GET'])\n@jwt_required\ndef get_all_entries():\n    \"\"\" outputs all entries for the logged in user \"\"\"\n    user_id_data = get_jwt_identity()\n    get_entries = my_diary_object.user_entries.getAllEntries(now_time, user_id_data)\n    return jsonify({\"entries\": get_entries[0], \"writtenToday\": get_entries[1], \"msg\": \"Authorized\"}), 200\n\n\"\"\" this route adds a single diary entry \"\"\"\n@app.route('/api/v1/entries', methods=['POST'])\n@jwt_required\ndef post_entry():\n    \"\"\" this method creates a new entry \"\"\"\n    if not request.json:\n        return jsonify({\"input error\": \"please input json data\"}), 400\n    if 'entrydata' not in request.json:\n        return jsonify({\"message\": \"Cannot find diary entry data\"}), 400\n    if 'entrytitle' not in request.json:\n        return jsonify({\"message\": \"Cannot find diary title\"}), 400\n    entry_data=request.json.get('entrydata', \"\")\n    title_data=request.json.get('entrytitle', \"\")\n    if entry_data == \"\":\n        return jsonify({\"message\": \"Null entry field\"}), 404\n    if title_data == \"\":\n        title_data = \"...No Title...\"\n    user_id_data = get_jwt_identity()\n    add_entry = my_diary_object.user_entries.addEntry(\n        user_id_data,\n        title_data,\n        entry_data,\n        now_time\n    )\n    if add_entry == \"Entry added successfully\":\n        new_entry = {\n            'user_id' : user_id_data,\n            'title' : title_data,\n            'entrydata' : entry_data,\n            'datecreated' : now_time\n        }\n        return jsonify({'message' : add_entry, 'entry added' : new_entry}), 201\n    return jsonify({'message' : add_entry}), 409\n\n\"\"\" this route updates a single diary entry \"\"\"\n@app.route('/api/v1/entries/<diary_entry_id>', \\\n    methods=['PUT'])\n@jwt_required\ndef put_entry(diary_entry_id):\n    \"\"\" this method updates an entry's data \"\"\"\n    if not request.json:\n        return jsonify({\"input error\": \"please input data in json format\"}), 400\n    if 'entrydata' not in request.json:\n        return jsonify({\"message\": \"Diary entry data not found\"}), 400\n    if 'entrytitle' not in request.json:\n        return jsonify({\"message\": \"Diary entry title not found\"}), 400\n    data = request.get_json()\n    entry_data=data[\"entrydata\"]\n    title_data=data[\"entrytitle\"]\n    if entry_data == \"\":\n        return jsonify({\"message\": \"Null entry field\"}), 404\n    if title_data == \"\":\n        title_data = \"...No Title...\"\n    edit_time = now_time\n    user_id_data = get_jwt_identity()\n    entry_id_data = diary_entry_id\n    edit_entry = my_diary_object.user_entries.modifyEntry(\n        title_data,\n        entry_data,\n        edit_time,\n        entry_id_data,\n        user_id_data\n    )\n    if edit_entry == \"Entry edited\":\n        entry = {\n            'entry_id': entry_id_data,\n            'user_id': user_id_data,\n            'title': title_data,\n            'entrydata': entry_data,\n            'datecreated': now_time\n        }\n        return jsonify({'entry':entry, \"message\": edit_entry}), 201\n    return jsonify({'message': edit_entry}), 400\n\n\"\"\" this route deletes a diary entry \"\"\"\n@app.route('/api/v1/entries/<diary_entry_id>', 
methods=['DELETE'])\n@jwt_required\ndef delete_entry(diary_entry_id):\n \"\"\" this method deletes an entry \"\"\"\n user_id_data = get_jwt_identity()\n entry_id_data = diary_entry_id\n delete_entry = my_diary_object.user_entries.deleteEntry(entry_id_data, user_id_data)\n return jsonify({'message': delete_entry})\n","sub_path":"mydiary/entries.py","file_name":"entries.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"389943245","text":"import unittest\nfrom wta_hikes.wta_scrapers.wta_scrapers.spiders.trip_report import TripReportScraper\nfrom wta_hikes.wta_scrapers.test.responses import fake_response_from_file\n\nclass IcodropsUpcomingSpiderTest(unittest.TestCase):\n\n def setUp(self):\n self.spider = TripReportScraper()\n self.maxDiff = None\n\n def test_parse(self):\n results = self.spider.parse(fake_response_from_file('data/trip_report_landing.html'))\n\n actual_urls = sorted([str(x.url) for x in results])\n print(actual_urls)\n #print(actual_urls)\n # self.assertEqual(actual_urls, expected_urls)\n\n def test_hike_scraper(self):\n expected = {'HikeName': 'Melakwa Lake',\n 'AuthorName': '',\n 'TrailConditions': 'Minor obstacles posing few problems',\n 'Road': 'Road suitable for all vehicles',\n 'Bugs': 'Bugs were not too bad',\n 'Snow': 'Snow free',\n 'ReportText': \"We started hiking about 9:15 on Tuesday and had no difficulty\"\n \" parking in the main lot. It was a warm humid day and where \"\n \"there was no tree coverage on the trail it was very hot. \"\n \"No bugs at the parking lot or along the trail. At the lake \"\n \"there were flies but no one saw mosquitoes. Most of our \"\n \"group swam in the main lake and two of us swam in the small \"\n \"lake which was much, much colder. The trail up to the lake\"\n \" past the falls is rocky so we didn't save much time on the\"\n \" descent. 
This is a beautiful area!\",\n 'DateHiked': 'Jul 31, 2018',\n 'Url': 'http://www.example.com'}\n self.do_parse_profile_test('data/trip_report.html', expected)\n\n def do_parse_profile_test(self, response_path, expected, debug=False):\n actual = self.spider.parse_profiles(fake_response_from_file(response_path))\n actual = dict(actual)\n if debug:\n print(actual)\n self.assertDictEqual(expected, dict(actual))\n\n # def do_parse_pages_test(self, response_path, debug=False):\n # actual = self.spider.parse_pages(fake_response_from_file(response_path))\n # if debug:\n # print(actual)\n # self.assertEqual(99, len(actual))\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"wta_scrapers/test/test_report_scraper.py","file_name":"test_report_scraper.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"125891441","text":"def findParent(i,disjset:list):\n while disjset[i] != -1:\n i = disjset[i]\n return i\n\ndef union(i,j,disjset:list):\n disjset[findParent(i,disjset)] = j\n\ndef minimalSpanningTree(nodes,edges):\n edges = sorted(edges,key=lambda e:e[2])\n disjset = []\n totalWeight = 0\n count = 0\n for i in range(nodes):\n disjset.append(-1)\n for i in range(len(edges)):\n if findParent(edges[i][0]-1,disjset) != findParent(edges[i][1]-1,disjset):\n union(edges[i][0]-1,edges[i][1]-1,disjset)\n totalWeight += edges[i][2]\n count += 1\n #边数等于节点数-1\n if count == nodes-1:\n break\n print(totalWeightm,end=\"\")\n\nif __name__ == \"__main__\":\n n,m = map(int,input().split(\" \"))\n edge = []\n for i in range(m):\n newedge = list(map(int,input().split(\" \")))\n edge.append(newedge)\n minimalSpanningTree(n,edge)","sub_path":"Code/CodeRecords/2086/61053/289096.py","file_name":"289096.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"276910634","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nkernel_slope = np.array([1, 1, -1, 1, -1, 0, -1, 0, 0])\nkernel_slope = np.reshape(kernel_slope, (3, 3))\n\nkernels = [kernel_slope, np.flip(kernel_slope)]\n\n\ndef get_maximum(arr):\n return np.argmax(arr)\n\n\ndef refine_kernel_results(img):\n second_kernel = np.array([-1, -1, 1, -1, 1, -1, 1, -1, -1])\n second_kernel = np.reshape(second_kernel, (3, 3))\n\n tmp = cv2.filter2D(img, -1, second_kernel)\n _, output = cv2.threshold(tmp, 2.2, 3., cv2.THRESH_BINARY)\n return output / 3.\n\n\nclass SlopedLinesDetection:\n def __init__(self, image):\n self.image = image\n\n def get_kernel_results(self):\n all_filters_added = np.zeros(self.image.shape)\n for kernel in kernels:\n res = cv2.filter2D(self.image, -1, kernel)\n _, res = cv2.threshold(res, 2.5, 3., cv2.THRESH_BINARY)\n all_filters_added += res / 3.\n return all_filters_added\n\n def get_candidate_triple(self):\n kernel_image = self.get_kernel_results()\n kernel_image = refine_kernel_results(kernel_image)\n left_to_right_sum = np.reshape(cv2.reduce(kernel_image, 0, cv2.REDUCE_SUM), (-1,))\n up_down_sum = np.reshape(cv2.reduce(kernel_image, 1, cv2.REDUCE_SUM), (-1,))\n\n first_index = get_maximum(left_to_right_sum)\n left_to_right_sum[first_index-10:first_index+10] = np.zeros((20,))\n second_index = get_maximum(left_to_right_sum)\n third_index = get_maximum(up_down_sum)\n\n first_index /= self.image.shape[1]\n second_index /= self.image.shape[1]\n third_index /= self.image.shape[0]\n\n if first_index < second_index:\n return first_index, 
second_index, third_index\n else:\n return second_index, first_index, third_index\n\n","sub_path":"Pre-alpha/detect_sloped_lines.py","file_name":"detect_sloped_lines.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"199350196","text":"import boto3\n# import ffmpeg\nimport os\nimport json\nfrom botocore.client import Config\nimport subprocess\nimport shlex\nimport re\nimport time\n\nSOURCE_BUCKET = os.environ['MEDIA_CAPTURE_BUCKET']\nSOURCE_PREFIX = 'captures'\n\ndef handler(event, context):\n print(event)\n time.sleep(15)\n meetingBody = json.loads(event['body'])\n MEETING_ID = meetingBody['meetingId']\n prefix = SOURCE_PREFIX + '/' + MEETING_ID + '/audio'\n client = boto3.client('s3')\n\n response = client.list_objects_v2(\n Bucket=SOURCE_BUCKET,\n Delimiter='string',\n MaxKeys=1000,\n Prefix=prefix\n )\n objects = response.get('Contents', [])\n file_list=[]\n print(objects)\n for object in objects:\n path, filename = os.path.split(object['Key'])\n client.download_file(SOURCE_BUCKET, object['Key'], '/tmp/' + filename)\n file_list.append(filename)\n\n print(\"Concatenating audio files...\")\n audio_objs_keys = filter(lambda x : 'mp4' in x, file_list)\n print(file_list)\n print(audio_objs_keys)\n with open('/tmp/audio_list.txt', 'w') as f:\n for k in audio_objs_keys:\n basename = os.path.splitext(k)[0]\n print(basename)\n ffmpeg_cmd = \"ffmpeg -i /tmp/\" + k + \" -bsf:v h264_mp4toannexb -f mpegts -framerate 15 -c copy /tmp/\" + basename + \".ts -y\"\n command1 = shlex.split(ffmpeg_cmd)\n print (command1)\n p1 = subprocess.run(command1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(p1)\n f.write(f'file \\'/tmp/{basename}.ts\\'\\n')\n\n ffmpeg_cmd = \"ffmpeg -f concat -safe 0 -i /tmp/audio_list.txt -c copy /tmp/audio.mp4 -y\"\n command1 = shlex.split(ffmpeg_cmd)\n p1 = subprocess.run(command1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(str(p1))\n client.upload_file('/tmp/audio.mp4', SOURCE_BUCKET, \"captures/\" + MEETING_ID + \"/processed\" + '/processedAudio.mp4')\n processedAudioUrl = client.generate_presigned_url('get_object', Params={'Bucket': SOURCE_BUCKET, 'Key': \"captures/\" + MEETING_ID + \"/processed\" + '/processedAudio.mp4' })\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Content-Type\": \"application/json\",\n 'Access-Control-Allow-Headers': 'Content-Type',\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Methods': 'OPTIONS,POST' \n },\n \"body\": json.dumps({\n \"processedUrl\" : processedAudioUrl\n })\n }","sub_path":"src/processLambda/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"547959951","text":"\"\"\"\nTime: O(mn)\nSpace: O(1)\nLeet: Accept\nProblems: None\n\"\"\"\n\nclass Solution(object):\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n islands = 0\n\n def sink(grid, row, col):\n #simple dfs traversal to sink the whole island\n if grid[row][col] == '1':\n grid[row][col] = '0'\n\n if row>0:\n sink(grid,row-1,col)\n if col>0:\n sink(grid,row,col-1)\n if row 0:\n job = queue.popleft()\n if job.response == -1:\n job.response = clock\n job.wait += (clock - job.lastran)\n if job.runtime > quantum:\n job.runtime -= quantum\n print(fmt_str0 % (clock, job.id, quantum))\n queue.append(job)\n clock += quantum\n else:\n job.turnaround = clock + job.runtime\n 
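# All jobs arrive at time 0, so turnaround here is simply the completion time. Worked example (assumed inputs, not from this run): with quantum=1 and two jobs of runtime 2, job 0 gets response 0, turnaround 3, wait 1; job 1 gets response 1, turnaround 4, wait 2 -- note wait == turnaround - runtime, since wait accumulates (clock - lastran) at every dispatch.\n                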
print(fmt_str1 % (clock, job.id, job.runtime, job.turnaround))\n                clock += job.runtime\n                job.runtime = 0\n            job.lastran = clock\n\n    print('\\nFinal statistics:')\n    for job in jobs:\n        print(fmt_str2 % (job.id, job.response, job.turnaround, job.wait))\n    aver_response = sum(job.response for job in jobs) / options.jobs\n    aver_turnaround = sum(job.turnaround for job in jobs) / options.jobs\n    aver_wait = sum(job.wait for job in jobs) / options.jobs\n    print(fmt_str3 % (aver_response, aver_turnaround, aver_wait))\nelse:\n    msg = ('Compute the turnaround time, response time, and wait time for '\n           'each job.\\nWhen you are done, run this program again, with the '\n           'same arguments,\\nbut with -c, which will thus provide you with '\n           'the answers. You can use\\n-s or your own job list '\n           '(-l 10,15,20 for example)\\nto generate different problems for '\n           'yourself.\\n')\n    print(msg)\n","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"600279199","text":"from django.conf.urls import url\nfrom registration.api import views\n\n\nurlpatterns = [\n    url(r'^register/$', views.RegisterView.as_view()),\n    url(r'^active/$', views.ActiveView.as_view()),\n    url(r'^active/(?P<key>[a-zA-Z0-9]{64})/$', views.ActiveView.as_view()),\n    url(r'^account/(?P<action>logout)/$', views.AccountView.as_view()),\n    url(r'^account/(?P<action>login)/$', views.AccountView.as_view()),\n]\n","sub_path":"useless/python/headquaters/registration/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"89630682","text":"# -*- coding: utf-8 -*-\nimport os, sys\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../util'))\nimport loghelper, util, db, config, name_helper\n\n#logger\nloghelper.init_logger(\"patch_user_wxheadimgurl\", stream=True)\nlogger = loghelper.get_logger(\"patch_user_wxheadimgurl\")\n\nconn = None\n\n\ndef main():\n    conn = db.connect_torndb()\n    users = conn.query(\"select * from user where wxheadimgurl is null\")\n    for user in users:\n        uw = conn.get(\"select * from user_wechat where userId=%s and headimgurl is not null order by id desc limit 1\", user[\"id\"])\n        if uw is not None:\n            conn.update(\"update user set wxheadimgurl=%s where id=%s\", uw[\"headimgurl\"], user[\"id\"])\n    conn.close()\n\n\ndef main2():\n    conn = db.connect_torndb()\n    users = conn.query(\"select * from user where username is not null and username=phone\")\n    for user in users:\n        uw = conn.get(\"select * from user_wechat where userId=%s and headimgurl is not null order by id desc limit 1\", user[\"id\"])\n        if uw is not None:\n            conn.update(\"update user set username=%s where id=%s\", uw[\"nickname\"], user[\"id\"])\n    conn.close()\n\n\nif __name__ == \"__main__\":\n    main()\n    main2()","sub_path":"data/patch/patch_user_wxheadimgurl.py","file_name":"patch_user_wxheadimgurl.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"188668528","text":"import sys\nimport sparql_query\nimport json\nimport glob\nimport os\nimport networkx as nx\n# import matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg')  # Must be before importing matplotlib.pyplot or pylab!\nimport matplotlib.pyplot as plt\n\n\n_TYPE_URI = '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>'\n\n\ndef get_type(uri, 
endpoint_url):\n try:\n result = sparql_query.query(\n 'select distinct ?type where {' + uri + ' ' + _TYPE_URI + ' ?type}', endpoint_url)\n except:\n return None\n else:\n json_result = json.loads(result)['results']['bindings']\n if json_result:\n return json_result\n return None\n\n\n# @TODO Escribir la funcion que hace una consulta para sacar los\n# tipos mencionados en el endpoint y devolverlos como un set.\ndef get_types(endpoint):\n json_results = get_type('[]', endpoint)\n types = []\n for result in json_results:\n types.append(result['type']['value'])\n return set(types)\n\n\n# @TODO Escribir la funcion que con una consulta retorne\n# la cantidad de veces que aparece el tipo t en el endpoint.\ndef count_instances(endpoint_url, t):\n try:\n result = sparql_query.query(\n 'select distinct count(?s) where { ?s ' + _TYPE_URI + ' <' + t + '>}', endpoint_url)\n except:\n return None\n else:\n json_result = json.loads(result)['results']['bindings']\n if json_result:\n return int(json_result[0]['callret-0']['value'])\n return None\n\n\ndef generate_type_relation_stats(dataset, count_repetitions):\n\n path = '/media/data/logs/'\n # path = '/Users/cbuil/Proyectos/logs/data/logs_examples/'\n current_path = path + 'datasets/' + dataset + '/logs/'\n type_relation_maps = {}\n file_count = 0\n\n f_write = open(os.getcwd() + '/type_relation_map_graph_' + dataset + '_repeats_' + str(count_repetitions) + '.nt', 'w')\n for day_folder in sorted(glob.glob(current_path + 'relation_maps/*')):\n for user_file in glob.glob(day_folder + '/*.json'):\n with open(user_file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n json_line = json.loads(line)\n repetitions = json_line['repetitions']\n type_relation_map = json_line['type_relation_map']\n for elements in type_relation_map:\n if 'openlink' not in elements[0] and 'openlink' not in elements[1] and 'openlink' not in elements[2]:\n f_write.write(str(elements[0]) + ' ' + str(elements[1]) + ' ' + str(elements[2]) + ' . \\n')\n if '?' in elements[0]:\n elements[0] = '_:s' # + str(file_count)\n if '?' in elements[1]:\n elements[1] = '_:p' # + str(file_count)\n if '?' in elements[2]:\n elements[2] = '_:o' # + str(file_count)\n if count_repetitions:\n type_relation_maps[tuple(elements)] = type_relation_maps.get(tuple(elements), 0) + int(repetitions)\n else:\n type_relation_maps[tuple(elements)] = type_relation_maps.get(tuple(elements), 0) + 1\n file_count += 1\n f_write.close()\n\n graph = nx.DiGraph()\n f_write = open('Type_relation_maps_stats_' + dataset + '_repeats_' + str(count_repetitions) + '.txt', 'w')\n with open('type_relation_map_graph_' + dataset + '_repeats_' + str(count_repetitions) + '_distinct.nt', 'w') as f_write_1:\n for type_relation_map in sorted(type_relation_maps, key=lambda type_relation_map: type_relation_maps[type_relation_map], reverse=True):\n if type_relation_maps[type_relation_map] > 10:\n graph.add_node(type_relation_map[0])\n graph.add_node(type_relation_map[2])\n graph.add_edge(type_relation_map[0], type_relation_map[2], weight=type_relation_maps[type_relation_map], label=type_relation_map[1])\n print(json.dumps(type_relation_map) + ' ' + str(type_relation_maps[type_relation_map]))\n f_write_1.write(json.dumps(type_relation_map[0]) + ' ' + json.dumps(type_relation_map[1]) + ' ' + json.dumps(type_relation_map[2]) + ' . 
\\n')\n # for element in type_relation_map:\n f_write.write(str(type_relation_map) + ' ' + str(type_relation_maps[type_relation_map]) + '\\n')\n # print(str(type_relation_map) + ' ' + str(type_relation_maps[type_relation_map]))\n f_write.close()\n nx.draw(graph)\n plt.savefig(\"graph.png\")\n\n\nif __name__ == '__main__':\n\n # Para ver que esta funcionando usamos un endpoint.\n # Si todo anda bien basta con eliminar el [:1] y dejarlo corriendo\n if len(sys.argv) == 1:\n print('usage: python type_stats.py ')\n else:\n generate_type_relation_stats(sys.argv[1], False)\n","sub_path":"src/statistics/type_relation_map_stats.py","file_name":"type_relation_map_stats.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"632467250","text":"from click.testing import CliRunner\nfrom unittest import mock\nimport json\n\nimport pytest\n\nfrom mapbox_tilesets.scripts.cli import (\n add_source,\n view_source,\n delete_source,\n validate_source,\n list_sources,\n)\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.post\")\ndef test_cli_add_source(mock_request_post, MockResponse):\n okay_response = {\"id\": \"mapbox://tileset-source/test-user/hello-world\"}\n mock_request_post.return_value = MockResponse(okay_response, status_code=200)\n runner = CliRunner()\n validated_result = runner.invoke(\n add_source, [\"test-user\", \"hello-world\", \"tests/fixtures/valid.ldgeojson\"]\n )\n assert validated_result.exit_code == 0\n\n assert (\n validated_result.output\n == \"\"\"{\"id\": \"mapbox://tileset-source/test-user/hello-world\"}\\n\"\"\"\n )\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.post\")\ndef test_cli_add_source_no_validation(mock_request_post, MockResponse):\n error_response = {\n \"message\": \"Invalid file format. Only GeoJSON features are allowed.\"\n }\n mock_request_post.return_value = MockResponse(error_response, status_code=400)\n runner = CliRunner()\n no_validation_result = runner.invoke(\n add_source,\n [\n \"test-user\",\n \"hello-again\",\n \"tests/fixtures/invalid.ldgeojson\",\n \"--no-validation\",\n ],\n )\n assert no_validation_result.exit_code == 1\n\n assert (\n no_validation_result.exception.message\n == '{\"message\": \"Invalid file format. Only GeoJSON features are allowed.\"}'\n )\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.get\")\ndef test_cli_view_source(mock_request_get, MockResponse):\n message = {\"id\": \"mapbox://tileset-source/test-user/hello-world\"}\n mock_request_get.return_value = MockResponse(message, status_code=200)\n runner = CliRunner()\n result = runner.invoke(view_source, [\"test-user\", \"hello-world\"])\n\n assert result.exit_code == 0\n assert json.loads(result.output) == message\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.delete\")\ndef test_cli_delete_source(mock_request_delete, MockResponse):\n mock_request_delete.return_value = MockResponse(\"\", status_code=204)\n runner = CliRunner()\n result = runner.invoke(delete_source, [\"test-user\", \"hello-world\"], input=\"y\")\n assert result.exit_code == 0\n assert (\n result.output\n == \"Are you sure you want to delete test-user hello-world? 
[y/N]: y\\nSource deleted.\\n\"\n )\n force_result = runner.invoke(delete_source, [\"test-user\", \"hello-world\", \"--force\"])\n assert force_result.exit_code == 0\n assert force_result.output == \"Source deleted.\\n\"\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.delete\")\ndef test_cli_delete_source_aborted(mock_request_delete, MockResponse):\n mock_request_delete.return_value = MockResponse(\"\", status_code=201)\n runner = CliRunner()\n result = runner.invoke(delete_source, [\"test-user\", \"hello-world\"], input=\"n\")\n assert result.exit_code == 1\n assert (\n result.output\n == \"Are you sure you want to delete test-user hello-world? [y/N]: n\\nAborted!\\n\"\n )\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.get\")\ndef test_cli_view_source_2(mock_request_get, MockResponse):\n message = [\n {\"id\": \"mapbox://tileset-source/test-user/hello-world\"},\n {\"id\": \"mapbox://tileset-source/test-user/hola-mundo\"},\n ]\n mock_request_get.return_value = MockResponse(message, status_code=200)\n runner = CliRunner()\n result = runner.invoke(list_sources, [\"test-user\"])\n\n assert result.exit_code == 0\n assert (\n result.output\n == \"mapbox://tileset-source/test-user/hello-world\\nmapbox://tileset-source/test-user/hola-mundo\\n\"\n )\n\n\n@pytest.mark.usefixtures(\"token_environ\")\ndef test_cli_validate_source():\n runner = CliRunner()\n result = runner.invoke(validate_source, [\"tests/fixtures/valid.ldgeojson\"])\n assert result.exit_code == 0\n assert result.output == \"Validating features\\n✔ valid\\n\"\n","sub_path":"tests/test_cli_sources.py","file_name":"test_cli_sources.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"34117549","text":"from setuptools import setup\n\nns = \"eregs_ns.parser\" # The namespace for regulations-parser extensions.\nfs = \"atf_regparser\" # The directory name for the package.\nentry_points = {\n \"%s.preprocessors\" % ns: [\n \"USCode = %s.preprocs:USCode\" % fs\n ],\n \"%s.test_suite\" % ns: [\n \"testsuite = %s.tests\" % fs\n ]\n}\n\nsetup(\n name=fs,\n version=\"1.0.0\",\n packages=[fs],\n classifiers=[\n 'License :: Public Domain',\n 'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication'\n ],\n entry_points=entry_points\n)\n","sub_path":"eregs_extensions/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"207304972","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom jinja2 import Template\nfrom sqlalchemy.orm import sessionmaker\nfrom models import DB_ENGINE, Products\n# from googletrans import Translator\n\n# t = Translator()\nsess = sessionmaker(DB_ENGINE)\nsession = sess()\nprice = open('template.html').read()\ntemplate = Template(price)\n\n\ndef create_category_price(categories=[]):\n for cat in categories:\n arr = cat.split(\"|\")\n cat_name = arr[-1]\n products = session.query(Products).order_by(Products.name).filter(Products.category==cat).filter(Products.instock==True).limit(1000).all()\n if len(products)>20:\n print(cat)\n data = dict(cat_name=cat_name, products=products)\n out = template.render(data=data)\n with open(\"{}.html\".format(cat.replace('/','_').replace('|','_')), \"w\") as html:\n print(out, file=html)\n\ndef get_categories_list():\n cats = session.query(Products.category).distinct(Products.category).filter(Products.instock == 
True).all()\n cat_list = []\n for i in cats:\n cat_list.append(i[0])\n return cat_list\n\n\nif __name__ == '__main__':\n\n cats = get_categories_list()\n create_category_price(categories=cats)\n","sub_path":"price.py","file_name":"price.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"578558592","text":"#create services here\nfrom educationApp.models import Datas, Index\nfrom difflib import SequenceMatcher\nimport os\nimport educationApp.utils as utils\nfrom educationApp.tokenizer import Analyzer\nfrom educationApp.gensimUtils import Similar\nclass DataService:\n def __init__(self):\n pass\n\n def getDatas(self, name):\n datas = self.getDatasByIndex(name)\n return datas\n \n def getAllIndex(self, name):\n matcherService = NameMatcherService()\n allIndex = matcherService.getAllIndex(name)\n return allIndex\n\n def getDatasByIndex(self, index):\n dataName = index\n fileDatas = utils.readJsonFromTxtFile('/Users/liug/Documents/dataSearch/files/' + dataName + '.txt')\n dataNodes = fileDatas['returndata']['datanodes']\n nodesNames = [{'cname':cname['cname'], 'code':cname['code']} for cname in fileDatas['returndata']['wdnodes'][0]['nodes']]\n colName = []\n col2018 = []\n col2017 = []\n col2016 = []\n col2015 = []\n col2014 = []\n col2013 = []\n col2012 = []\n col2011 = []\n col2010 = []\n col2009 = []\n for nodeName in nodesNames:\n cname = nodeName['cname']\n code = nodeName['code']\n colName.append(cname)\n for dataNode in dataNodes:\n if(dataNode['code'] == 'zb.' + code + '_sj.2018'):\n col2018.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2017'):\n col2017.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2016'):\n col2016.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2015'):\n col2015.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2014'):\n col2014.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2013'):\n col2013.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2012'):\n col2012.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2011'):\n col2011.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2010'):\n col2010.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' 
+ code + '_sj.2009'):\n                    col2009.append(dataNode['data']['data'])     \n        \n        tableData = []\n        row = 0\n        for col in colName:\n            data = {\"name\": col, \"2018\":col2018[row], \"2017\":col2017[row], \"2016\":col2016[row],\n                    \"2015\":col2015[row], \"2014\":col2014[row], \"2013\":col2013[row], \"2012\":col2012[row],\n                    \"2011\":col2011[row], \"2010\":col2010[row], \"2009\":col2009[row]}\n            tableData.append(data)\n            row = row + 1\n        '''\n        tableDatas = {'指标':colName,\n                     '2018': col2018,'2017':col2017,'2016':col2016,'2015':col2015,'2014':col2014,'2013':col2013,'2012':col2012,'2011':col2011,'2010':col2010,'2009':col2009}\n        '''\n        datas = Datas('', index)\n        datas.setDatas(tableData)\n        return datas\n    \nclass NameMatcherService:\n    def __init__(self):\n        pass\n    \n    def createWordsCorpora(self):\n        corpora = []\n        for filename in os.listdir('/Users/liug/Documents/dataSearch/files'):\n            if filename.endswith(\".txt\") :\n                corpora.append(filename[:-4])\n        return corpora\n    \n    def getIndex(self, nameForMatch):\n        analyzer = Analyzer()\n        segWords = analyzer.cutWords(nameForMatch)\n        wordCorpora = self.createWordsCorpora()\n        sim = Similar(Similar.EDUCATION, wordCorpora)\n        similaries = sim.similary(nameForMatch)\n        index = ''\n        for documentNumber, score in sorted(enumerate(similaries), key=lambda x: x[1], reverse=True):\n            index = wordCorpora[documentNumber]\n            break\n        return index\n    \n    def getAllIndex(self, nameForMatch):\n        analyzer = Analyzer()\n        segWords = analyzer.cutWords(nameForMatch)\n        wordCorpora = self.createWordsCorpora()\n        sim = Similar(Similar.EDUCATION, wordCorpora)\n        similaries = sim.similary(nameForMatch)\n        allIndex = []\n        for documentNumber, score in sorted(enumerate(similaries), key=lambda x: x[1], reverse=True):\n            index = wordCorpora[documentNumber]\n            allIndex.append(index)\n        return allIndex\n    \n    \n    ","sub_path":"educationApp/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"637628843","text":"\"\"\"\ninstabot example\n\nworkflow:\n    mention [@user] in comment section\n\"\"\"\nimport os\nimport sys\nimport argparse\n\nsys.path.append(os.path.join(sys.path[0], '../'))\nfrom instabot import Bot\n\nparser = argparse.ArgumentParser(add_help=True)\nparser.add_argument('-u', type=str, help=\"username\")\nparser.add_argument('-p', type=str, help=\"password\")\nparser.add_argument('-proxy', type=str, help=\"proxy\")\nparser.add_argument('user', type=str, help='user')\nparser.add_argument('nfollowers', type=int, help='nfollowers')\nargs = parser.parse_args()\n\nbot = Bot()\nbot.login()\n\nuserID = bot.get_user_id_from_username(args.user)\nsomeones_followers = bot.api.get_total_followers_or_followings(userID,\n                                                                amount=args.nfollowers,\n                                                                filter_private=False,\n                                                                filter_business=False,\n                                                                filter_verified=False,\n                                                                usernames=True,)\n\nmedias = bot.get_your_medias()\nmedia_to_comment = medias[0]\n\nfor usr in someones_followers:\n    comment = '@' + usr\n    bot.api.comment(media_to_comment, comment)\n    bot.console_print('{} media commented with text: {}'.format(media_to_comment, comment), 'green')\n    bot.total['comments'] += 1\n    bot.delay('comment')\n\n'''\n# get a list of users to mention and store in a text file\ninput(colored(\"what user followers do you want to scrape ? 
: \", 'red')) # scrape users followers\nwith open('someones_followers_scrape.txt', 'w') as file:\n file.write(someones_followers)\npages_to_scrape = bot.read_list_from_file(\"someones_followers_scrape.txt\") # reading passed \"someones followers to scrape\"\nf = open(\"scrappedFOLLOWERS.txt\", \"w\") # stored list of \"Someone's Followers\"\nfor follower in pages_to_scrape:\n users = bot.get_user_followers(follower,30)\nfor userfollowers in users:\n f.write(userfollowers + \"\\n\")\nprint(colored(\"\\n\" + \"successfully written Someone's Followers , to textfile scrappedFOLLOWERS.txt\", 'green'))\nf.close()\n\n# convert passed scrapped followers to usernames\n\n\nprint(colored(\"Converting scrappedFOLLOWERS.txt to usernames, MIGHT TAKE AWHILE!!!!\", 'red'))\nwusers = bot.read_list_from_file(\"scrappedFOLLOWERS.txt\")\nwith open(\"usernamelist.txt\", 'w+') as f:\n\tfor list in wusers:\n\t\tusernames=bot.get_username_from_user_id(list) + '\\n'\n\t\tf.write(usernames)\n\tprint(colored(\"succesfully converted \" + str(wusers), 'green'))\n\n# append '@' to scrapped list\n\n\nprint(\"adding '@' to usernames\")\nappendText = '@'\nfollowlist = open(\"usernamelist.txt\", 'r')\nupdatedList = open(\"mentionlist.txt\", 'w')\nfor name in followlist:\n updatedList.write(appendText + name.rstrip() + '\\n')\nupdatedList.close()\nprint(colored(\"succesfully appended '@' to usernames\", 'green'))\n\n\n# comment @users on last media post\nmedias=bot.get_your_medias()\nwhile True:\n\tbot.comment_medias([medias[0]])\n'''","sub_path":"mention.py","file_name":"mention.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"117104369","text":"import os\ncount = 0\ntry:\n os.remove(\"C:\\\\Users\\\\dell\\\\Documents\\\\UiPath\\\\BEML_Workflow1set\\\\Python_Script\\\\CSV Files\\\\beml.csv\")\nexcept:\n pass\nwith open(\"C:\\\\Users\\\\dell\\\\Documents\\\\UiPath\\\\BEML_Workflow1set\\\\Python_Script\\\\Input text_files\\\\Capture.txt\", \"r\") as fp:\n Lines = fp.readlines()\n title = \"MED_ITEM_ID,PARTICULARS,QTY,PRICE,NET_AMT\"\n with open(\"C:\\\\Users\\\\dell\\\\Documents\\\\UiPath\\\\BEML_Workflow1set\\\\Python_Script\\\\CSV Files\\\\beml.csv\", \"a+\") as ff:\n title = title.replace(\" \",\"\")\n ff.write(title + \"\\n\")\n nextLineNewRecord = False\n startRecording = False\n headerIsMedicineOrMaterial = False\n rows = []\n row = \"\"\n csv = \"\"\n temp = \"\"\n for line in Lines:\n if (startRecording):\n line_s = line.strip().replace(\"\\\\''\",\"\")\n print(line_s)\n if line_s:\n if line_s.startswith(\"Sub Total\"):\n nextLineNewRecord = True\n headerIsMedicineOrMaterial = False\n else:\n nextLineNewRecord = False\n if nextLineNewRecord:\n rows.append(row)\n row = \"\"\n else:\n if row:\n if not headerIsMedicineOrMaterial:\n row = row + \" \" + line_s\n else:\n data = line_s.split()\n if not line_s.__contains__(\"Packed:\"):\n temp = temp + \" \" + line_s\n try:\n a = float(data[len(data) - 1])\n a = float(data[len(data) - 2])\n a = float(data[len(data) - 3])\n row = row + \"[\" + temp.replace(\"Charged :\",\"\")\n temp = \"\"\n except Exception as e:\n pass\n else:\n row = line_s\n if line_s.__contains__(\"Medicines\") or line_s.__contains__(\"Materials\"):\n headerIsMedicineOrMaterial = True\n else:\n if line.strip().startswith(\"Ref\"):\n startRecording = True\n if row:\n rows.append(row)\n\n for data in rows:\n dataSub = data.strip('\"').split(\"[\")\n header = dataSub[0].split()\n if header or len(dataSub) > 
1:\n            if header:\n                SrNo = header[0]\n                headerText = header[1:]\n                with open(\"C:\\\\Users\\\\dell\\\\Documents\\\\UiPath\\\\BEML_Workflow1set\\\\Python_Script\\\\CSV Files\\\\beml.csv\",\"a+\") as ff:\n                    ff.write(SrNo + \",\" + \" \".join(headerText) + \"\\n\")\n            for dd in dataSub[1:]:\n                data_s = dd.strip('\"').split()\n                l = len(data_s)\n                particulars = \"\"\n                quantity = \"\"\n                price = \"\"\n                amount = \"\"\n                for i, d in enumerate(data_s):\n                    if (i < l - 3) or l < 3:\n                        if particulars:\n                            particulars = particulars + \" \" + d\n                        else:\n                            particulars = d\n                    elif i == l - 3:\n                        quantity = d\n                    elif i == l - 2:\n                        price = d\n                    elif i == l - 1:\n                        amount = d\n                particulars=particulars.replace(\"'\",\"''\")\n\n                if particulars.__contains__(\"]\"):\n                    csv = ',\"[' + particulars.replace('\"','\"\"') + '\",' + quantity + ',' + price + ',' + amount\n                else:\n                    csv = ',\"' + particulars.replace('\"','\"\"') + '\",' + quantity + ',' + price + ',' + amount\n                with open(\"C:\\\\Users\\\\dell\\\\Documents\\\\UiPath\\\\BEML_Workflow1set\\\\Python_Script\\\\CSV Files\\\\beml.csv\",\"a+\") as ff:\n                    ff.write(csv + \"\\n\")\n","sub_path":"BEML_Workflow1set/Python_Script/Detailed_Bill.py","file_name":"Detailed_Bill.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"61273494","text":"def test_visit_google_com_returns_page_with_Google_in_title(browser):\n    b = browser\n    b.visit(\"https://www.google.com/\")\n    assert \"Google\" in b.title\n\n\ndef test_headless_visit_google_com_returns_page_with_Google_in_title(headless_browser):\n    b = headless_browser\n    b.visit(\"https://www.google.com/\")\n    assert \"Google\" in b.title\n\n\ndef test_chrome_fill_github_in_google_search_box_returns_github_website(chrome_browser):\n    b = chrome_browser\n    b.visit(\"https://www.google.com/\")\n    b.fill(\"q\", \"github\")\n    search_button = b.find_by_name(\"btnK\")\n    b.wait_for(search_button.is_displayed, timeout=1.5)\n    search_button.click()\n    b.wait(1)\n    assert b.find_by_text(\"github.com\")\n","sub_path":"tests/test_example.py","file_name":"test_example.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"85599959","text":"# astrologers stars\nprint(\"give the no. of rows\")\nrows = int(input())\nprint(\"write true(1) or false(2) \")\nx = int(input())\nif x == 1:\n    for i in range (1,rows+1):\n        print(i*\"*\")\nelif x == 2:\n    for i in range (1, rows + 1):\n        i1 = rows+1 - i\n        print(i1*\"*\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"23543341","text":"from itertools import (tee, islice)\n\ndef pairwise(iterable):\n    '''s -> (s0,s1), (s1,s2), (s2, s3), ...\n    from https://docs.python.org/3/library/itertools.html#itertools-recipes\n    '''\n    a, b = tee(iterable)\n    next(b, None)\n    return zip(a, b)\n\ndef in_chunks(iterable, size):\n    '''s, 3 -> [s0,s1,s2], [s3,s4,s5], ...'''\n    iterator = iter(iterable)  # use one iterator so each islice call makes progress\n    while True:\n        chunk = list(islice(iterator, size))\n        if not chunk:\n            return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+\n        yield chunk\n\ndef add_items(dictionary, items):\n    result = dictionary.copy()\n    result.update(items)\n    return result\n","sub_path":"Homework2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"575030808","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('success_game/', views.succ_game, name='success_game'),\n path('ind_player//', views.player_view, name='ind_player')\n\n]\n","sub_path":"elo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"183325646","text":"\"\"\"\r\nAuthor: Hrishee Shastri\r\nMay 2019\r\nGenetic Algorithm for optimization of scalar functions with vector input. \r\n\"\"\"\r\n\r\nfrom chromosome import *\r\nimport os\r\nimport math\r\n\r\ndef GA_SEARCH(mutrate, crossrate, popsize, gens, rep, file, fn, interval, key=min):\r\n \"\"\"\r\n Executes a genetic algorithm to optimize a mathematical function fn. Returns a pair (X,y) where X is an input vector and y is the optimized fn(X)\r\n mutrate -- mutation rate, between 0 and 1 inclusive\r\n crossrate -- crossover rate, between 0 and 1 inclusive\r\n popsize -- positive even integer population size to be maintained throughout iteration\r\n gens -- a number greater than 0 that specifies the number of generations to iterate through\r\n rep -- representation function to be used (instance of Representation class). Maps from bitstrings to real numbers in the given interval\r\n Pass the function object (e.g. GRAY_CODE)\r\n file -- text file name to write output to (not the same as console output -- file output writes every generation, while\r\n console output only writes when an improvement has been made)\r\n fn -- the real valued mathematical function to be optimized, wrapped in a TestFn object. fn : R^n --> R (i.e. vector valued inputs, scalar valued outputs).\r\n interval -- A 3-tuple (start, end, step) inclusive that constrains the search space for fn. In other words, each entry x_i in the input vector \r\n is constrained by x_i \\in [start,end] with step increments. Make sure fn is continuous along every point in the interval (e.g. no ZeroDivisionErrors).\r\n W -- scaling window = 1\r\n S -- selection strategy = E \r\n key -- min for function minimization and max for function maximization \r\n \"\"\"\r\n\r\n assert popsize > 0, \"popsize is not positive\"\r\n assert 0 <= mutrate and mutrate <= 1, \"invalid mutation rate\"\r\n assert 0 <= crossrate and crossrate <= 1, \"invalid crossover rate\"\r\n assert gens > 0, \"num of generations not positive\"\r\n\r\n# print(\"Initializing...\")\r\n\r\n # Initialize representation \r\n REP = rep(interval)\r\n\r\n# print(key.__name__.upper() + \"IMIZING \" + str(fn).upper() + \" (\" + REP.get_name() + \")\")\r\n\r\n\r\n f = open(os.path.join(\"caruana_data\", file + \".txt\"), 'w')\r\n #g = open(os.path.join(\"caruana_data\", file + \"best_sol\" + \".txt\"), 'w')\r\n\r\n # Initialize random population\r\n EVAL_LIMIT = 5000\r\n EVALS = 0\r\n curr_gen = 1\r\n POP = []\r\n dim = fn.get_input_dimension()\r\n\r\n for i in range(0, popsize):\r\n vec = \"\"\r\n for n in range(dim):\r\n vec += REP.get_random_bitstr()\r\n chrom = Chromosome(REP, vec)\r\n POP.append(chrom)\r\n\r\n assert len(POP) == popsize, \"POP has incorrect number of elements\"\r\n\r\n\r\n # evaluate population \r\n #print(\"Evolving...\")\r\n # Fitness map is not performance value. 
It is just the evaluation of the objective function to be minimized.\r\n FITNESS_MAP = {chrom:chrom.eval_fitness(fn) for chrom in POP}\r\n\r\n # scaling window of 1\r\n if key == min:\r\n best = math.inf\r\n f_prime = max(FITNESS_MAP.values())\r\n else:\r\n best = -math.inf\r\n f_prime = min(FITNESS_MAP.values())\r\n\r\n for k in POP:\r\n # f.write(str(k.performance_value(FITNESS_MAP, f_prime, key)))\r\n # f.write(\"\\t\")\r\n f.write(str(FITNESS_MAP[k]))\r\n f.write(\"\\n\")\r\n EVALS += 1\r\n\r\n #g.write(str(key(FITNESS_MAP.values())) + \"\\n\")\r\n # Evolve\r\n while EVALS < EVAL_LIMIT:\r\n curr_gen += 1\r\n child_POP = []\r\n new_children = [] # new individuals not from previous generation. Child_pop is the entire population that will replace POP.\r\n # new_children keeps track of the individuals that are not from previous generation\r\n for i in range(popsize//2):\r\n #parent1, parent2 = wheel_selection(POP, FITNESS_MAP, f_prime, key)\r\n #parent1, parent2 = stochastic_universal_sampling(POP, FITNESS_MAP, f_prime, key)\r\n parent1, parent2 = rank_selection(POP, FITNESS_MAP, f_prime, key)\r\n \r\n if random.uniform(0,1) <= crossrate:\r\n child1, child2 = parent1.crossover(parent2)\r\n else:\r\n child1, child2 = parent1, parent2\r\n\r\n child1 = child1.mutate(mutrate)\r\n child2 = child2.mutate(mutrate)\r\n\r\n if child1 != parent1 and child1 != parent2:\r\n new_children.append(child1)\r\n if child2 != parent1 and child2 != parent2:\r\n new_children.append(child2)\r\n\r\n\r\n child_POP.append(child1)\r\n child_POP.append(child2)\r\n\r\n # elitist replacement\r\n best_chrom = key(FITNESS_MAP, key = FITNESS_MAP.get)\r\n if best_chrom not in child_POP:\r\n child_POP.append(best_chrom)\r\n\r\n POP = child_POP.copy()\r\n\r\n assert len(POP) == popsize or len(POP) == popsize + 1, \"popsize not maintained after next generation\"\r\n FITNESS_MAP = {chrom:chrom.eval_fitness(fn) for chrom in POP}\r\n\r\n # scaling window of 1, so recompute f_prime every generation\r\n if key == min:\r\n f_prime = max(FITNESS_MAP.values())\r\n else:\r\n f_prime = min(FITNESS_MAP.values())\r\n\r\n for new in new_children:\r\n # f.write(str(new.performance_value(FITNESS_MAP, f_prime, key)))\r\n # f.write(\"\\t\")\r\n f.write(str(FITNESS_MAP[new]))\r\n f.write(\"\\n\")\r\n EVALS += 1\r\n if EVALS == EVAL_LIMIT:\r\n break \r\n\r\n #g.write(str(key(FITNESS_MAP.values())) + \"\\n\")\r\n\r\n# print(\"All \" + str(EVALS) + \" fitness evals completed\")\r\n","sub_path":"comparison-GA/optimizationGA.py","file_name":"optimizationGA.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"409240288","text":"class Hamster:\n def __init__(self, n, v, k):\n self.navn = n\n self.vekt = v\n self.kjonn = k\n \n self.over = None\n self.under = None\n self.venstre = None\n self.hoyre = None\n \n def __eq__(self, hamster):\n if (self.navn == hamster.navn and self.kjonn == hamster.kjonn):\n return True\n else:\n return False\n \n def hent_info(self):\n tekst = \"\"\n tekst += \"\\nNavn: \" + self.navn\n tekst += \"\\nVekt: \" + self.vekt\n tekst += \"\\nKjonn: \" + self.kjonn\n return tekst\n \n def __str__(self):\n return self.navn\n ","sub_path":"uke11/hamster.py","file_name":"hamster.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"599738811","text":"class MoveError(BaseException):\n pass\n\n\nclass World(object):\n \n def __init__(self, 
size=100):\n self.size = size\n self.directions = ['n', 'e', 's', 'w']\n self.obstacles = {}\n \n def get(self, loc):\n return self.obstacles.get('{}_{}'.format(loc[0], loc[1]))\n \n def put(self, loc, obj):\n self.obstacles['{}_{}'.format(loc[0], loc[1])] = obj;\n\n\nclass Rover(object):\n\n def __init__(self, world, x=0, y=0, direction='n'):\n self.world = world\n self.location = [x, y]\n self.dirAngle = self.world.directions.index(direction)\n\n def command(self, cmds):\n for cmd in cmds:\n if cmd == 'f':\n self.move(1)\n elif cmd == 'r':\n self.turn(1)\n elif cmd == 'l':\n self.turn(-1)\n elif cmd == 'b':\n self.move(-1)\n else:\n raise TypeError()\n\n @property\n def direction(self):\n return self.world.directions[self.dirAngle]\n\n def move(self, distance):\n axis = (self.dirAngle+1) % 2\n headingNE = 1 if self.dirAngle < 2 else -1\n new_loc = list(self.location)\n new_loc[axis] = (new_loc[axis] + headingNE * distance) % self.world.size\n if self.world.get(new_loc):\n raise MoveError()\n self.location = new_loc\n \n def turn(self, angle):\n self.dirAngle = (self.dirAngle + angle) % len(self.world.directions)","sub_path":"jpospychala/rover.py","file_name":"rover.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"381407994","text":"# Build a standalone function to remove strings of even lengths from a given array. Given ['Nope!', 'Its', 'kris', 'starting', 'with', 'K!', '(instead', 'of', 'Chris', 'with', 'C)', '.'], change it to ['Nope!', 'Its', 'Chris', '.']\n\ndef removeEven(strArr):\n i = 0\n while i < len(strArr):\n if (len(strArr[i]) % 2 == 0):\n strArr.remove(strArr[i])\n else:\n i += 1\n\nmyStrArr = ['Nope!', 'Its', 'kris', 'starting', 'with', 'K!', '(instead', 'of', 'Chris', 'with', 'C)', '.']\nprint(\"The original array is {}\".format(myStrArr))\nremoveEven(myStrArr)\nprint(\"The changed array is {}\".format(myStrArr))\n","sub_path":"Chapter-04-Strings-AssociativeArrays/Remove-Even-Length-Strings/Remove-Even-Length-Strings.py","file_name":"Remove-Even-Length-Strings.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"137456818","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom pylab import *\r\nimport os.path\r\n\r\ncol_names = ['host', 'space1', 'space2', 'timestamp', 'timezone', 'request', 'code', 'bytes']\r\ndf1 = pd.read_csv('log.csv', delim_whitespace=True, names=col_names, header=None,\r\n error_bad_lines=False); \r\n# read csv data and place into dataframe\r\n\r\ndf1['freq'] = df1.groupby('code')['code'].transform('count');\r\n\r\ndf2 = df1[df1.code != 200]\r\n\r\n\r\n\r\n\r\ncount = 0\r\nb = 0\r\nresult = [] \r\n# compare every pair of index values, mirroring the reference block below\r\nfor c in df2.index:\r\n for d in df2.index:\r\n if abs(d - c) <= 2:\r\n count = count + 1\r\n b = b + 1\r\n result.append(str(c)+','+str(count))\r\n count = 0\r\n b = 0 \r\n \r\n\r\n\r\n\r\n''' \r\na = [200,4,5,7,8,11]\r\ncount = 0\r\nb = 0\r\nresult = []\r\nfor c in a:\r\n for d in a:\r\n if abs(d - c) <= 2:\r\n count = count + 1\r\n b = b + 1\r\n result.append(str(c)+','+str(count))\r\n count = 0\r\n b = 0 \r\n \r\n'''\r\n\r\n ","sub_path":"insight_testsuite/tests/test_features/log_input/feature4.py","file_name":"feature4.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"427866","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\n\"\"\"Tests for the shared functionality for Elasticsearch output modules.\"\"\"\n\nimport unittest\n\ntry:\n from mock import MagicMock\nexcept ImportError:\n from unittest.mock import MagicMock\n\nfrom dfvfs.path import fake_path_spec\n\nfrom plaso.containers import events\nfrom plaso.lib import definitions\nfrom plaso.output import shared_elastic\n\nfrom tests.containers import test_lib as containers_test_lib\nfrom tests.output import test_lib\n\n\nclass TestElasticsearchOutputModule(\n shared_elastic.SharedElasticsearchOutputModule):\n \"\"\"Elasticsearch output module for testing.\"\"\"\n\n def _Connect(self):\n \"\"\"Connects to an Elasticsearch server.\"\"\"\n self._client = MagicMock()\n\n\n@unittest.skipIf(shared_elastic.elasticsearch is None, 'missing elasticsearch')\nclass SharedElasticsearchOutputModuleTest(test_lib.OutputModuleTestCase):\n \"\"\"Tests the shared functionality for Elasticsearch output modules.\"\"\"\n\n # pylint: disable=protected-access\n\n _TEST_EVENTS = [\n {'a_binary_field': b'binary',\n 'data_type': 'syslog:line',\n 'filename': 'log/syslog.1',\n 'hostname': 'ubuntu',\n 'my_number': 123,\n 'some_additional_foo': True,\n 'path_spec': fake_path_spec.FakePathSpec(\n location='log/syslog.1'),\n 'text': (\n 'Reporter PID: 8442 (pam_unix(cron:session): session\\n '\n 'closed for user root)'),\n 'timestamp': '2012-06-27 18:17:01+00:00',\n 'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN}]\n\n def testConnect(self):\n \"\"\"Tests the _Connect function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertIsNone(output_module._client)\n\n output_module._Connect()\n\n self.assertIsNotNone(output_module._client)\n\n def testCreateIndexIfNotExists(self):\n \"\"\"Tests the _CreateIndexIfNotExists function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n output_module._Connect()\n output_module._CreateIndexIfNotExists('test', {})\n\n def testFlushEvents(self):\n \"\"\"Tests the _FlushEvents function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n output_module._Connect()\n output_module._CreateIndexIfNotExists('test', {})\n\n event, event_data, event_data_stream = (\n containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))\n output_module._InsertEvent(event, event_data, event_data_stream, None)\n\n self.assertEqual(len(output_module._event_documents), 2)\n self.assertEqual(output_module._number_of_buffered_events, 1)\n\n output_module._FlushEvents()\n\n self.assertEqual(len(output_module._event_documents), 0)\n self.assertEqual(output_module._number_of_buffered_events, 0)\n\n def testGetSanitizedEventValues(self):\n \"\"\"Tests the _GetSanitizedEventValues function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n event, event_data, event_data_stream = (\n containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))\n\n event_tag = events.EventTag()\n event_tag.AddLabel('Test')\n\n event_values = output_module._GetSanitizedEventValues(\n 
event, event_data, event_data_stream, event_tag)\n\n expected_event_values = {\n 'a_binary_field': 'binary',\n 'data_type': 'syslog:line',\n 'datetime': '2012-06-27T18:17:01.000000Z',\n 'display_name': 'FAKE:log/syslog.1',\n 'filename': 'log/syslog.1',\n 'hostname': 'ubuntu',\n 'message': '[',\n 'my_number': 123,\n 'path_spec': (\n '{\"__type__\": \"PathSpec\", \"location\": \"log/syslog.1\", '\n '\"type_indicator\": \"FAKE\"}'),\n 'some_additional_foo': True,\n 'source_long': 'Log File',\n 'source_short': 'LOG',\n 'tag': ['Test'],\n 'text': ('Reporter PID: 8442 (pam_unix(cron:session): '\n 'session\\n closed for user root)'),\n 'timestamp': 1340821021000000,\n 'timestamp_desc': 'Content Modification Time',\n }\n\n self.assertIsInstance(event_values, dict)\n self.assertEqual(event_values, expected_event_values)\n\n def testInsertEvent(self):\n \"\"\"Tests the _InsertEvent function.\"\"\"\n event, event_data, event_data_stream = (\n containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))\n\n output_mediator = self._CreateOutputMediator()\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n output_module._Connect()\n output_module._CreateIndexIfNotExists('test', {})\n\n self.assertEqual(len(output_module._event_documents), 0)\n self.assertEqual(output_module._number_of_buffered_events, 0)\n\n output_module._InsertEvent(event, event_data, event_data_stream, None)\n\n self.assertEqual(len(output_module._event_documents), 2)\n self.assertEqual(output_module._number_of_buffered_events, 1)\n\n output_module._InsertEvent(event, event_data, event_data_stream, None)\n\n self.assertEqual(len(output_module._event_documents), 4)\n self.assertEqual(output_module._number_of_buffered_events, 2)\n\n output_module._FlushEvents()\n\n self.assertEqual(len(output_module._event_documents), 0)\n self.assertEqual(output_module._number_of_buffered_events, 0)\n\n def testClose(self):\n \"\"\"Tests the Close function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n output_module._Connect()\n\n self.assertIsNotNone(output_module._client)\n\n output_module.Close()\n\n self.assertIsNone(output_module._client)\n\n def testSetFlushInterval(self):\n \"\"\"Tests the SetFlushInterval function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertEqual(\n output_module._flush_interval, output_module._DEFAULT_FLUSH_INTERVAL)\n\n output_module.SetFlushInterval(1234)\n\n self.assertEqual(output_module._flush_interval, 1234)\n\n def testSetIndexName(self):\n \"\"\"Tests the SetIndexName function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertIsNone(output_module._index_name)\n\n output_module.SetIndexName('test_index')\n\n self.assertEqual(output_module._index_name, 'test_index')\n\n def testSetPassword(self):\n \"\"\"Tests the SetPassword function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertIsNone(output_module._password)\n\n output_module.SetPassword('test_password')\n\n self.assertEqual(output_module._password, 'test_password')\n\n def testSetServerInformation(self):\n \"\"\"Tests the SetServerInformation 
function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertIsNone(output_module._host)\n self.assertIsNone(output_module._port)\n\n output_module.SetServerInformation('127.0.0.1', 1234)\n\n self.assertEqual(output_module._host, '127.0.0.1')\n self.assertEqual(output_module._port, 1234)\n\n def testSetUsername(self):\n \"\"\"Tests the SetUsername function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertIsNone(output_module._username)\n\n output_module.SetUsername('test_username')\n\n self.assertEqual(output_module._username, 'test_username')\n\n def testWriteEventBody(self):\n \"\"\"Tests the WriteEventBody function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n output_module._Connect()\n output_module._CreateIndexIfNotExists('test', {})\n\n self.assertEqual(len(output_module._event_documents), 0)\n self.assertEqual(output_module._number_of_buffered_events, 0)\n\n event, event_data, event_data_stream = (\n containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))\n output_module.WriteEventBody(event, event_data, event_data_stream, None)\n\n self.assertEqual(len(output_module._event_documents), 2)\n self.assertEqual(output_module._number_of_buffered_events, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/output/shared_elastic.py","file_name":"shared_elastic.py","file_ext":"py","file_size_in_byte":9081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"521912252","text":"'''\r\nThis scripts is to caculate the frequency of diffrent length miRNA in five files.\r\n'''\r\n\r\nimport re\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport csv\r\nimport os\r\n\r\n\r\ndef CountNumbers(file):\r\n \"count the number of different lenth of the sequences.\"\r\n with open(r\"{}\".format(file), \"r\") as f: # open files\r\n reads = f.read()\r\n\r\n pattern = re.compile(\"[A-Z]+\")\r\n match_list = re.findall(pattern, reads)\r\n\r\n count = dict()\r\n for i in range(17, 36):\r\n count[i] = 0\r\n for read in match_list:\r\n L = len(read)\r\n count[L] += 1\r\n\r\n ### a more pythonic method to caculate the number of various length of miRNA in the fasta file\r\n # from collections import Counter\r\n # length = [len(seq) for seq in match_list]\r\n # count = Counter(length)\r\n\r\n count = sorted(count.items(), key=lambda d: d[0], reverse=False)\r\n return count\r\n\r\n\r\ndef LengthToNumbers(file):\r\n \"return a tuple, in which every element contains length and numbers.\"\r\n cunt = CountNumbers(file)\r\n length = []\r\n numbers = []\r\n for i in range(len(cunt)):\r\n length.append(cunt[i][0])\r\n numbers.append(cunt[i][1])\r\n return length, numbers\r\n\r\n\r\nnumberlist = []\r\nlengthlist = [i for i in range(17, 36)]\r\nos.chdir(r\"D:/miRNA\")\r\nfilename = os.listdir()\r\n\r\nfor file in filename:\r\n length, numbers = LengthToNumbers(file)\r\n numberlist.append(numbers)\r\n\r\nwith open(r\"D:/PY sumbline coding/count.csv\", \"a\", encoding=\"utf-8\") as cf:\r\n csvfile = csv.writer(cf)\r\n csvfile.writerow(lengthlist)\r\n csvfile.writerows(numberlist)\r\n\r\n\r\n# visualize these 
data\r\ndata=pd.read_csv('count.csv').T\r\ndata.columns=[\"F1\",\"F2\",\"F3\",\"F4\",\"F5\"]\r\ndata.plot.bar(rot=0)\r\nplt.xlabel(\"Length of miRNA\")\r\nplt.ylabel(\"Frequency of Different miRNA\")\r\nplt.title(\"The Number of miRNAs of Various Lengths\")\r\n# save before show(): show() leaves an empty current figure behind in most backends\r\nplt.savefig('countFrequency.png',dpi=400,bbox_inches='tight')\r\nplt.show()","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"156937616","text":"from itertools import combinations\nimport numpy as np\nimport copy\n\n# enumerate the primes below 100\nprime_list = []\nfor i in range(2, 100):\n flag = 0\n for j in range(2, i//2+1):\n if(i % j == 0):\n flag = 1\n break\n if flag == 0:\n prime_list.append(i)\n\nnum = len(prime_list)\nsub = sum_all = sum(prime_list)\n#print(sum_all)\n\nsa_list = []\nsb_list = []\n\n# exhaustive search over all combinations\nfor k in range(num+1):\n for comb in combinations(prime_list, k):\n # keep this split if its imbalance is below the current best\n if sub > abs((sum_all-sum(comb))-sum(comb)):\n sa_list.clear()\n for p in comb:\n sa_list.append(p)\n sub = abs((sum_all-sum(comb))-sum(comb))\n\nprint(\"SA>{}\".format(sa_list))\nfor t in prime_list:\n if t not in sa_list:\n sb_list.append(t)\nprint(\"SB>{}\".format(sb_list))\n","sub_path":"0527/2210104053/problem7_1.py","file_name":"problem7_1.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"269244504","text":"import copy\n\nmsr_training_utf8 = open('msr_training.utf8.ic', 'r', encoding='utf-8')\nmsr_str = msr_training_utf8.read()\nmsr_training_utf8.close()\nmsr_list = msr_str.split('\\n')\n\nhead_dict = {\n 'B':0,\n 'M':1,\n 'E':2,\n 'S':3,\n}\nreverse_head_dict ={\n 0:'B',\n 1:'M',\n 2:'E',\n 3:'S',\n}\n\nmsr_pro_dict_txt = open('msr_pro_dict.txt', 'r', encoding='utf-8')\nmsr_pro_dict = eval(msr_pro_dict_txt.read())\nmsr_pro_dict_txt.close()\n\nlast_word_pro_dict_txt = open('last_word_pro_dict.txt', 'r', encoding='utf-8')\nlast_word_pro_dict = eval(last_word_pro_dict_txt.read())\nlast_word_pro_dict_txt.close()\n\nnext_word_pro_dict_txt = open('next_word_pro_dict.txt', 'r', encoding='utf-8')\nnext_word_pro_dict = eval(next_word_pro_dict_txt.read())\nnext_word_pro_dict_txt.close()\n\nmsr_double_array_pro_dict_txt = open('msr_double_array_pro_dict.txt', 'r', encoding='utf-8')\nmsr_double_array_pro_dict = eval(msr_double_array_pro_dict_txt.read())\nmsr_double_array_pro_dict_txt.close()\n\nstr_test = input('Enter a sentence: ')\nsegmentation_list = []\npath_list = []\n\nfor i in str_test:\n segmentation_list.append([i, 0, 0, 0, 0])\n\npath_list = copy.deepcopy(segmentation_list)\n\nfor i in range(len(segmentation_list)):\n if i == 0:\n # the first character has no predecessor, so only the following-word term and R are computed\n W_before = 0\n R = 0\n for j in range(4):\n string_last_word = segmentation_list[i][0] # e.g. '希', the current (first) character\n string_current_word = segmentation_list[i+1][0] # e.g. '腊', the following character\n if string_last_word+reverse_head_dict[j]+string_current_word in next_word_pro_dict:\n W_before = next_word_pro_dict[string_last_word+reverse_head_dict[j]+string_current_word]\n if string_last_word in msr_pro_dict:\n R = msr_pro_dict[string_last_word][j]\n segmentation_list[i][j + 1] = W_before + R\n continue\n\n if i < len(segmentation_list) - 1:\n # every middle character has both a predecessor and a successor\n string_last_word = segmentation_list[i - 1][0]\n string_current_word = segmentation_list[i][0]\n string_next_word = segmentation_list[i + 1][0]\n\n for a in range(4): # iterate over the four possible states of the current character\n last_word_pro = 0\n next_word_pro = 0\n R = 0\n P = 0\n # determine which state of the previous character led to this state of the current one\r\n\r\n 
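# --- Editor's aside (illustration only; toy numbers, not the trained MSR
# probabilities loaded above): the step below picks, for each tag 'a' of the
# current character, the predecessor tag 'b' (B/M/E/S) that maximises
# trans[b][a] * score[previous][b]. A standalone sketch of that argmax:
def best_prev_state(prev_scores, trans, a):
    best = 0
    for b in range(1, 4):
        if trans[b][a] * prev_scores[b] > trans[best][a] * prev_scores[best]:
            best = b
    return best

# with a uniform transition matrix, the best predecessor is simply the
# highest-scoring previous tag (index 1 here)
assert best_prev_state([0.1, 0.4, 0.3, 0.2], [[0.25] * 4 for _ in range(4)], 1) == 1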
max_index = 0\n for b in range(1, 4): # find the previous-character state that maximises the expression, and its index\n if string_last_word in msr_double_array_pro_dict:\n if msr_double_array_pro_dict[string_last_word][max_index][a] * segmentation_list[i-1][max_index+1] < \\\n msr_double_array_pro_dict[string_last_word][b][a] * segmentation_list[i-1][b+1]:\n max_index = b\n\n if string_last_word+string_current_word+reverse_head_dict[a] in last_word_pro_dict:\n last_word_pro = last_word_pro_dict[string_last_word+string_current_word+reverse_head_dict[a]]\n if string_current_word+reverse_head_dict[a]+string_next_word in next_word_pro_dict:\n next_word_pro = next_word_pro_dict[string_current_word+reverse_head_dict[a]+string_next_word]\n if string_current_word in msr_pro_dict:\n R = msr_pro_dict[string_current_word][a]\n if string_last_word in msr_double_array_pro_dict:\n P = msr_double_array_pro_dict[string_last_word][max_index][a]\n segmentation_list[i][a+1] = P * segmentation_list[i-1][max_index + 1] + \\\n last_word_pro + next_word_pro + R\n path_list[i][a+1] = max_index\n\n if i == len(segmentation_list) - 1:\n string_last_word = segmentation_list[len(segmentation_list) - 2][0]\n string_current_word = segmentation_list[len(segmentation_list) - 1][0]\n for a in range(4):\n last_word_pro = 0\n P = 0\n R = 0\n max_index = 0\n for b in range(1, 4): # find the previous-character state that maximises the expression, and its index\n if string_current_word in msr_double_array_pro_dict:\n if msr_double_array_pro_dict[string_current_word][max_index][a] * segmentation_list[i - 1][max_index + 1] < \\\n msr_double_array_pro_dict[string_current_word][b][a] * segmentation_list[i - 1][b + 1]:\n max_index = b\n if string_last_word+string_current_word+reverse_head_dict[a] in last_word_pro_dict:\n last_word_pro = last_word_pro_dict[string_last_word+string_current_word+reverse_head_dict[a]]\n if string_current_word in msr_double_array_pro_dict:\n P = msr_double_array_pro_dict[string_current_word][max_index][a]\n if string_current_word in msr_pro_dict:\n R = last_word_pro + msr_pro_dict[string_current_word][a]\n segmentation_list[len(segmentation_list) - 1][a+1] = \\\n P * segmentation_list[i - 1][max_index + 1] + R\n path_list[len(segmentation_list) - 1][a+1] = max_index\n\n# for i in segmentation_list:\n# print(i)\n# for i in path_list:\n# print(i)\n\nmax_index = 0\nfor i in range(1, 4):\n if segmentation_list[-1][i+1] > segmentation_list[-1][max_index+1]:\n max_index = i\nstr_sentence = ''\nindex = max_index\nstr_sentence = segmentation_list[-1][0] + reverse_head_dict[max_index] + str_sentence\nfor i in range(0, len(segmentation_list)-1):\n index = path_list[-1-i][index + 1]\n str_sentence = segmentation_list[-2-i][0] + reverse_head_dict[index] + str_sentence\n\nfor i in segmentation_list:\n print(i)\nfor i in path_list:\n print(i)\nprint(str_sentence)\n\nstring_sentence = ''\nfor s in str_sentence:\n if s == 'E' or s == 'S':\n string_sentence += ' '\n elif s == 'B' or s == 'M':\n continue\n else:\n string_sentence += s\nprint('Segmentation result: ' + string_sentence)","sub_path":"First/word_segmentation.py","file_name":"word_segmentation.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"107244530","text":"import requests\nimport urllib.request\nimport image2image as im\n\nurl = 'https://api.github.com/repos/pgrimaud/pgrimaud/stargazers'\n\n\ndef get_stargazers_from_api(api_url, start_page):\n # Without an API key in the params, you are limited to 60 API calls per hour\n params = {'format': 'json', 'page': start_page, 
'per_page': 100}\n response = requests.get(url=api_url, params=params)\n return response.json()\n\n\ndef get_all_users():\n page = 1\n avatars = []\n\n has_results = get_stargazers_from_api(url, page)\n while has_results:\n for result in has_results:\n avatars.append(result['avatar_url'] + '&s=30')\n print('Got page ' + str(page) + ' of stargazers')\n page += 1\n has_results = get_stargazers_from_api(url, page)\n\n return avatars\n\n\ndef download_avatars():\n counter = 1\n for avatar in get_all_users():\n urllib.request.urlretrieve(avatar, './avatars/avt' + str(counter) + '.png')\n counter += 1\n\n\nif __name__ == '__main__':\n print('Starting script')\n download_avatars()\n print('Avatars have been downloaded')\n im.main(im.get_args())\n print('Output has been created')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"180472712","text":"import logging\nfrom gi.repository import Gtk\n\nclass StreamblankToolbarController(object):\n\t\"\"\" Manages Accelerators and Clicks on the Composition Toolbar-Buttons \"\"\"\n\n\tdef __init__(self, drawing_area, win, uibuilder, warning_overlay):\n\t\tself.log = logging.getLogger('StreamblankToolbarController')\n\n\t\tself.warning_overlay = warning_overlay\n\n\t\tblank_sources = ['pause', 'nostream']\n\n\n\t\tlivebtn = uibuilder.find_widget_recursive(drawing_area, 'stream_live')\n\t\tblankbtn = uibuilder.find_widget_recursive(drawing_area, 'stream_blank')\n\n\t\tblankbtn_pos = drawing_area.get_item_index(blankbtn)\n\n\t\tlivebtn.connect('toggled', self.on_btn_toggled)\n\t\tlivebtn.set_name('live')\n\n\t\tfor idx, name in enumerate(blank_sources):\n\t\t\tif idx == 0:\n\t\t\t\tnew_btn = blankbtn\n\t\t\telse:\n\t\t\t\tnew_icon = Gtk.Image.new_from_pixbuf(blankbtn.get_icon_widget().get_pixbuf())\n\t\t\t\tnew_btn = Gtk.RadioToolButton(group=livebtn)\n\t\t\t\tnew_btn.set_icon_widget(new_icon)\n\t\t\t\tdrawing_area.insert(new_btn, blankbtn_pos+1)\n\n\t\t\tnew_btn.set_label(\"Stream %s\" % name)\n\t\t\tnew_btn.connect('toggled', self.on_btn_toggled)\n\t\t\tnew_btn.set_name(name)\n\n\tdef on_btn_toggled(self, btn):\n\t\tif not btn.get_active():\n\t\t\treturn\n\n\t\tself.log.info(\"on_btn_toggled: %s\", btn.get_name())\n\t\tif btn.get_name() == 'live':\n\t\t\tself.warning_overlay.disable()\n\n\t\telse:\n\t\t\tself.warning_overlay.enable(btn.get_name())\n","sub_path":"voctogui/lib/toolbar/streamblank.py","file_name":"streamblank.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"399494427","text":"# list (mutable)\ngrades = [77, 80, 30, 80]\n# print(sum(grades) / len(grades))\n\n# tuples (immutable)\ngrades_tuplets = (77, 213, 23)\n\n# sets - collection of unique & unordered\ngrades_set = {214, 77, 33}\n\n# grades_tuplets = grades_tuplets + (2,)\n# print(grades_set)\n\n\n# set operations\nlottery_numbers = {1,2,3,4}\nwinner_numbers = {1,2,3,7}\n\n# print(lottery_numbers.intersection(winner_numbers)) # 1,2,3\n# print(lottery_numbers.union(winner_numbers)) # 1,2,3,4,7\n# print(lottery_numbers.difference(winner_numbers)) # 4","sub_path":"section2/lists_tuplets_sets.py","file_name":"lists_tuplets_sets.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"448744054","text":"import os\n\nimport numpy as np\nimport segmentation_models_pytorch 
as smp\nimport pydensecrf.densecrf as dcrf\nimport torch\n\nfrom lib import unet\n\ndef load_model(name, pretrained=False):\n '''\n Loads model into memory\n\n Arguments:\n name (string) -- codename of the model. Available models:\n * pure_unet - Simple U-Net model\n * unet_resnet18 - U-Net with pretrained ResNet18 encoder\n * unet_vgg11 - U-Net with pretrained VGG11 encoder\n * se_resnext50 - U-Net with pretrained SE-ResNeXt50 encoder\n pretrained (boolean) -- load pretrained weights. \n If True, model loads in evaluation mode.\n ''' \n if not pretrained:\n if name==\"pure_unet\":\n model = unet.unet_model.UNet(3, 1)\n elif name==\"unet_resnet18\":\n model = smp.Unet(\"resnet18\", \"imagenet\")\n elif name==\"unet_vgg11\":\n model = smp.Unet(\"vgg11\", \"imagenet\")\n elif name==\"se_resnext50\":\n model = smp.Unet(\"se_resnext50_32x4d\", \"imagenet\")\n else:\n if name==\"pure_unet\":\n model = torch.load('../models/best_model_unet.pth')\n elif name==\"unet_resnet18\":\n model = torch.load('../models/best_model_unet_reanet18_aug.pth')\n elif name==\"unet_vgg11\":\n model = torch.load('../models/best_model_unet_vgg11_aug.pth')\n elif name==\"se_resnext50\":\n model = torch.load('../models/best_model_seresnext50_aug.pth')\n model = model.eval()\n return model\n\nclass CRFModel:\n '''\n Wrapper of DenseCRF postprocessor on PyTorch segmentation models\n\n Arguments:\n base_model (torch.Module) -- wrapped PyTorch model\n device (string) -- device where model is held\n\n Methods:\n __call__(tensor) -- interface for base_model.forward(tensor)\n get_mask(image) -- apply model with DenseCRF postprocessing\n to an input image\n '''\n def __init__(self, base_model, device=None):\n self.base = base_model\n self.device = device\n if device is not None:\n self.base = self.base.to(device)\n\n def __call__(self, input):\n return self.base(input)\n\n def get_mask(self, image, no_crf=False):\n # transform image to uint8 array for pydensecrf\n if image.dtype in {np.float16, np.float32, np.float64}:\n image = (image*255).astype(np.uint8)\n # get tensor for model\n tensor = self.__img_to_torch(image)\n mask = self.base(tensor)\n mask = self.__mask_to_numpy(mask)\n if no_crf:\n return (mask>0.5).astype(np.uint8)\n # Apply DenseCRF\n mask = self.__dense_crf(image, mask)\n return mask\n\n def __img_to_torch(self, image):\n '''\n Transforms image to PyTorch format\n\n Eg. 
image(240,320,3) -> tensor(1,3,240,320)\n '''\n image = image.astype(np.float32)#/255\n image = np.moveaxis(image, 2, 0)\n tensor = torch.tensor(image[np.newaxis, :, :, :])\n if self.device is not None:\n tensor = tensor.to(self.device)\n return tensor\n\n def __mask_to_numpy(self, tensor):\n '''\n Transforms model output to numpy array\n '''\n tensor = tensor.cpu().detach()\n if torch.min(tensor)<0 or torch.max(tensor)>1:\n tensor = torch.sigmoid(tensor)\n return tensor.numpy()[0,0]\n\n def __dense_crf(self, img, output_probs):\n # code from: https://github.com/milesial/Pytorch-UNet/blob/master/utils/crf.py\n h = output_probs.shape[0]\n w = output_probs.shape[1]\n\n output_probs = np.expand_dims(output_probs, 0)\n output_probs = np.append(1 - output_probs, output_probs, axis=0)\n\n d = dcrf.DenseCRF2D(w, h, 2)\n U = -np.log(output_probs)\n U = U.reshape((2, -1))\n U = np.ascontiguousarray(U)\n img = np.ascontiguousarray(img)\n\n d.setUnaryEnergy(U)\n\n d.addPairwiseGaussian(sxy=20, compat=3)\n d.addPairwiseBilateral(sxy=30, srgb=20, rgbim=img, compat=10)\n\n Q = d.inference(5)\n Q = np.argmax(np.array(Q), axis=0).reshape((h, w))\n\n return Q\n","sub_path":"human_segmentation/lib/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"614229891","text":"#!/usr/bin/env python\n\nfrom blimpy.guppi import GuppiRaw\nimport h5py\ntry:\n import bitshuffle.h5\n HAS_BITSHUFFLE = True\nexcept ImportError:\n HAS_BITSHUFFLE = False\n \nimport time\nimport os\nimport glob\nimport numpy as np\n\ndef cmd_tool(args=None):\n \"\"\" Command line tool for converting guppi raw into HDF5 versions of guppi raw \"\"\"\n from argparse import ArgumentParser\n\n if not HAS_BITSHUFFLE:\n print(\"Error: the bitshuffle library is required to run this script.\")\n exit()\n\n parser = ArgumentParser(description=\"Command line utility for creating HDF5 Raw files.\")\n parser.add_argument('filename', type=str, help='Name of filename to read')\n args = parser.parse_args()\n\n fileroot = args.filename.split('.0000.raw')[0]\n\n filelist = glob.glob(fileroot + '*.raw')\n filelist = sorted(filelist)\n\n\n # Read first file\n r = GuppiRaw(filelist[0])\n header, data = r.read_next_data_block()\n dshape = data.shape #r.read_next_data_block_shape()\n print(dshape)\n\n n_blocks_total = 0\n for filename in filelist:\n print(filename)\n r = GuppiRaw(filename)\n n_blocks_total += r.n_blocks\n print(n_blocks_total)\n\n full_dshape = np.concatenate(((n_blocks_total,), dshape))\n\n\n # Create h5py file\n h5 = h5py.File(fileroot + '.h5', 'w')\n h5.attrs['CLASS'] = 'GUPPIRAW'\n block_size = 0 # This is chunk block size\n dset = h5.create_dataset('data',\n shape=full_dshape,\n #compression=bitshuffle.h5.H5FILTER,\n #compression_opts=(block_size, bitshuffle.h5.H5_COMPRESS_LZ4),\n dtype=data.dtype) \n\n h5_idx = 0\n for filename in filelist:\n print(\"\\nReading %s header...\" % filename)\n r = GuppiRaw(filename)\n\n # read each block of this file in sequence, straight into the dataset created above\n for ii in range(0, r.n_blocks):\n t0 = time.time()\n print(\"Reading block %i of %i\" % (h5_idx+1, full_dshape[0]))\n header, data = r.read_next_data_block()\n t1 = time.time()\n \n t2 = time.time()\n print(\"Writing block %i of %i\" % (h5_idx+1, full_dshape[0]))\n dset[h5_idx, :] = data\n t3 = time.time()\n print(\"Read: %2.2fs, Write %2.2fs\" % ((t1-t0), (t3-t2)))\n \n h5_idx += 1\n\n # Copy over header information as 
attributes\n for key, value in header.items():\n dset.attrs[key] = value\n\n h5.close()\n\n t1 = time.time()\n print(\"Conversion time: %2.2fs\" % (t1- t0))\n\nif __name__ == \"__main__\":\n cmd_tool()","sub_path":"blimpy/deprecated/gup2hdf.py","file_name":"gup2hdf.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"16383458","text":"# encoding:utf-8\nimport os\nimport pyhfss.HfssGeneral as pg\nimport pyhfss.Hfss3DModeler as pd\ndef op():\n\n testscript = pg.HfssGeneral()\n testscript.HfssNewProject()\n testscript.HfssInsertDesign()\n testmodel = pd.Hfss3DModeler(testscript.fid)\n testmodel.HfssDrawBox('box2',['0','1','1'],['1','2','3'],'pec','False')\n testscript.HfssClosefid()\n\n os.system(r\"D:\\Software\\AnsysEM\\AnsysEM19.3\\Win64\\ansysedt.exe /RunScript D:\\code\\hfss\\tmp.py\")\n\n\nif __name__ == '__main__':\n op()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"559050622","text":"import numpy as num\n# from print_stats import print_test_stats, build_full_flag\n# import cresthh.anuga\nimport sys\nsys.path.append('/home/ZhiLi/CRESTHH')\nimport cresthh.anuga\nfrom cresthh import anuga\nfrom cresthh.anuga import Domain\nimport pandas as pd\n# from anuga import Transmissive_boundary, Refelective_boundary\nimport numpy as np\nimport os\n\nfrom cresthh.anuga import distribute, myid, numprocs, finalize, barrier\nimport geopandas as gpd\nfrom pyproj import Proj, CRS, transform\n\n\nmyProj= Proj(\"+proj=utm +zone=15, +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs\")\nstart='20170825000000'\nend= '20170901000000'\ninterval= '2M'\nif myid==0:\n\n \n yieldstep= pd.Timedelta(interval).total_seconds() \n topo_file= '/home/ZhiLi/CRESTHH/data/dem/DEM_sub.tif'\n study_area= gpd.read_file('/home/ZhiLi/CRESTHH/Examples/excessive_rain/68500_sub/68500_basin.shp')\n interior_area= gpd.read_file('/home/ZhiLi/CRESTHH/data/buffered_mainstream_new/mainstream_buffer.shp')\n base_resolution = 1000000 #1km\n interior_resolution= 1000 #10 m2 \n \n myProj = Proj(\"+proj=utm +zone=15, +south +ellps=WGS84 +datum=WGS84 +units=m +no_defs\")\n \n lons= np.array(study_area.exterior[0].coords)[:,0]; lats=np.array(study_area.exterior[0].coords)[:,1]\n utm_coords_ext= [myProj(lon,lat) for (lon, lat) in zip(lons, lats)]\n lons= np.array(interior_area.exterior[0].coords)[:,0]; lats=np.array(interior_area.exterior[0].coords)[:,1]\n utm_coords_int= [myProj(lon,lat) for (lon, lat) in zip(lons, lats)] \n if os.path.exists('1km_082500.msh'):\n DOMAIN= anuga.create_domain_from_file('1km_082500.msh')\n else:\n DOMAIN= anuga.create_domain_from_regions(\n utm_coords_ext,\n boundary_tags={'bottom': [0]},\n maximum_triangle_area=1000000,\n interior_regions=[[utm_coords_int, interior_resolution]],\n mesh_filename='1km_082500.msh') \n # domain= anuga.create_domain_from_regions(bounding_polygon, boundary_tags={'bottom':[0],}, maximum_triangle_area=0.001,verbose=True)\n DOMAIN.set_name('Aug_Sep_coupled_refined_channel')\n DOMAIN.set_proj(\"+proj=utm +zone=15, +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs\")\n DOMAIN.set_quantity('elevation', filename=topo_file, location='centroids') # Use function for elevation\n DOMAIN.set_quantity('friction', filename='/home/ZhiLi/CRESTHH/data/Texas_friction/manningn.tif', location='centroids') # Constant friction \n DOMAIN.set_quantity('stage', 
expression='elevation', location='centroids') \n DOMAIN.set_quantity('SM', 0.012, location='centroids')\n DOMAIN.set_quantity('Ksat', filename='/hydros/MengyuChen/ef5_param/crest_params/ksat_usa.tif', location='centroids')\n DOMAIN.quantities['Ksat'].centroid_values[:]*= 289.0\n DOMAIN.set_quantity('WM', filename='/hydros/MengyuChen/ef5_param/crest_params/wm_usa.tif', location='centroids')\n DOMAIN.quantities['WM'].centroid_values[:]*= 871.0\n DOMAIN.set_quantity('B', filename='/hydros/MengyuChen/ef5_param/crest_params/b_usa.tif', location='centroids')\n DOMAIN.quantities['B'].centroid_values[:]*= 5e-10\n DOMAIN.set_quantity('IM', filename='/hydros/MengyuChen/ef5_param/crest_params/im_usa.tif', location='centroids')\n DOMAIN.quantities['IM'].centroid_values[:]*= 0.06\n DOMAIN.set_quantity('KE', 0.415853, location='centroids')\n \n Br = anuga.Reflective_boundary(DOMAIN)\n Bt = anuga.Transmissive_boundary(DOMAIN)\n Bi = anuga.Dirichlet_boundary([0, 0, 0]) \n\n DOMAIN.set_boundary({'bottom': Bt,\n 'exterior': Br})\nelse:\n DOMAIN=None\n\nbarrier()\n\nDOMAIN= distribute(DOMAIN)\nDOMAIN.set_proj(\"+proj=utm +zone=15, +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs\")\nDOMAIN.set_coupled(True)\n\n#domain.set_evap_dir('/hydros/MengyuChen/pet', pattern='cov_et17%m%d.asc', freq='D')\n#domain.set_precip_dir('/home/ZhiLi/CRESTHH/data/precip',pattern='imerg%Y%m%dS%H%M%S.tif', freq='H')\n#domain.set_timestamp('20170825180000', format='%Y%m%d%H%M%S')\n#domain.set_time_interval('1H')\n\nDOMAIN.set_evap_dir('/home/ZhiLi/CRESTHH/data/evap', pattern='cov_et17%m%d.asc.tif', freq='1D')\n# domain.set_precip_dir('/home/ZhiLi/CRESTHH/data/precip',pattern='nimerg%Y%m%dS%H%M%S.tif', freq='H')\nDOMAIN.set_precip_dir('/hydros/MengyuChen/mrmsPrecRate',pattern='PrecipRate_00.00_%Y%m%d-%H%M00.grib2-var0-z0.tif', freq=interval)\nDOMAIN.set_timestamp(start, format='%Y%m%d%H%M%S')\nDOMAIN.set_time_interval(interval)\ntotal_seconds= (pd.to_datetime(end) - pd.to_datetime(start)).total_seconds()\n\n\nfor t in DOMAIN.evolve(yieldstep=120, duration=total_seconds):\n if myid==0:\n DOMAIN.write_time()\n\nDOMAIN.sww_merge(verbose=True)\n","sub_path":"Examples/excessive_rain/.ipynb_checkpoints/parallel_job-checkpoint.py","file_name":"parallel_job-checkpoint.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"310641821","text":"from ..funcs import load_images_from_spritesheet\nimport pygame, json\n\nclass Image:\n def __init__(self, editor, j, i, x, y, offset, data=None, selection=None):\n self.editor = editor\n self.i = i\n self.j = j\n self.position = [x,y]\n self.offset = offset\n\n if data:\n self.id = data['id']\n self.filepath = data['filepath']\n self.group_name = data['group_name']\n self.image = data['image']\n self.index = data['index']\n self.scale = data['scale']\n elif selection:\n self.id = selection.id\n self.filepath = selection.filepath\n self.group_name = selection.group_name\n self.image = selection.image\n self.index = selection.index\n self.scale = selection.scale\n\n try:\n if selection:\n self.autotile_config = json.load(open(selection.autotile_config_path, 'r'))\n return\n\n self.autotile_config = json.load(open(data['autotile_config_path'], 'r'))\n except:\n self.autotile_config = None\n\n def show(self, surface=None):\n if not surface:\n surface = self.editor.screen\n #Renders the image according to the self.editor.world.scroll\n surface.blit(self.image, 
[self.position[0]+self.offset[0]-self.editor.world.scroll[0], self.position[1]+self.offset[1]-self.editor.world.scroll[1]])\n\n def fill(self, images, selection, depth=950):\n if depth == 0:\n return\n\n for dir in [(0,-1), (1,0), (0,1), (-1,0)]:\n i, j = self.i+dir[1], self.j+dir[0]\n\n if i-self.editor.world.scroll[1]//self.editor.res >= 0 and i-self.editor.world.scroll[1]//self.editor.res < self.editor.screen.get_height()//self.editor.res+1 and j-self.editor.world.scroll[0]//self.editor.res >= 0 and j-self.editor.world.scroll[0]//self.editor.res < self.editor.screen.get_width()//self.editor.res+1:\n neighbor = self.get_image_with_index(i, j, images)\n\n #If the neighbor is not yet defined, the neighbor becomes an image object and is put into the images list\n if not neighbor:\n neighbor = Image(self.editor, j, i, j*self.editor.res, i*self.editor.res, self.offset, selection=selection)\n images.append(neighbor)\n neighbor.fill(images, selection, depth-1)\n\n def autotile(self, images, selector_panel_images):\n if self.autotile_config:\n neighbors = self.get_neighbors(images)\n\n binary = '0000'\n\n #Sets binary according to the neighbors around the image\n for neighbor in neighbors:\n if neighbor and neighbor.id == self.id:\n binary += '1'\n else:\n binary += '0'\n\n #Gets the image according to the binary and the configuration file\n try:\n key = str(int(binary, 2))\n index = self.autotile_config[key]\n\n images = load_images_from_spritesheet(f'data/graphics/spritesheet/{self.filepath}.png')\n image = images[index]\n\n self.image = pygame.transform.scale(image, (image.get_width()*self.scale, image.get_height()*self.scale))\n self.index = index\n\n try:\n offset_data = json.load(open(f'data/configs/offsets/{self.id}_offset.json', 'r'))\n offset = offset_data[str(self.index)]\n offset[0] *= self.scale\n offset[1] *= self.scale\n except Exception as e:\n # print(e)\n offset = [0,0]\n\n self.offset = offset\n\n except Exception as e:\n print('AUTOTILE ERROR: ', e)\n\n def get_neighbors(self, images):\n #Returns neighbor images\n neighbors = []\n\n for dir in [(0,-1), (1,0), (0,1), (-1,0)]:\n i, j = self.i+dir[1], self.j+dir[0]\n if i-self.editor.world.scroll[1]//self.editor.res >= 0 and i-self.editor.world.scroll[1]//self.editor.res < self.editor.screen.get_height()//self.editor.res+1 and j-self.editor.world.scroll[0]//self.editor.res >= 0 and j-self.editor.world.scroll[0]//self.editor.res < self.editor.screen.get_width()//self.editor.res+1:\n neighbor = self.get_image_with_index(i, j, images)\n neighbors.append(neighbor)\n\n return neighbors\n\n def get_image_with_index(self, i, j, images):\n #Returns image with the same given index (i, j)\n for image in images:\n if image.i == i and image.j == j:\n return image\n\n return None\n\n def within(self, starting, ending):\n #Returns image if it is within the rectangle dimension\n sx, sy = starting[0]+self.editor.world.scroll[0], starting[1]+self.editor.world.scroll[1]\n ex, ey = ending[0]+self.editor.world.scroll[0], ending[1]+self.editor.world.scroll[1]\n\n return (\n self.position[0] > sx and\n self.position[1] > sy and\n self.position[0]+self.get_width() < ex and\n self.position[1]+self.get_height() < ey\n )\n\n def get_width(self):\n #Returns image width\n return self.image.get_width()\n\n def get_height(self):\n #Returns image height\n return 
self.image.get_height()\n","sub_path":"Level_Editor/scripts/world/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"523023805","text":"import json\nimport logging\nfrom django.http.response import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib import auth\nfrom topicos.models import Topico\nfrom topicos.models import Mensagem\nfrom topicos.decorators import ajax_login_required\nfrom django.contrib.auth.models import User\n\nlogger = logging.getLogger(__name__)\n\n\ndef login(request):\n username = request.POST['username']\n password = request.POST['password']\n user = auth.authenticate(username=username, password=password)\n user_dict = None\n\n if user is not None:\n if user.is_active:\n auth.login(request, user)\n user_dict = _user2dict(user)\n return HttpResponse(json.dumps(user_dict), content_type='application/json')\n\n\ndef cadastro(request):\n username = request.POST['username']\n password = request.POST['password']\n email = request.POST['email']\n first_name = request.POST['first_name']\n is_superuser = json.loads(request.POST['is_superuser'])\n\n user_dict = adiciona_usuario(username, password, email,\n first_name, is_superuser)\n\n return HttpResponse(json.dumps(user_dict), content_type='application/json')\n\n\ndef adiciona_usuario(username, password, email, first_name, is_superuser):\n user_dict = None\n\n if username and password and email and first_name:\n\n try:\n user_exist = User.objects.get(username=username)\n except:\n user_exist = None\n\n if user_exist is None:\n user = User.objects.create_user(username, password, email)\n\n user.first_name = first_name\n user.set_password(password)\n user.is_superuser = is_superuser\n user.save()\n user_dict = _user2dict(user)\n\n return user_dict\n\n\ndef logout(request):\n auth.logout(request)\n return HttpResponse('{}', content_type='application/json')\n\n\ndef _whoami(request):\n if request.user.is_authenticated():\n i_am = {\n 'user': _user2dict(request.user),\n 'authenticated': True,\n }\n else:\n i_am = {'authenticated': False}\n\n return i_am\n\n\ndef whoami(request):\n i_am = _whoami(request)\n return HttpResponse(json.dumps(i_am), content_type='application/json')\n\n\ndef get_user_details(request):\n username = request.GET['username']\n user = auth.get_user_model().objects.get(username=username)\n user_dict = _user2dict(user)\n return HttpResponse(json.dumps(user_dict), content_type='application/json')\n\n\n@ajax_login_required\ndef list_topicos(request):\n filters = json.loads(request.GET.get('filters', '{}'))\n topicos_dic = pega_topicos()\n\n return HttpResponse(json.dumps(topicos_dic), content_type='application/json')\n\n\ndef pega_topicos():\n topicos = Topico.objects.all()\n topicos_dic = [t.to_dict_json() for t in topicos]\n for t in topicos_dic:\n user = t['id_usuario']\n t['id_usuario'] = user.id\n\n return topicos_dic\n\n\n@ajax_login_required\ndef edita_topico(request):\n topico_id = json.loads(request.GET.get('topico_id'))\n topico_editado = request.GET.get('topico_editado')\n i_am = _whoami(request)['user']\n\n resposta = update_topico(topico_id, topico_editado, i_am['username'])\n\n return HttpResponse(json.dumps(resposta), content_type='application/json')\n\n\ndef update_topico(topico_id, topico_editado, username):\n if topico_editado:\n try:\n topico_exists = Topico.objects.get(name=topico_editado)\n return 'Esse nome eh invaliado'\n except 
Topico.DoesNotExist:\n try:\n topico = Topico.objects.get(id=topico_id)\n except:\n return 'Topico Invalido'\n\n try:\n i_am = User.objects.get(username=username)\n except:\n return 'Usuario Invalido'\n\n user = topico.id_usuario\n\n if user.id == i_am.id or (i_am.is_superuser\n and user.is_superuser == False):\n\n try:\n topico.name = topico_editado\n topico.save()\n return \"Topico salvo com sucesso\"\n except:\n return \"Topico nao pode ser salvo\"\n else:\n return 'Voce nao pode modificar esse topico'\n else:\n return 'O nome do topico nao pode ficar em branco'\n\n\n@ajax_login_required\ndef list_mensagens(request):\n topico_id = request.GET.get('topico_id')\n\n if topico_id is None:\n logger.error('Nenhum topico encontrado, retornando []')\n return HttpResponse('[]', content_type='application/json')\n\n topico_id = json.loads(topico_id)\n\n mensagens_dict = get_mensagens(topico_id)\n\n return HttpResponse(json.dumps(mensagens_dict), content_type='application/json')\n\n\ndef get_mensagens(topico_id):\n\n if topico_id:\n try:\n thread = Topico.objects.get(id=topico_id)\n except:\n return []\n\n try:\n todas_mensagens = Mensagem.objects.filter(id_topico=thread)\n except:\n return []\n\n mensagens_dict = [m.to_dict_json() for m in todas_mensagens]\n for m in mensagens_dict:\n user = m['id_usuario']\n m['usuario_nome'] = user.username\n m['data'] = str(m['data'])\n m['data'] = m['data'].split('+')[0]\n m['data'] = m['data'].split('.')[0]\n m['id_topico'] = ''\n m['id_usuario'] = user.id\n else:\n mensagens_dict = []\n\n return mensagens_dict\n\n\n@ajax_login_required\ndef deleta_mensagem(request):\n usuario_id = request.GET.get('usuario_id')\n mensagem_id = request.GET.get('mensagem_id')\n\n mensagem_dict = None # initialised so the response below cannot raise a NameError when a parameter is missing\n\n if usuario_id and mensagem_id:\n usuario_id = json.loads(usuario_id)\n mensagem_id = json.loads(mensagem_id)\n\n i_am = _whoami(request)['user']\n\n mensagem_dict = deletar_mensagembd(usuario_id, mensagem_id, i_am['username'])\n\n return HttpResponse(json.dumps(mensagem_dict), content_type='application/json')\n\n\ndef deletar_mensagembd(usuario_id, mensagem_id, username):\n mensagem_dict = 'Mensagem nao encontrada'\n\n try:\n user = User.objects.get(id=usuario_id)\n except:\n return 'Usuario Invalido'\n\n try:\n i_am = User.objects.get(username=username)\n except:\n return 'Voce precisa logar para ter acesso a essa funcionalidade'\n\n if i_am.id == user.id:\n try:\n mensagem_dict = _deletar(mensagem_id)\n except:\n mensagem_dict = 'Mensagem nao existe'\n elif i_am.is_superuser and user.is_superuser == False:\n try:\n mensagem_dict = _deletar(mensagem_id)\n except:\n mensagem_dict = 'Mensagem nao existe'\n else:\n mensagem_dict = 'Mensagem nao pode ser deletada por você'\n\n return mensagem_dict\n\n\ndef _deletar(mensagem_id):\n try:\n mensagem = Mensagem.objects.filter(id=mensagem_id)\n mensagem.delete()\n return 'Mensagem deletada com sucesso'\n except:\n return 'Mensagem id invalido'\n\n\n@ajax_login_required\ndef registra_mensagem(request):\n topico_id = json.loads(request.GET.get('topico_id'))\n mensagem = request.GET.get('mensagem')\n i_am = _whoami(request)['user']\n\n mensagem_dict = salva_mensagem(topico_id, mensagem, i_am['username'])\n\n return HttpResponse(json.dumps(mensagem_dict), content_type='application/json')\n\n\ndef salva_mensagem(topico_id, mensagem, username):\n\n try:\n usuario = User.objects.get(username=username)\n except:\n return 'Usuario Invalido'\n\n try:\n thread = Topico.objects.get(id=topico_id)\n except:\n return 'Topico Invalido'\n\n try:\n m = 
Mensagem(id_usuario=usuario, id_topico=thread, conteudo=mensagem)\n m.save()\n return \"Mensagem salva com sucesso\"\n except:\n return \"Mensagem nao pode ser salva no momento\"\n\n\n@ajax_login_required\ndef registra_topico(request):\n topico = request.GET.get('topico')\n mensagem = request.GET.get('mensagem')\n i_am = _whoami(request)['user']\n\n mensagem_topico = salva_topico(topico, mensagem, i_am['username'])\n\n return HttpResponse(json.dumps(mensagem_topico), content_type='application/json')\n\n\ndef salva_topico(topico, mensagem, username):\n if topico and mensagem:\n try:\n usuario = User.objects.get(username=username)\n except:\n return 'Esse usuario nao existe'\n\n try:\n topico_existe = Topico.objects.get(name=topico)\n return 'Topico ja existe'\n except:\n thread = Topico(name=topico, id_usuario=usuario)\n thread.save()\n\n m = Mensagem(id_usuario=usuario, id_topico=thread, conteudo=mensagem)\n m.save()\n return 'Topico adicionado com sucesso'\n else:\n return 'Existem campos em branco'\n\n\n@ajax_login_required\ndef deleta_topico(request):\n topico_id = request.GET.get('topico_id')\n\n if topico_id:\n topico_id = json.loads(topico_id)\n i_am = _whoami(request)['user']\n\n mensagem_delete = deletar_topico(topico_id, i_am['username'])\n else:\n mensagem_delete = 'Topico nao existe'\n\n return HttpResponse(json.dumps(mensagem_delete), content_type='application/json')\n\n\ndef deletar_topico(topico_id, username):\n try:\n topico = Topico.objects.get(id=topico_id)\n except:\n return 'Topico invalido'\n\n try:\n i_am = User.objects.get(username=username)\n except:\n return 'Usuario Invalido'\n\n user = topico.id_usuario\n\n if i_am.id == user.id or (i_am.is_superuser and user.is_superuser == False):\n mensagens_deletadas = Mensagem.objects.filter(id_topico=topico.id)\n try:\n for m in mensagens_deletadas:\n Mensagem.objects.filter(id=m.id).delete()\n topico.delete()\n return 'Topico deletado com sucesso'\n except:\n return 'O topico nao pode ser deletado'\n else:\n return 'Topico nao pode ser deletada por você'\n\n\ndef _user2dict(user):\n return {\n 'username': user.username,\n 'name': user.first_name,\n 'permissions':{\n 'ADMIN': user.is_superuser,\n 'STAFF': user.is_staff,\n }\n }\n","sub_path":"topicos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"588906441","text":"import pandas as pd\nimport numpy as np\nimport requests, json\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import train_test_split\nfrom preprocessing import load_stop_words, lemmatize_remove_stop_words, preprocess_columns\n\ndata = pd.read_csv('data/dataset2.csv')\ncolumns_names = list(pd.read_csv('data/dataset2.csv', nrows=1).columns)\nstop_words = load_stop_words()\ntrain_dataset, test_dataset = ([] for i in range(2))\n\nchunk_size_for_test = 50\ntotal_predicted, good_predicted = (0,)*2\n\nfor x in range(len(data.columns)):\n dataset = data[data.columns[x]].dropna().apply(preprocess_columns).tolist()\n train, test = train_test_split(dataset, test_size=0.2)\n column_as_string = ' '.join(str(v) for v in train).replace('\"', '').lower()\n preprocessed_train_data = lemmatize_remove_stop_words(column_as_string, stop_words)\n train_dataset.append(preprocessed_train_data)\n #split the test data (a list) into smaller lists of size 
\"chunk_size_for_test\" + v kazdom tomto liste sprav z elementov jeden string:\n test_chunks = [' '.join(str(v) for v in test[x:x+chunk_size_for_test]).replace('\"', '').lower() for x in range(0, len(test), chunk_size_for_test)]\n test_dataset.append(test_chunks)\n\ntext_clf_svm = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf-svm', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, max_iter=5, random_state=42)),]).fit(train_dataset, columns_names)\n\nfor x in range(len(data.columns)):\n preprocessed_test_chunk_data = [lemmatize_remove_stop_words(chunk_from_column, stop_words) for chunk_from_column in test_dataset[x]]\n predicted_svm = text_clf_svm.predict(preprocessed_test_chunk_data)\n\n print(\"\\n\"+columns_names[x] + \"\\nCelkovy pocet: \" + str(len(predicted_svm)) + \"\\nSpravne: \"+ str(np.count_nonzero(predicted_svm == columns_names[x])) + \"\\nNespravne urcene: \")\n bad_predicted = [elem for elem in predicted_svm if elem != columns_names[x]]\n print({i:bad_predicted.count(i) for i in bad_predicted})\n total_predicted += len(predicted_svm)\n good_predicted += np.count_nonzero(predicted_svm == columns_names[x])\n\nprint(\"\\n\\nCelkovy pocet: \"+ str(total_predicted) + \", spravne: \" + str(good_predicted))\nprint((good_predicted/total_predicted)*100)\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"166604943","text":"\n\n\n# Give some coins of different value and their quantity. Find how many values which are in range 1 ~ n can these coins be combined.\n\n# 多重背包变种\n\n\n\nclass Solution:\n \"\"\"\n @param n: the value from 1 - n\n @param value: the value of coins\n @param amount: the number of coins\n @return: how many different value\n \"\"\"\n def backPackVIII(self, n, value, amount):\n # write your code here\n # 2D-DP\n m = len(value) # number of items\n dp = [False] * (n + 1) # dp[i][j]: if can first i coins can combine j value\n dp[0] = True\n res = 0\n for i in range(m): # traverse on item\n cnt = [0] * (n + 1) # count how many times i-th item are used in combing the values from value[i] to n.\n for j in range(value[i], n + 1): # 多次放入背包,正序\n if dp[j] == False and dp[j - value[i]] and cnt[j - value[i]] < amount[i]: # j is not visited but can be combined from j - value[i] and the count does not exceed amount[i]\n dp[j] = True\n res += 1\n cnt[j] = cnt[j - value[i]] + 1\n \n return res","sub_path":"Backpack VIII.py","file_name":"Backpack VIII.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"185514218","text":"def fn():\r\n s = input().strip()\r\n\r\n def priority(c):\r\n if(c == '+' or c == '-'):\r\n return 1\r\n elif(c == '*' or c == '/'):\r\n return 2\r\n else:\r\n return 3\r\n\r\n ans = \"\"\r\n stack = []\r\n\r\n for i in s:\r\n if i.isalpha():\r\n ans += i\r\n elif(i == '('):\r\n stack.append(i)\r\n elif(i == ')'):\r\n while(stack[-1] != '('):\r\n ans+=stack.pop()\r\n stack.pop()\r\n else:\r\n while(len(stack) != 0 and stack[-1] != '(' and priority(stack[-1])>=priority(i)):\r\n ans += stack.pop()\r\n stack.append(i)\r\n\r\n while(len(stack)!=0):\r\n ans += stack.pop()\r\n \r\n print(ans)\r\n\r\n\r\n \r\nfor _ in range(int(input().strip())):\r\n 
fn()","sub_path":"python/Infix_to_Postfix.py","file_name":"Infix_to_Postfix.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"257749855","text":"# -*- coding: cp936 -*-\r\n# a ball moving around the screen\r\n# the main point is working out when the ball has reached the boundary\r\n\r\n\r\n# load modules; SimpleGUICS2Pygame stands in for simplegui\r\nimport SimpleGUICS2Pygame.simpleguics2pygame as simplegui\r\nimport random\r\n# initialization\r\n\r\nwidth =800\r\n\r\nheight = 480\r\n\r\nball_radius = 20\r\n\r\ninit_pos =[width/2,height/2]\r\n\r\npositionx =random.randrange(2,10,3)\r\n\r\npositiony =random.randrange(2,10,5)\r\n\r\nvel=[positionx,positiony]\r\ntime = 0\r\n\r\n# event handlers\r\n\r\ndef draw(canvas):\r\n ball_pos = init_pos\r\n ball_pos[0] = init_pos[0] + 2 * vel[0]\r\n ball_pos[1] = init_pos[1] + 3 * vel[1]\r\n if ball_pos[1] > height-1-ball_radius:\r\n vel[1]=-vel[1]\r\n if ball_pos[1] < ball_radius:\r\n vel[1]=-vel[1]\r\n if ball_pos[0] < ball_radius:\r\n vel[0] =-vel[0]\r\n if ball_pos[0] > width-1-ball_radius:\r\n vel[0] =- vel[0]\r\n canvas.draw_circle(ball_pos,ball_radius,2,\"black\",\"red\")\r\n\r\n\r\ndef tick():\r\n global time\r\n time = time+1\r\n# register events\r\n\r\nf = simplegui.create_frame(\"motion\",width,height)\r\nf.set_draw_handler(draw)\r\nt = simplegui.create_timer(100,tick)\r\n\r\nt.start()\r\nf.start()\r\n","sub_path":"Collisions.py","file_name":"Collisions.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"447544502","text":"class URL:\n\tdef __init__(self, url):\n\t\tprotocol_link = url.split('://')\n\t\tif len(protocol_link) == 1:\n\t\t\tprotocol_link = ['', url]\n\t\t\n\t\tprotocol, link = protocol_link\n\t\thost, *path = link.split('/')\n\t\tpath = '/'.join(path)\n\n\t\tpath_query = path.split('?')\n\t\tif len(path_query) == 1:\n\t\t\tpath_query = [path, '']\n\n\t\tpath, query = path_query\n\t\tdomain = '.'.join(host.split('.')[-2:]).split(':')[0]\n\t\t\n\t\tself.protocol = protocol\n\t\tself.path = path\n\t\tself.domain = domain\n\t\tself.host = host\n\t\tself.query = query\n\n\t\tself.params = None\n\t\tself.__get_params()\n\n\tdef __get_params(self):\n\t\tquery = self.query\n\t\tif not query: return\n\n\t\tparams = {}\n\t\tquery = query.split('&')\n\n\t\tfor q in query:\n\t\t\tname, val = q.split('=')\n\t\t\tparams[name] = val\n\n\t\tself.params = params\n\n","sub_path":"repl-set-data/scraper/URL.py","file_name":"URL.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"28665296","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CategoryVideo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100, verbose_name=b'Nome')),\n ('created_at', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Data do Cadastro', blank=True)),\n ('user', models.ForeignKey(verbose_name=b'Usu\\xc3\\xa1rio', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Categoria de V\\xeddeo',\n 'verbose_name_plural': 'Categorias de V\\xeddeos',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Magazine',\n fields=[\n ('id', models.AutoField(verbose_name='ID', 
serialize=False, auto_created=True, primary_key=True)),\n ('number', models.IntegerField(verbose_name=b'N\\xc3\\xbamero')),\n ('file', models.FileField(upload_to=b'', verbose_name=b'Arquivo da revista')),\n ('status', models.BooleanField(default=True)),\n ('created_at', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Data do Cadastro', blank=True)),\n ('user', models.ForeignKey(verbose_name=b'Usu\\xc3\\xa1rio', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Revista',\n 'verbose_name_plural': 'Revistas',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Video',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('url', models.URLField()),\n ('embed_code', models.CharField(max_length=100)),\n ('type', models.CharField(max_length=2, verbose_name=b'Tipo', choices=[(b'0', b'Vimeo'), (b'1', b'Youtube')])),\n ('status', models.BooleanField(default=True)),\n ('created_at', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Data do Cadastro', blank=True)),\n ('category', models.ForeignKey(verbose_name=b'Categoria', to='multimidia.CategoryVideo')),\n ('user', models.ForeignKey(verbose_name=b'Usu\\xc3\\xa1rio', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'V\\xeddeo',\n 'verbose_name_plural': 'V\\xeddeos',\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"apps/multimidia/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"13934844","text":"import torch\nfrom .. import SimpleLogger\nfrom . import ae\nfrom ..utils import kl_divergence\n\nimport os\nimport pickle\n\n\nclass Model(ae.Model):\n def __init__(\n self,\n net,\n opt,\n n_epochs,\n gpu_ids,\n save_dir,\n data_provider,\n crit_recon,\n save_state_iter=1,\n save_progress_iter=1,\n beta=1,\n beta_start=1000,\n beta_iters_max=12500,\n c_max=500,\n c_iters_max=80000,\n gamma=500,\n objective=\"H\",\n kld_avg=False,\n ):\n\n super(Model, self).__init__(\n net,\n opt,\n data_provider,\n crit_recon,\n gpu_ids,\n save_dir,\n n_epochs,\n save_state_iter,\n save_progress_iter,\n )\n\n self.beta = beta\n self.beta_start = beta_start\n self.beta_iters_max = beta_iters_max\n self.kld_avg = kld_avg\n self.objective = objective\n\n logger_path = \"{}/logger.pkl\".format(save_dir)\n\n if os.path.exists(logger_path):\n self.logger = pickle.load(open(logger_path, \"rb\"))\n else:\n print_str = \"[{epoch:%d}][{iter:%d}] reconLoss: {recon_loss:%.6f} kld: {kld_loss:%.6f} total: {total_loss:%.6f} time: {time:%.2f}\"\n\n self.logger = SimpleLogger(print_str)\n\n def iteration(self):\n\n torch.cuda.empty_cache()\n\n gpu_id = self.gpu_ids[0]\n\n net = self.net\n opt = self.opt\n crit_recon = self.crit_recon\n\n # do this just incase anything upstream changes these values\n net.train(True)\n\n opt.zero_grad()\n\n x = self.data_provider.next()\n x = x.cuda(gpu_id)\n\n #####################\n # train autoencoder\n #####################\n\n # Forward passes\n x_hat, z = net(x)\n\n recon_loss = crit_recon(x_hat, x)\n\n kld, _, _ = kl_divergence(z[0], z[1])\n if self.objective == \"H\":\n beta_vae_loss = recon_loss + self.beta * kld\n elif self.objective == \"H_eps\":\n beta_vae_loss = recon_loss + torch.abs((self.beta * kld) - x.shape[0] * 0.1)\n elif self.objective == \"B\":\n C = torch.clamp(\n torch.Tensor(\n [self.c_max / self.c_iters_max * len(self.logger)]\n 
).type_as(x),\n                0,\n                self.c_max,\n            )\n            beta_vae_loss = recon_loss + self.gamma * (kld - C).abs()\n\n        beta_vae_loss.backward(retain_graph=True)\n        opt.step()\n\n        log = {\n            \"recon_loss\": recon_loss.item(),\n            \"kld_loss\": kld.item(),\n            \"total_loss\": beta_vae_loss.item(),\n            \"z\": [e.cpu().numpy() for e in z],\n        }\n\n        return log\n\n    def save_progress(self):\n        # gpu_id = self.gpu_ids[0]\n        # epoch = self.get_current_epoch()\n\n        # data_provider = self.data_provider\n        # net = self.net\n        pass\n","sub_path":"geneselection/solvers/bvae.py","file_name":"bvae.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"164284373","text":"import shutil\nimport random\nimport glob\nimport datetime\nimport os\n\ndef combine_img(path, org_dir_name_1, org_dir_name_2, new_dir_name):\n    \"\"\"\n    Move the images from several directories into a single directory\n    \"\"\"\n    filepathP = path + org_dir_name_1\n    filepathN = path + org_dir_name_2\n    fileListP = sorted(glob.glob(filepathP + \"/*.jpg\"))\n    fileListN = sorted(glob.glob(filepathN + \"/*.jpg\"))\n\n    now = datetime.datetime.now()\n    time_seed = now.timestamp()\n    random.seed(time_seed)\n\n    if not os.path.exists(path + new_dir_name):\n        os.makedirs(path + new_dir_name)\n    for p in fileListP :\n\n        shutil.move(p, path + new_dir_name)\n\n    for n in fileListN :\n        shutil.move(n, path + new_dir_name)\n\n","sub_path":"edit_image/combine_img.py","file_name":"combine_img.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"200217799","text":"from collections import deque\n\nn = int(input())\nisland = [list(map(int, input().split())) for _ in range(n)]\n\ncnt = 0\n\nQ = deque()\n\ndy = [0, 0, 1, -1, 1, -1, 1, -1]\ndx = [1, -1, 0, 0, 1, 1, -1, -1]\n\nfor i in range(n):\n    for j in range(n):\n        if island[i][j] == 1:\n            island[i][j] = 0\n            Q.append((i,j))\n            while Q:\n                tmp = Q.popleft()\n                for k in range(8):\n                    x = tmp[0] + dx[k]\n                    y = tmp[1] + dy[k]\n                    if 0 <= x < n and 0 <= y < n and island[x][y] == 1:\n                        island[x][y] = 0\n                        Q.append((x,y))\n            cnt += 1\nprint(cnt)","sub_path":"sec07-dfs-and-bfs/13_섬나라_아일랜드(BFS).py","file_name":"13_섬나라_아일랜드(BFS).py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"35112817","text":"\"\"\"\nAuthor:goblinM\nDate:2020-01-15\nDescribe:YAML data parsing\n\"\"\"\nimport os\n\nimport yaml\n# current_path = os.path.abspath(\".\")\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\nmultiple_yaml_path = os.path.join(current_path, \"config.yaml\")\nsingle_yaml_path = os.path.join(current_path, \"single_config.yaml\")\n\n\nclass YamlMethods(object):\n\n    def open_read_file(self, path=single_yaml_path):\n        \"\"\"Open a file for reading\"\"\"\n        with open(path, 'r', encoding='utf-8') as f:\n            file_data = f.read()\n        return file_data\n\n    def open_write_file(self, path):\n        \"\"\"Open a file for writing\"\"\"\n        f = open(path, 'w', encoding='utf-8')\n        return f\n\n    def single_yaml_load(self):\n        \"\"\"Parse a single YAML document\"\"\"\n        file_data = self.open_read_file(single_yaml_path)\n        data = yaml.safe_load(file_data)\n        print(\"type:\", type(data))\n        print(data)\n        return data\n\n    def multiple_yaml_load(self):\n        \"\"\"Parse multiple YAML documents\"\"\"\n        file_data = self.open_read_file(multiple_yaml_path)\n        data_list = yaml.safe_load_all(file_data)\n        print(\"type:\", type(data_list))\n        for data in data_list:\n            print(data)\n        return data_list\n\n    def non_standard_yaml_dump(self):\n        \"\"\"dump may produce a non-standard YAML file; 
safe_dump produces a standard document\"\"\"\n        py_object = {'school': 'zhang',\n                     'students': ['a', 'b']}\n        file = self.open_write_file(os.path.join(current_path, \"output_non_standard_config.yaml\"))\n        yaml.safe_dump(py_object, file)\n\n    def standard_yaml_dump(self):\n        \"\"\"Use ruamel to produce a standard YAML file\"\"\"\n        from ruamel import yaml\n        py_object = {'school': 'zhang',\n                     'students': ['a', 'b']\n                     }\n        file = self.open_write_file(os.path.join(current_path, \"output_standard_config.yaml\"))\n        yaml.dump(py_object, file, Dumper=yaml.RoundTripDumper)\n\n\nif __name__ == '__main__':\n    y = YamlMethods()\n    # y.non_standard_yaml_dump()\n    # y.standard_yaml_dump()\n    y.single_yaml_load()\n    # y.multiple_yaml_load()\n\n","sub_path":"test_page_object/data_layer/analysis_data.py","file_name":"analysis_data.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"489623038","text":"import tweepy\n\nkeys = []\n\nwith open(\"keys.gitignore\", \"r\") as f:\n    for line in f:\n        keys.append(line[:-1])\n    f.close()\n\nconsumer_key = keys[0]\nconsumer_secret = keys[1]\naccess_key = keys[2]\naccess_secret = keys[3]\n\ndef authorize_account(consumer_key = consumer_key, consumer_secret = consumer_secret,\n                      access_key = access_key, access_secret = access_secret):\n    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n    auth.set_access_token(access_key, access_secret)\n    return tweepy.API(auth)\n    \nif __name__ == \"__main__\":\n    twitter_account = authorize_account()\n    twitter_account.update_status(\"Teste teste test.\")\n","sub_path":"auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"108554439","text":"import os\nfrom .common import *\n\n# Debug settings\nDEBUG = False\n\n### Add any site-specific Kiwi settings below this line\n# for more information about available settings see\n# http://kiwitcms.readthedocs.io/en/latest/configuration.html\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'change-me'\n\n\n# Administrators error report email settings\nADMINS = [\n    # ('Your Name', 'your_email@example.com'),\n]\n\n\n### DO NOT CHANGE THE SETTINGS BELOW\n\n# provides filename versioning\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\n# indicate that this is the Enterprise Edition version\nKIWI_VERSION = \"%s-ee\" % KIWI_VERSION\n","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"236494183","text":"\r\n#load Dataset\r\nimport pandas as pd\r\nfrom wordcloud import WordCloud\r\nimport matplotlib.pyplot as pl\r\n\r\ndef load_data():\r\n    data_=pd.read_csv('post_vaccination_tweets_final.csv')\r\n    return data_\r\n\r\ndata__=load_data()\r\ndf_twitter= pd.DataFrame(data__)\r\n\r\ndef wordcloud_plot(wordcloud):\r\n    pl.figure(figsize=(20,20))\r\n    pl.imshow(wordcloud)\r\n    pl.axis('off')\r\nmy_string=[]\r\nfor text in df_twitter['text']:\r\n    my_string.append(text)\r\n    \r\n    \r\n    \r\n    \r\nmy_string=pd.Series(my_string).str.cat(sep=' ') \r\nwordcloud=WordCloud(width=1000,height=500).generate(my_string)\r\nwordcloud_plot(wordcloud)","sub_path":"word cloud visulization.py","file_name":"word cloud visulization.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
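A note on the analysis_data.py record above: it contrasts yaml.safe_load with yaml.safe_dump. As a minimal, self-contained sketch of that round trip with PyYAML (the file name here is a hypothetical placeholder, not taken from the record):

import yaml

path = "roundtrip_config.yaml"  # hypothetical output path

data = {"school": "zhang", "students": ["a", "b"]}

# safe_dump writes plain, standard YAML (no Python-specific tags)
with open(path, "w", encoding="utf-8") as f:
    yaml.safe_dump(data, f)

# safe_load parses it back into plain Python objects
with open(path, "r", encoding="utf-8") as f:
    restored = yaml.safe_load(f)

assert restored == data
print(restored)  # {'school': 'zhang', 'students': ['a', 'b']}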
+{"seq_id":"527784358","text":"import tkinter as tk\nimport sys  # needed for the sys.stdout redirection below\nfrom test import *  # the local test module is expected to provide the Tk root window `root`\nfrom tkinter import *\nimport os\n\nclass Redirect():\n\n    def __init__(self, widget):\n        self.widget = widget\n\n    def write(self, text):\n        self.widget.insert('end', text)\n\n    # some widgets may need it\n    #def flush(self):\n    #    pass\n\ntext = tk.Text(root)\ntext.pack()\n\n# keep the original stdout\nold_stdout = sys.stdout \n\n# assign a Redirect wrapping the Text widget\nsys.stdout = Redirect(text)\n\nroot.mainloop()\n\n# assign back the original stdout (if you need it)\nsys.stdout = old_stdout\n","sub_path":"Python_GUI/teste/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"205099832","text":"import os\n\ndef file_input(x, y):\n    os.system(\"mkdir a%03d_%03d\"%(x,y))\n    os.system(\"cp * ./a%03d_%03d\"%(x,y))\n\ndef change_itp(x, y):\n    os.chdir(\"./a%03d_%03d\"%(x,y))\n    f = open(\"or.itp\", 'r')\n    o = open(\"ethanol.itp\", 'w')\n    \n    a = 1\n\n    for i in f:\n        if a == 8:\n            sigma = float(i[45:52])\n            epsilin = float(i[53:64])\n            o.write(i[:45]+'%7.4f'%(sigma*x/100)+'%12.4f'%(epsilin*y/100)+'\\n')\n        #if a == 11:\n        #    sigma = float(i[45:52])\n        #    epsilin = float(i[53:64])\n        #    o.write(i[:45]+'%7.4f'%(sigma*x/100)+'%12.4f'%(epsilin*y/100)+'\\n')\n        elif a == 12:\n            sigma = float(i[45:52])\n            epsilin = float(i[53:64])\n            o.write(i[:45]+'%7.4f'%(sigma*x/100)+'%12.4f'%(epsilin*y/100)+'\\n')\n        else:\n            o.write(i)\n        a += 1\n    os.chdir(\"../\")\n\nfor a in range(95,106):\n    for b in range(95,106):\n        file_input( a , b )\n        change_itp(a , b)\n","sub_path":"arc/linux_tools/scan/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"617897126","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport csv\nimport sys\n\nargs = sys.argv\n\ndef main():\n    # dataN: the number of original samples; N: the length after zero-padding\n    dataN = 2500\n    N = 4096\n\n    # Sampling interval (s)\n    dt = 0.01\n\n    # Time and frequency axes; frequency is divided by 1000 to convert the unit to [kHz]\n    t = np.arange(0, N*dt, dt)\n    freq = np.linspace(0, 1.0/dt, N)/1000\n\n    # Array for reading the CSV data\n    data = []\n\n    csv_file = args[1]\n\n    with open(csv_file) as f:\n        reader = csv.reader(f)\n        data = []\n        for row in reader:\n            data = data + [row]\n    #print('DATA NUM ='+str(len(data)))\n\n    # Zero-pad the end of the data\n    for i in range(N-dataN):\n        data.append(['0','0'])\n    #print('CHANGED DATA NUM ='+str(len(data)))\n\n    # Convert the strings to floats, load them into a numpy array, and transpose\n    f = np.array([[float(s2) for s2 in s] for s in data])\n    f2 = f.T\n\n    # Fast Fourier transform\n    Fx = np.fft.fft(f2[0])\n    Fy = np.fft.fft(f2[1])\n    #print(Fx.size)\n\n    # Compute the amplitude spectra\n    Ampx = np.abs(Fx) / dataN/10 * 2\n    Ampy = np.abs(Fy) / dataN/10 * 2\n\n    for i in range(Fx.size):\n        print(freq[i],',',Ampx[i],',',Ampy[i])\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"vol2deg_FFT/FFT.py","file_name":"FFT.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"639788369","text":"import os, sys\nimport wx\nimport wx.lib.agw.multidirdialog as MDD\nimport pandas as pd\nfrom geopy import Point\nfrom geopy.distance import vincenty\nfrom shapely.geometry.multipolygon import MultiPolygon\nfrom shapely import wkt\nfrom shapely.ops import cascaded_union\nfrom itertools import combinations\nfrom shapely.geometry import Polygon\nimport numpy as np\nimport pyproj \nimport shapely\nimport shapely.ops as ops\nfrom shapely.geometry.polygon import Polygon\nfrom functools import 
partial\nfrom math import sin, cos, asin, sqrt, degrees, radians\nfrom shapely.geometry.polygon import LinearRing\nfrom matplotlib import pyplot as fig\n\n#Global Variable Declaration\n#directory1 --> \"Upload opr1\" ; directory2 --> \"Upload opr1\"; directory3 --> \"Upload opr2\"; directory4 --> \"Upload opr2\"\ndirectory1=\"\"\ndirectory2=\"\"\ndirectory3=\"\"\ndirectory4=\"\"\n\n#filex stores the file name selected coresponding to directoryx\nfile1=\"\"\nfile2=\"\"\nfile3=\"\"\nfile4=\"\"\n\n#sectorName is string being Typed in textBox of Sector name\nsectorName=\"\"\n\n#list used for generating multiplygon Data\nlstii=[]\nlstww=[]\nmaxlst=[]\ninitialv=0\nfinalv=0\n\n#dfpd --> \"Physical Data VF\"; dfv1 --->\"UE-2.1 Voda LTE time Mean Data\"; dfv2 --->\"UE-5 Voda LTE time Mean Data\"\n#df--> \"Mapped Data of LTE time Data + Physical Data\";\n#dfIND--> \"Individual Sector Analysis of the entered Sector Name\"\n#dfUE -->\"UE% Left in each bucket of every sector, See-UE%_Data.xlsx\"\n#dfUE_Final -->\"Overall UE% Left every sector, See-UE%_Data_.xlsx\"\n\ndfpd=pd.DataFrame()\ndfv1=pd.DataFrame()\ndfv2=pd.DataFrame()\ndf=pd.DataFrame()\ndfInd=pd.DataFrame()\ndfUE_Final=pd.DataFrame()\ndfUE=pd.DataFrame()\n\nwildcard = \"Excel Sheet (*.xlsx)|*.xlsx|\" \\\n \"All files (*.*)|*.*\"\n\n#MyForm class calls the Framework \nclass MyForm(wx.Frame):\n from shapely.geometry.multipolygon import MultiPolygon\n\n def __init__(self):\n wx.Frame.__init__(self, None, wx.ID_ANY, title='Nokia Frame-Work',size=wx.Size(450, 100))\n \n # Add a panel so it looks correct on all platforms\n self.panel = wx.Panel(self, wx.ID_ANY)\n self.currentDirectory = os.getcwd()\n \n#Defining componentes of Framework i.e., the buttons,TextBox,Static Text\n bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_OTHER, (16, 16))\n titleIco = wx.StaticBitmap(self.panel, wx.ID_ANY, bmp)\n title = wx.StaticText(self.panel, wx.ID_ANY, 'Frame-Work Info')\n\n title11 = wx.StaticText(self.panel, wx.ID_ANY, 'Upload Time Raw Data')\n openFileDlgBtn1 = wx.Button(self.panel,-1, label=\"Upload Opr1\")\n openFileDlgBtn1.Bind(wx.EVT_BUTTON, self.onOpenFile1)\n \n title12 = wx.StaticText(self.panel, wx.ID_ANY, 'Upload Physical Data ')\n openFileDlgBtn2 = wx.Button(self.panel,-1, label=\"Upload Opr1\")\n openFileDlgBtn2.Bind(wx.EVT_BUTTON, self.onOpenFile2)\n\n openFileDlgBtn11 = wx.Button(self.panel,-1, label=\"Upload Opr2\")\n openFileDlgBtn11.Bind(wx.EVT_BUTTON, self.onOpenFile11)\n \n openFileDlgBtn22 = wx.Button(self.panel,-1, label=\"Upload Opr2\")\n openFileDlgBtn22.Bind(wx.EVT_BUTTON, self.onOpenFile22)\n\n\n\n Btitle1 = wx.StaticText(self.panel, wx.ID_ANY, 'Parse(Upload File First)* ')\n genBtn1 = wx.Button(self.panel, wx.ID_ANY, 'Parse Opr1')\n self.Bind(wx.EVT_BUTTON, self.onClickParse, genBtn1)\n genBtn11 = wx.Button(self.panel, wx.ID_ANY, 'Parse Opr2')\n self.Bind(wx.EVT_BUTTON, self.onClickParse, genBtn11)\n \n Btitle2 = wx.StaticText(self.panel, wx.ID_ANY, 'Generate Multi-Polygon Data')\n genBtn2 = wx.Button(self.panel, wx.ID_ANY, 'Generate')\n self.Bind(wx.EVT_BUTTON, self.onGen1, genBtn2)\n\n Btitle3 = wx.StaticText(self.panel, wx.ID_ANY, 'Generate UE% Left Data')\n genBtn3 = wx.Button(self.panel, wx.ID_ANY, 'Generate')\n self.Bind(wx.EVT_BUTTON, self.onGen2, genBtn3)\n\n Btitle4 = wx.StaticText(self.panel, wx.ID_ANY, 'Generate Final Coverage Data')\n genBtn4 = wx.Button(self.panel, wx.ID_ANY, 'Generate')\n self.Bind(wx.EVT_BUTTON, self.onGen3, genBtn4)\n \n para1 = wx.StaticText(self.panel, wx.ID_ANY, 'Get Overlap Data of 
LMBTS With Neighbour Site')\n\n \n l1 = wx.StaticText(self.panel, -1, \"Sector Name\")\n self.t1 = wx.TextCtrl(self.panel)\n self.t1.Bind(wx.EVT_TEXT,self.OnKeyTyped) \n\n okBtn = wx.Button(self.panel, wx.ID_ANY, 'Generate')\n self.Bind(wx.EVT_BUTTON, self.onGen4, okBtn)\n \n #Putting components into BoxSizer which is basically a layout\n topSizer = wx.BoxSizer(wx.VERTICAL)\n titleSizer = wx.BoxSizer(wx.HORIZONTAL)\n uploadSizer1 = wx.BoxSizer(wx.HORIZONTAL)\n uploadSizer2 = wx.BoxSizer(wx.HORIZONTAL)\n genSizer1 = wx.BoxSizer(wx.HORIZONTAL)\n genSizer2 = wx.BoxSizer(wx.HORIZONTAL)\n genSizer3 = wx.BoxSizer(wx.HORIZONTAL)\n genSizer4 = wx.BoxSizer(wx.HORIZONTAL)\n paratextSizer = wx.BoxSizer(wx.HORIZONTAL)\n btnSizer = wx.BoxSizer(wx.HORIZONTAL)\n \n#Adding Corresponding Component to there boxes\n titleSizer.Add(title, 0, wx.ALL, 5)\n titleSizer.Add(titleIco, 0, wx.ALL, 5)\n \n \n uploadSizer1.Add(title11,0, wx.ALL, 5)\n uploadSizer1.Add(openFileDlgBtn1,0, wx.ALL, 5)\n uploadSizer1.Add(openFileDlgBtn11,0, wx.ALL, 5)\n\n uploadSizer2.Add(title12,0, wx.ALL, 5)\n uploadSizer2.Add(openFileDlgBtn2,0, wx.ALL, 5)\n uploadSizer2.Add(openFileDlgBtn22,0, wx.ALL, 5)\n\n genSizer1.Add(Btitle1,0, wx.ALL, 5)\n genSizer1.Add(genBtn1,0, wx.ALL, 5)\n genSizer1.Add(genBtn11,0, wx.ALL, 5)\n\n genSizer2.Add(Btitle2,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n genSizer2.Add(genBtn2,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n\n genSizer3.Add(Btitle3,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n genSizer3.Add(genBtn3,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n\n genSizer4.Add(Btitle4,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n genSizer4.Add(genBtn4,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n\n paratextSizer.Add(para1, 0, wx.ALL, 5)\n\n btnSizer.Add(l1, 1, wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)\n btnSizer.Add(self.t1,1,wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)\n btnSizer.Add(okBtn, 0, wx.ALL, 5)\n\n \n topSizer.Add(titleSizer, 0, wx.CENTER)\n topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\n topSizer.Add(uploadSizer1, 0, wx.CENTER)\n topSizer.Add(uploadSizer2, 0, wx.CENTER)\n topSizer.Add(genSizer1, 0, wx.RIGHT)\n topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\n\n topSizer.Add(genSizer2, 0, wx.CENTER)\n topSizer.Add(genSizer3, 0, wx.CENTER)\n topSizer.Add(genSizer4, 0, wx.CENTER)\n topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5) \n topSizer.Add(paratextSizer, 0, wx.CENTER)\n topSizer.Add(btnSizer, 0, wx.ALL|wx.CENTER, 5)\n \n # SetSizeHints(minW, minH, maxW, maxH)\n self.SetSizeHints(250,300,500,400)\n self.panel.SetSizer(topSizer)\n topSizer.Fit(self)\n\n#Framework Layout is done, now defining there function which will be called\n#First \"Generate\" button --> onGen1;\n#Second \"Generate\" button --> onGen2;\n#Third \"Generate\" button --> onGen3;\n#Fourth \"Generate\" button --> onGen4;\n\n#onGen1 Creates File \"Multipolygon.xlsx\" which would be input for further \"Generate\" Button\n def onGen1(self, event):\n print(\"Wait Code running...\")\n global directory1,sectorName,lstii,lstww,maxlst,initialv,finalv,dfpd,dfv1,dfv2,df,dfInd,dfUE_Final,dfUE \n\n#Reading 'Parsed.xlsx' file generated\n os.chdir(directory1)\n file = 'Parsed.xlsx'\n xl= pd.ExcelFile(file)\n dfv1=xl.parse(xl.sheet_names[0])\n dfv2=xl.parse(xl.sheet_names[1])\n\n initialv=dfv1.columns.get_loc(\"% UEs with distance to base station in the range of 0-78m in 2.1km cells\")\n finalv=dfv1.columns.get_loc(\"% UEs with distance to base station in the range of 2262-3000m in 2.1km cells\")\n\n#Generating Data for voda UE-2.1\n dfg11=self.onGen1Fun(dfv1)\n\n if(dfv2.columns.get_loc(\"% UEs with 
distance to base station in the range of 4.8-6km in 5km cells\")>finalv):\n finalv=dfv2.columns.get_loc(\"% UEs with distance to base station in the range of 4.8-6km in 5km cells\")\n\n#Generating Data for voda UE-5\n dfg12=self.onGen1Fun(dfv2)\n \n#Merging Multipolygon Data into df\n df=pd.concat([dfg11,dfg12],axis=0)\n newfile=directory1+\"\\Multipolygon.xlsx\"\n writer = pd.ExcelWriter(newfile)\n df.to_excel(writer,sheet_name=\"Sheet-1\",index=False)\n writer.save()\n#Cleaing df dataframe to avoid append of data while multi-click \n df.iloc[0:0]\n print ('Done! Check File at Source Destination..')\n \n\n \n def onGen2(self, event):\n print(\"Wait Code running...\")\n global directory1,directory2,directory3,directory4,file1,file2,file3,file4,sectorName,lstii,lstww,maxlst,initialv,finalv,dfpd,dfv1,dfv2,df,dfInd,dfUE_Final,dfUE\n \n#Reading Recent 'Multipolygon.xlsx' generated\n os.chdir(directory1)\n file = 'Multipolygon.xlsx'\n xl= pd.ExcelFile(file)\n df=xl.parse(xl.sheet_names[0])\n \n#Defining header of dfUE dataframe\n stri=\"\"\n strj=\"\"\n clm=['Sector Name','Intersecting Sector List']\n\n for a in range(initialv,finalv+1):\n clm.append('PercentLeftFrominitial in '+'Polygon'+str(a-initialv+1))\n clm.append('PolygonLeftFrominitial in '+'Polygon'+str(a-initialv+1))\n dfUE = pd.DataFrame(columns=clm)\n\n#iterate over each sector polygen and intersect it will rest other Sector\n for i in range(len(df)): \n stri='POLYGON'+lstww[i][len(lstww[i])-1]\n pi = wkt.loads(stri)\n mi = self.MultiPolygon([pi]) \n j=0 \n lst=[]\n strw=[] \n percent_remaining=[]\n poly_r=[]\n for f in range(maxlst[i]):\n strw.append(lstww[i][f])\n percent_remaining.append(100)\n\n area_ini=[]\n for f in range(maxlst[i],finalv-initialv+1):\n percent_remaining.append('NA')\n for f in range(maxlst[i]): \n strini='POLYGON'+strw[f]\n p_ini = wkt.loads(strini)\n m_ini = self.MultiPolygon([p_ini])\n poly_r.append(m_ini)\n coordinates_arrayi=[]\n for pol in m_ini:\n coordinates_arrayi = np.asarray(pol.exterior.coords)\n\n area_ini.append(self.initial_area(coordinates_arrayi))\n\n alt=[]\n alt1=[]\n alt.append(df.iat[i,1])\n alt1.append(df.iat[i,1])\n\n#Condition for intersection --> intersite distance distance <= sum of cell size determined by cummulative sum to 95% of both sector\n#Condition for intersection --> Then check if their outer boundry intersect\n#If yes, intersect each bucket of polygen with the outer boundry.\n for j in range(len(df)):\n if(j!=i and df.iat[i,0]!=df.iat[j,0]):\n d=self.dist(float(df.iat[i,2]),float(df.iat[i,3]),float(df.iat[j,2]),float(df.iat[j,3]))\n if(d<(df.iat[i,5]+df.iat[j,5])/1000):\n strj='POLYGON'+lstww[j][len(lstww[j])-1]\n pj = wkt.loads(strj)\n mj = self.MultiPolygon([pj])\n for pol in mi:\n for pol2 in mj:\n if (pol.intersects(pol2)==True) :\n lst.append(df.iat[j,1])\n for t in range(maxlst[i]):\n str1='POLYGON'+strw[t]\n try:\n p1 = wkt.loads(str1)\n m1 = self.MultiPolygon([p1])\n for pol in m1:\n for pol2 in mj:\n if (pol.intersects(pol2)==True and self.initial_area(self.get_array(m1))!=0 and self.initial_area(self.get_array(mj))!=0) :\n try:\n polygon_remaining=self.get_polyg(m1,mj)\n \n if(polygon_remaining.is_empty):\n percent_remaining[t]=0\n strw[t]=\"((0 0,0 0,0 0,0 0))\"\n string='POLYGON'+strw[t]\n p12 = wkt.loads(string)\n polygon_remaining = self.MultiPolygon([p12])\n poly_r[t]=polygon_remaining\n \n else:\n coordinate_remaining=self.get_array(polygon_remaining)\n # area_initial=self.initial_area(coordinate_initial)\n 
area_remaining=self.remaining_area(coordinate_remaining)\n percent_remaining[t]=area_remaining/area_ini[t]*100\n poly_r[t]=polygon_remaining\n y=0\n strw[t]=\"((\"\n while(y!=len(coordinate_remaining)-1):\n if(coordinate_remaining[y]==coordinate_remaining[y+1] and y==0):\n strw[t]=strw[t]\n elif(coordinate_remaining[y]==coordinate_remaining[y+1] and y==len(coordinate_remaining)-2):\n strw[t]=strw[t] \n elif(coordinate_remaining[y]==coordinate_remaining[y+1]):\n strw[t]=strw[t]+str(coordinate_remaining[y][0])+\" \"+str(coordinate_remaining[y][1])+\")\"+\",\"+\"(\"\n else:\n strw[t]=strw[t]+str(coordinate_remaining[y][0])+\" \"+str(coordinate_remaining[y][1])+\",\"\n y=y+1\n strw[t]=strw[t]+str(coordinate_remaining[len(coordinate_remaining)-1][0])+\" \"+str(coordinate_remaining[len(coordinate_remaining)-1][1])\n strw[t]=strw[t]+\"))\"\n \n except:\n arr_remaining=self.get_polyg1(m1,mj)\n if(len(arr_remaining)<=2):\n percent_remaining[t]=0 \n strw[t]=\"((0 0,0 0,0 0,0 0))\"\n string='POLYGON'+strw[t]\n p12 = wkt.loads(string)\n polygon_remaining = self.MultiPolygon([p12])\n poly_r[t]=polygon_remaining\n # alt.append(0)\n else:\n polygon_remaining=self.MultiPolygon(arr_remaining)\n coordinate_remaining=self.get_array(polygon_remaining)\n # area_initial=self.initial_area(coordinate_initial)\n area_remaining=self.remaining_area(coordinate_remaining)\n percent_remaining[t]=area_remaining*100/area_ini[t]\n poly_r[t]=polygon_remaining\n y=0\n strw[t]=\"((\"\n while(y!=len(coordinate_remaining)-1):\n if(coordinate_remaining[y]==coordinate_remaining[y+1] and y==0):\n strw[t]=strw[t]\n elif(coordinate_remaining[y]==coordinate_remaining[y+1] and y==len(coordinate_remaining)-2):\n strw[t]=strw[t] \n elif(coordinate_remaining[y]==coordinate_remaining[y+1]):\n strw[t]=strw[t]+str(coordinate_remaining[y][0])+\" \"+str(coordinate_remaining[y][1])+\")\"+\",\"+\"(\"\n else:\n strw[t]=strw[t]+str(coordinate_remaining[y][0])+\" \"+str(coordinate_remaining[y][1])+\",\"\n y=y+1\n strw[t]+str(coordinate_remaining[len(coordinate_remaining)-1][0])+\" \"+str(coordinate_remaining[len(coordinate_remaining)-1][1])\n strw[t]=strw[t]+\"))\"\n else:\n percent_remaining[t]=percent_remaining[t]\n except:\n percent_remaining[t]=percent_remaining[t]\n for t in range(maxlst[i],finalv-initialv+1):\n percent_remaining[t]=percent_remaining[t]\n alt.append(lst)\n for t in range(maxlst[i]):\n\n if(poly_r[t].is_empty==False):\n coordinate_remaining=self.get_array(poly_r[t])\n area_remaining=self.remaining_area(coordinate_remaining)\n if(ifinalv):\n finalv=dfv2.columns.get_loc(\"% UEs with distance to base station in the range of 4.8-6km in 5km cells\")\n\n \n#Summing up all UE% in all bucket of LNBTS name.\n clm=['Sector Name','Final Coverage']\n dfUE_Final = pd.DataFrame(columns=clm)\n print(\"dfUE\"+str(len(dfUE)))\n for i in range(len(dfUE)):\n \n alt=[]\n alt.append(dfUE.iat[i,0])\n t=2\n sum=0\n \n while(str(dfUE.iat[i,t])!='nan' and t<2*(finalv-initialv+1)):\n \n sum=sum+dfUE.iat[i,t]\n t=t+2\n alt.append(sum)\n \n k=pd.Series(alt,index=clm)\n dfUE_Final=dfUE_Final.append(k, ignore_index=True)\n \n newfile=directory1+\"\\\\UE%_Data_final.xlsx\"\n writer = pd.ExcelWriter(newfile)\n dfUE_Final.to_excel(writer,sheet_name=\"Sheet-1\",index=False)\n writer.save()\n print ('Done! Check File at Source Destination..')\n dfUE_Final.iloc[0:0]\n \n \n#onGen4 generate individual Report of entered Sector name. 
It shows which all sector cut how much UE% in each bucket of entered sector name \n def onGen4(self, event):\n global directory1,directory2,directory3,directory4,file1,file2,file3,file4,sectorName,lstii,lstww,maxlst,initialv,finalv,dfpd,dfv1,dfv2,df,dfInd,dfUE_Final,dfUE\n os.chdir(directory1)\n\n for i in range(len(df)):\n if(df.iat[i,1]==sectorName):\n index=i\n break\n \n stri=\"\"\n strj=\"\"\n \n#generating header of dataframe\n clm=['Sector Name','Co-Sector Name']\n for a in range(initialv,finalv+1):\n clm.append('PercentLeftFrominitial in '+'Polygon'+str(a-initialv+1))\n dfInd = pd.DataFrame(columns=clm)\n c=0\n i=0\n i=index \n\n stri='POLYGON'+lstww[i][len(lstww[i])-1]\n pi = wkt.loads(stri)\n mi = self.MultiPolygon([pi]) \n j=0 \n strw=[] \n percent_remaining=[]\n poly_r=[]\n for f in range(maxlst[i]):\n strw.append(lstww[i][f])\n percent_remaining.append(100)\n\n area_ini=[]\n for f in range(maxlst[i],finalv-initialv+1):\n percent_remaining.append(\"NA\")\n for f in range(maxlst[i]): \n strini='POLYGON'+strw[f]\n p_ini = wkt.loads(strini)\n m_ini = self.MultiPolygon([p_ini])\n poly_r.append(m_ini)\n coordinates_arrayi=[]\n for pol in m_ini:\n coordinates_arrayi = np.asarray(pol.exterior.coords)\n\n area_ini.append(self.initial_area(coordinates_arrayi))\n\n\n\n#iterating over every sector to check sectors which are intersecting on the same condition above\n for j in range(len(df)):\n c=0\n for e in range(maxlst[i]):\n if(percent_remaining[e]!=0):\n c=c+1\n if(j!=i and df.iat[i,0]!=df.iat[j,0] and c!=0):\n d=self.dist(float(df.iat[i,2]),float(df.iat[i,3]),float(df.iat[j,2]),float(df.iat[j,3]))\n if(d<(df.iat[i,5]+df.iat[j,5])/1000):\n strj='POLYGON'+lstww[j][len(lstww[j])-1]\n pj = wkt.loads(strj)\n mj = self.MultiPolygon([pj])\n for pol in mi:\n for pol2 in mj:\n if (pol.intersects(pol2)==True):\n alt=[]\n alt.append(df.iat[i,1])\n alt.append(df.iat[j,1])\n for t in range(maxlst[i]):\n str1='POLYGON'+strw[t]\n p1 = wkt.loads(str1)\n m1 = self.MultiPolygon([p1])\n for pol in m1:\n for pol2 in mj:\n if (pol.intersects(pol2)==True and self.initial_area(self.get_array(m1))!=0 and self.initial_area(self.get_array(mj))!=0) :\n try:\n polygon_remaining=self.get_polyg(m1,mj)\n if(polygon_remaining.is_empty):\n alt.append(0) \n percent_remaining[t]=0 \n strw[t]=\"((0 0,0 0,0 0,0 0))\"\n string='POLYGON'+strw[t]\n p12 = wkt.loads(string)\n polygon_remaining = self.MultiPolygon([p12])\n poly_r[t]=polygon_remaining\n else:\n coordinate_remaining=self.get_array(polygon_remaining)\n area_remaining=self.remaining_area(coordinate_remaining)\n percent_remaining[t]=area_remaining/area_ini[t]*100\n if(i97):\n h=h+1\n break\n elif(sum+dfv1.iat[i,l]<97):\n sum=sum+dfv1.iat[i,l]\n h=h+1\n else:\n break\n maxlst.append(h)\n dv=[]\n dv=self.get_rangeArray(dfv1)\n alt.append(dv[h-1])\n m=initialv+h \n\n\n multipolyg=[]\n arr1=[]\n string1=\"\"\n\n for j in range(initialv,m):\n uplst=self.uplist(dv[j-initialv],lat1,lon1,dfv1.iat[i,11]) \n rightlst=self.rightlist(dv[j-initialv],lat1,lon1,dfv1.iat[i,11]-(42+j*3/(finalv-initialv)))\n leftlst=self.leftlist(dv[j-initialv],lat1,lon1,dfv1.iat[i,11]+(42+j*3/(finalv-initialv)))\n arr1=[(lat1,lon1),(leftlst[0],leftlst[1]),(uplst[0],uplst[1]),(rightlst[0],rightlst[1])]\n\n string1=\"((\"\n for p in range(len(arr1)):\n string1=string1+str(arr1[p][0])+\" \"+str(arr1[p][1])+\",\"\n\n string1=string1+str(arr1[0][0])+\" \"+str(arr1[0][1])\n string1=string1+\"))\" \n string='POLYGON'+string1\n p = wkt.loads(string)\n m1 = self.MultiPolygon([p])\n\n 
multipolyg.append(m1)\n\n if(j==initialv):\n\n alt.append(m1)\n lsti.append(arr1)\n lstw.append(string1)\n else:\n\n poly_remain=self.get_polyg(multipolyg[j-initialv],multipolyg[j-initialv-1])\n alt.append(poly_remain) \n arr=self.get_array(poly_remain)\n\n lsti.append(arr)\n string=\"((\"\n for p in range(4):\n string=string+str(arr[p][0])+\" \"+str(arr[p][1])+\",\"\n string=string+str(arr[4][0])+\" \"+str(arr[4][1])+\")\"+\",\"+\"(\"\n for p in range(5,len(arr)-1):\n string=string+str(arr[p][0])+\" \"+str(arr[p][1])+\",\"\n string=string+str(arr[0][0])+\" \"+str(arr[0][1])\n string=string+\"))\" \n lstw.append(string)\n for o in range(m,finalv+1):\n alt.append(0)\n lsti.append(arr1)\n lstw.append(string1)\n lstii.append(lsti)\n lstww.append(lstw)\n alt.append(m1)\n\n s=pd.Series(alt,index=clm)\n df=df.append(s, ignore_index=True) \n return df\n\n#returns multipolygens\n def poly(self,string):\n stri='POLYGON'+string\n p = wkt.loads(stri)\n m1 = self.MultiPolygon([p])\n return(m1)\n\n#return part of pol1 which is non-overlapping\n def get_polyg(self,m1,m2):\n outmulti=[]\n for pol1 in m1:\n for pol2 in m2:\n if pol1.intersects(pol2)==True:\n nonoverlap = (pol1.symmetric_difference(pol2)).difference(pol2)\n outmulti.append(nonoverlap)\n else:\n outmulti.append(pol1)\n finalpol = self.MultiPolygon(outmulti)\n return finalpol\n\n#return array form of part of pol1 which is non-overlapping\n def get_polyg1(self,m1,m2):\n outmulti=[]\n for pol1 in m1:\n for pol2 in m2:\n if pol1.intersects(pol2)==True:\n pol1=pol1.buffer(0)\n pol2=pol2.buffer(0)\n nonoverlap = (pol1.symmetric_difference(pol2)).difference(pol2) \n outmulti.append(nonoverlap)\n else: \n outmulti.append(pol1)\n return outmulti\n\n#return array of given multipolygon\n def get_array(self,m1):\n interior_coords = []\n exterior_coords=[]\n coordinates_initial=[]\n for pol in m1:\n exterior_coords= pol.exterior.coords[:] \n for interior in pol.interiors:\n interior_coords += interior.coords[:]\n for i in range(len(exterior_coords)):\n coordinates_initial.append(exterior_coords[i])\n for i in range(len(interior_coords)):\n coordinates_initial.append(interior_coords[i]) \n\n return coordinates_initial\n\n#return area of given array of polygon\n def initial_area(self,coordinate_initial):\n geom = Polygon(coordinate_initial)\n geom_area = ops.transform(\n partial(\n pyproj.transform,\n pyproj.Proj(init='EPSG:32643'),\n pyproj.Proj(\n proj='aea',\n lat1=geom.bounds[0],\n lat2=geom.bounds[2])),\n geom)\n area1=geom_area.area\n return (area1) \n\n#return area of given array of polygon\n def remaining_area(self,coordinate_remaining):\n geom = Polygon(coordinate_remaining)\n geom_area = ops.transform(\n partial(\n pyproj.transform,\n pyproj.Proj(init='EPSG:32643'),\n pyproj.Proj(\n proj='aea',\n lat1=geom.bounds[0],\n lat2=geom.bounds[2])),\n geom)\n area2=geom_area.area\n return (area2) \n\n#return outer distance of each range in raw data\n def get_rangeArray(self,df):\n dis=[]\n clm=df.columns\n idx1=idx2=0\n for i in range (len(clm)):\n if (clm[i]=='Avg UE distance'):\n idx1=i\n if (clm[i]=='Sector Name'):\n idx2=i\n\n for i in range(idx1+1,idx2):\n st1=clm[i].split(\" of\",1)[1]\n st2=st1.split(\"in \",1)[0]\n st3=st2.split(\"-\",1)[1]\n if(st3[len(st3)-3:]==\"km \"):\n dis.append(float(st3[:len(st3)-3])*1000)\n else:\n dis.append(float(st3[:len(st3)-2]))\n return dis\n\n\n def haversine(self,angle_radians):\n return sin(angle_radians / 2.0) ** 2\n\n def inverse_haversine(self,h):\n return 2 * asin(sqrt(h)) # radians\n\n#calculate distance 
between two location using lat long data\n def dist(self,lat1, lon1, lat2, lon2):\n Earth_radius_km = 6371.0\n RADIUS = Earth_radius_km\n\n lat1 = radians(lat1)\n lat2 = radians(lat2)\n dlat = lat2 - lat1\n dlon = radians(lon2 - lon1)\n h = self.haversine(dlat) + cos(lat1) * cos(lat2) * self.haversine(dlon)\n return RADIUS * self.inverse_haversine(h)\n\n#return left cordinate the polygon taken anti-clockwise from site location \n def leftlist(self,dist,lat,lon,azimuth):\n left=vincenty(kilometers=dist/1000).destination(Point(lat, lon),azimuth).format_decimal()\n leftlst=list(map(float,(left.split(\",\"))))\n return leftlst\n\n#return upside cordinate the polygon taken anti-clockwise from site location \n\n def uplist(self,dist,lat,lon,azimuth):\n up=vincenty(kilometers=dist/1000).destination(Point(lat, lon),azimuth).format_decimal()\n uplst=list(map(float,(up.split(\",\"))))\n return uplst\n\n#return right cordinate the polygon taken anti-clockwise from site location \n\n def rightlist(self,dist,lat,lon,azimuth):\n right=vincenty(kilometers=dist/1000).destination(Point(lat, lon), azimuth).format_decimal()\n rightlst=list(map(float,(right.split(\",\"))))\n return rightlst\n\n##########################################################################################################################################\n\n def OnKeyTyped(self, event): \n global sectorName\n sectorName=event.GetString()\n print (event.GetString())\n \n def onOK(self, event):\n # Do something\n print ('Done! Check File at Source Destination..')\n\n#opens file browser for Upload Button\n def onOpenFile1(self, event):\n \n \"\"\"\n Create and show the Open FileDialog\n \"\"\"\n global directory1\n global file1\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n paths = dlg.GetPaths()\n print (\"You chose the following file(s):\")\n \n\n for path in paths:\n print (path+\"$\")\n directory1=os.path.split(path)[0]\n file1=os.path.split(path)[1]\n print(file1)\n dlg.Destroy()\n\n#opens file browser for Upload Button\n def onOpenFile2(self, event):\n \n \"\"\"\n Create and show the Open FileDialog\n \"\"\"\n global directory2\n global file2\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n paths = dlg.GetPaths()\n print (\"You chose the following file(s):\")\n \n\n for path in paths:\n print (path+\"$\")\n directory2=os.path.split(path)[0]\n file2=os.path.split(path)[1]\n print(file2)\n dlg.Destroy()\n\n \n#opens file browser for Upload Button \n def onOpenFile11(self, event):\n \n \"\"\"\n Create and show the Open FileDialog\n \"\"\"\n global directory3\n global file3\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n paths = dlg.GetPaths()\n print (\"You chose the following file(s):\")\n \n\n for path in paths:\n print (path+\"$\")\n directory3=os.path.split(path)[0]\n file3=os.path.split(path)[1]\n print(file3)\n dlg.Destroy()\n\n#opens file browser for Upload Button\n def onOpenFile22(self, event):\n \"\"\"\n Create and show the Open FileDialog\n \"\"\"\n global directory4\n global file4\n 
dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n paths = dlg.GetPaths()\n print (\"You chose the following file(s):\")\n \n\n for path in paths:\n print (path+\"$\")\n directory4=os.path.split(path)[0]\n file4=os.path.split(path)[1]\n print(file4)\n dlg.Destroy()\n\n\n#function used to parse the raw data and mapp to physical data \n def onClickParse(self, event):\n global dfpd\n print(\"Wait Code running...\")\n\n\n os.chdir(directory1)\n xl1 = pd.ExcelFile(file1)\n df=xl1.parse(xl1.sheet_names[0])\n\n os.chdir(directory2)\n xl2 = pd.ExcelFile(file2)\n dfpd=xl2.parse(xl2.sheet_names[0])\n\n#Classifing the data based on the Cell size\n df.dropna(subset=['Expect cell size'], inplace=True)\n df1=df[df['Expect cell size']==2.1]\n df2=df[df['Expect cell size']==5]\n\n df1 =df1.dropna(axis=1,how='all')\n df2 =df2.dropna(axis=1,how='all')\n df1.dropna(subset=['% UEs with distance to base station in the range of 0-78m in 2.1km cells'], inplace=True)\n df2.dropna(subset=['% UEs with distance to base station in the range of 0-468m in 5km cells'], inplace=True)\n df2=df2.sort_values(['LNBTS name', 'LNCEL name'], ascending=[True,True])\n df1=df1.sort_values(['LNBTS name', 'LNCEL name'], ascending=[True,True])\n\n dfn1 = df1.groupby(['LNCEL name'],as_index=False).agg({'PERIOD_START_TIME':'last','MRBTS/SBTS name':'last',\n 'LNBTS type':'last','LNBTS name':'last','Expect cell size':'mean',\n 'Avg UE distance':'mean','% UEs with distance to base station in the range of 0-78m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 78-156m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 156-312m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 312-468m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 468-624m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 624-780m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 780-1092m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 1092-1404m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 1404-1794m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 1794-2262m in 2.1km cells':'mean',\n '% UEs with distance to base station more than 2262m in 2.1km cells':'mean'})\n\n dfn1 = dfn1.rename(columns={'% UEs with distance to base station more than 2262m in 2.1km cells': '% UEs with distance to base station in the range of 2262-3000m in 2.1km cells'})\n\n dfn2 = df2.groupby(['LNCEL name'],as_index=False).agg({'PERIOD_START_TIME':'last','MRBTS/SBTS name':'last',\n 'LNBTS type':'last','LNBTS name':'last','Expect cell size':'mean',\n 'Avg UE distance':'mean','% UEs with distance to base station in the range of 0-468m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 468-1014m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 1014-1482m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 1482-2028m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 2028-2656m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 2656-3400m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 3.4-4.1km in 5km 
cells':'mean',\n '% UEs with distance to base station in the range of 4.1-4.8km in 5km cells':'mean',\n '% UEs with distance to base station in the range of 4.8-5.6km in 5km cells':'mean',\n '% UEs with distance to base station more than 5.6km in 5km cells':'mean'})\n\n dfn2 = dfn2.rename(columns={'% UEs with distance to base station more than 5.6km in 5km cells': '% UEs with distance to base station in the range of 4.8-6km in 5km cells'})\n\n\n dfn1=self.Mapp_sectorID(dfn1)\n dfn2=self.Mapp_sectorID(dfn2)\n\n dfpd.dropna(subset=['Sector Name','Lat', 'Long','Azimuth'], inplace=True)\n dfn1.dropna(subset=dfn1.columns, inplace=True)\n dfn2.dropna(subset=dfn2.columns,inplace=True)\n\n#generating intermediate data\n newfile=directory1+\"\\Parsed_Intermediate_data.xlsx\"\n writer = pd.ExcelWriter(newfile)\n dfn1.to_excel(writer, sheet_name='UE-2.1(N)',index=False)\n dfn2.to_excel(writer, sheet_name='UE-5(N)',index=False)\n writer.save()\n\n#making sector name uniform in raw data and physical data \n for i in range(len(dfpd)):\n s1=str(dfpd.iat[i,4])\n l1=s1[len(s1)-1:]\n s1=s1[:len(s1)-2]\n sn1=s1+\"C\"+l1\n dfpd.iat[i,4]=sn1\n \n dfpd=dfpd.sort_values(by='Sector Name', ascending=False)\n dfn1=dfn1.sort_values(by='Sector Name', ascending=False)\n dfn2=dfn2.sort_values(by='Sector Name', ascending=False)\n dfpd = dfpd.rename(columns={'Sector Name': 'Sector Name-PD'})\n\n df_final_1=self.get_FinalData(dfn1)\n df_final_2=self.get_FinalData(dfn2)\n\n#creating \"parsed.xlsx\" file\n newfile=directory1+\"\\Parsed.xlsx\"\n print(\"About to write\")\n writer = pd.ExcelWriter(newfile)\n df_final_1.to_excel(writer, sheet_name='UE-2.1',index=False)\n df_final_2.to_excel(writer, sheet_name='UE-5.0',index=False)\n writer.save()\n dfpd.iloc[0:0]\n\n\n print ('Done! Check File at Source Destination..')\n\n\n def get_FinalData(self,df):\n clm=dfpd.columns \n clm=clm.append(df.columns)\n df_final=pd.DataFrame(columns=clm)\n\n for i in range(len(df)):\n for j in range(len(dfpd)):\n if(df.iat[i,len(df.columns)-1]==dfpd.iat[j,4]):\n data=[]\n for k in range(len(dfpd.columns)):\n data.append(dfpd.iat[j,k])\n for k in range(len(df.columns)):\n data.append(df.iat[i,k])\n\n s=pd.Series(data,index=clm)\n df_final=df_final.append(s, ignore_index=True)\n \n return df_final\n\n\n\n\n\n def Mapp_sectorID(self,dfn1):\n df_sector_1=pd.DataFrame(columns=['Sector Name'])\n for i in range(len(dfn1)):\n s1=str(dfn1.iat[i,0])\n if(len(s1)>2):\n l1=s1[len(s1)-1:]\n s1=s1[:len(s1)-2]\n sn1=s1+\"C\"+l1\n dfn1.iat[i,0]=sn1\n\n if (type(dfn1.iat[i,0])==str):\n k=pd.Series(dfn1.iat[i,0].split('-')[1],index=['Sector Name'])\n df_sector_1=df_sector_1.append(k, ignore_index=True)\n else:\n k=pd.Series(\"NaN\",index=['Sector Name'])\n df_sector_1=df_sector_1.append(k, ignore_index=True)\n dfn1['Sector Name']=df_sector_1\n return(dfn1)\n \n\n# Run the program\nif __name__ == '__main__':\n app = wx.App()\n frame = MyForm().Show()\n app.MainLoop()\n del app\n","sub_path":"Coverage_FrameWork.py","file_name":"Coverage_FrameWork.py","file_ext":"py","file_size_in_byte":52622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"293614892","text":"from django.db import models\nfrom datetime import datetime\nfrom ckeditor.fields import RichTextField\n\n# Create your models here.\nclass Youtuber(models.Model):\n\n # Restricting user\n crew_choices = (\n ('Solo', 'Solo'),\n ('Small', 'Small'),\n ('Large', 'Large'),\n )\n\n camera_choices = (\n ('Canon', 'Canon'),\n ('Sony', 'Sony'),\n ('Nikon', 
'Nikon'),\n ('Red', 'Red'),\n ('Fuji', 'Fuji'),\n ('Gopro', 'Gopro'),\n ('Other', 'Other'),\n )\n\n categories_choices = (\n ('Code', 'Code'),\n ('Mobile Review', 'Mobile Review'),\n ('Vlogs', 'Vlogs'),\n ('Comedy', 'Comedy'),\n ('Gaming', 'Gaming'),\n ('Films', 'Films'),\n ('Cooking', 'Cooking'),\n ('Others', 'Others'),\n )\n\n name = models.CharField(max_length=50)\n price = models.IntegerField()\n photo = models.ImageField(upload_to='media/ytubers/%Y/%m/%d/')\n video_url = models.CharField(max_length=250)\n description = RichTextField()\n city = models.CharField(max_length=50)\n age = models.IntegerField()\n height = models.IntegerField()\n crew = models.CharField(choices=crew_choices , max_length=250)\n camera_type = models.CharField(choices=camera_choices, max_length=250)\n subs_count = models.CharField(max_length=250)\n category = models.CharField(choices=categories_choices, max_length=250)\n is_featured = models.BooleanField(default=False)\n created_date = models.DateTimeField(default=datetime.now, blank=True)\n\n def __str__(self):\n return self.name \n ","sub_path":"tubers/youtubers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"147928748","text":"from stanza.research import config\nif __name__ == '__main__':\n config.redirect_output()\n\nfrom stanza.cluster import pick_gpu\nparser = config.get_options_parser()\nparser.add_argument('--device', default=None,\n help='The device to use in Theano (\"cpu\" or \"gpu[0-n]\"). If None, '\n 'pick a free-ish device automatically.')\noptions, extras = parser.parse_known_args()\nif '-h' in extras or '--help' in extras:\n # If user is just asking for the options, don't scare them\n # by saying we're picking a GPU...\n pick_gpu.bind_theano('cpu')\nelse:\n pick_gpu.bind_theano(options.device)\n\n\nfrom stanza.monitoring import progress\nfrom stanza.research import evaluate, metrics, output\nimport datetime\nimport numbers\nimport learners\nimport color_instances\n\nparser.add_argument('--learner', default='Histogram', choices=learners.LEARNERS.keys(),\n help='The name of the model to use in the experiment.')\nparser.add_argument('--load', metavar='MODEL_FILE', default=None,\n help='If provided, skip training and instead load a pretrained model '\n 'from the specified path. If None or an empty string, train a '\n 'new model.')\nparser.add_argument('--train_size', type=int, default=[None], nargs='+',\n help='The number of examples to use in training. This number should '\n '*include* examples held out for validation. If None, use the '\n 'whole training set.')\nparser.add_argument('--validation_size', type=int, default=[0], nargs='+',\n help='The number of examples to hold out from the training set for '\n 'monitoring generalization error.')\nparser.add_argument('--test_size', type=int, default=[None], nargs='+',\n help='The number of examples to use in testing. '\n 'If None, use the whole dev/test set.')\nparser.add_argument('--data_source', default=['dev'], nargs='+',\n choices=color_instances.SOURCES.keys(),\n help='The type of data to use. 
Can supply several for sequential training.')\nparser.add_argument('--output_train_data', type=config.boolean, default=False,\n help='If True, write out the training dataset (after cutting down to '\n '`train_size`) as a JSON-lines file in the output directory.')\nparser.add_argument('--output_test_data', type=config.boolean, default=False,\n help='If True, write out the evaluation dataset (after cutting down to '\n '`test_size`) as a JSON-lines file in the output directory.')\nparser.add_argument('--listener', type=config.boolean, default=False,\n help='If True, evaluate on listener accuracy (description -> color). '\n 'Otherwise evaluate on speaker accuracy (color -> description).')\nparser.add_argument('--progress_tick', type=int, default=300,\n help='The number of seconds between logging progress updates.')\n\n\ndef main():\n options = config.options()\n\n progress.set_resolution(datetime.timedelta(seconds=options.progress_tick))\n\n train_datasets = []\n validation_datasets = []\n test_datasets = []\n\n if len(options.train_size) == 1:\n options.train_size = options.train_size * len(options.data_source)\n else:\n assert len(options.train_size) == len(options.data_source)\n if len(options.validation_size) == 1:\n options.validation_size = options.validation_size * len(options.data_source)\n else:\n assert len(options.validation_size) == len(options.data_source)\n if len(options.test_size) == 1:\n options.test_size = options.test_size * len(options.data_source)\n else:\n assert len(options.test_size) == len(options.data_source)\n\n for source, train_size, validation_size, test_size in zip(options.data_source,\n options.train_size,\n options.validation_size,\n options.test_size):\n train_insts = color_instances.SOURCES[source].train_data(\n listener=options.listener\n )[:train_size]\n if validation_size:\n assert validation_size < len(train_insts), \\\n ('No training data after validation split! 
(%d <= %d)' %\n (len(train_insts), validation_size))\n validation_insts = train_insts[-validation_size:]\n validation_datasets.append(validation_insts)\n train_insts = train_insts[:-validation_size]\n else:\n validation_datasets.append(None)\n train_datasets.append(train_insts)\n test_insts = color_instances.SOURCES[source].test_data(\n options.listener\n )[:test_size]\n test_datasets.append(test_insts)\n\n learner = learners.new(options.learner)\n\n m = [metrics.log_likelihood,\n metrics.log_likelihood_bits,\n metrics.perplexity,\n metrics.aic]\n example_inst = get_example_inst(test_datasets, train_datasets)\n if options.listener and not isinstance(example_inst.output, numbers.Integral):\n m.append(metrics.squared_error)\n elif isinstance(example_inst.output, (tuple, list)):\n m.append(metrics.prec1)\n if example_inst.output and isinstance(example_inst.output, basestring):\n m.extend([metrics.bleu, metrics.wer,\n metrics.token_perplexity_macro, metrics.token_perplexity_micro])\n else:\n m.append(metrics.accuracy)\n if example_inst.output and isinstance(example_inst.output, basestring):\n m.extend([metrics.bleu, metrics.wer,\n metrics.token_perplexity_macro, metrics.token_perplexity_micro])\n\n multi_train = (len(options.data_source) > 1)\n if options.load:\n with open(options.load, 'rb') as infile:\n learner.load(infile)\n\n train_results = None\n else:\n if hasattr(learner, '_data_to_arrays'):\n # XXX: is there a better way to ensure that the vocabulary is defined\n # before training starts?\n for train_insts in train_datasets[1:]:\n learner._data_to_arrays(train_insts, init_vectorizer=True)\n\n for i, (source, train_insts, validation_insts) in enumerate(zip(options.data_source,\n train_datasets,\n validation_datasets)):\n if not train_insts:\n continue\n\n if i > 0:\n learner.train(train_insts, validation_insts, metrics=m, keep_params=True)\n else:\n learner.train(train_insts, validation_insts, metrics=m)\n with open(config.get_file_path('model.p'), 'wb') as outfile:\n learner.dump(outfile)\n\n if multi_train:\n split_id = 'train_' + source\n else:\n split_id = 'train'\n train_results = evaluate.evaluate(learner, train_insts, metrics=m, split_id=split_id,\n write_data=options.output_train_data)\n if options.verbosity != 0:\n output.output_results(train_results, split_id)\n\n for i, (source, test_insts) in enumerate(zip(options.data_source,\n test_datasets)):\n if not test_insts:\n continue\n if multi_train:\n split_id = 'eval_' + source\n else:\n split_id = 'eval'\n test_results = evaluate.evaluate(learner, test_insts, metrics=m, split_id=split_id,\n write_data=options.output_test_data)\n if options.verbosity != 0:\n output.output_results(test_results, split_id)\n\n return train_results, test_results\n\n\ndef get_example_inst(test_datasets, train_datasets):\n # Use test if any are nonempty, if not, back off to train\n for dataset in test_datasets:\n if dataset:\n return dataset[0]\n for dataset in train_datasets:\n if dataset:\n return dataset[0]\n assert False, \"No data, can't determine correct evaluation metrics\"\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run_experiment.py","file_name":"run_experiment.py","file_ext":"py","file_size_in_byte":8472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"283222027","text":"import numpy as np\r\nimport cv2\r\n\r\n#numpy.ones reshapes the array--width600,height 800\r\nrect=np.ones((600,800,3),dtype=np.uint8)*255\r\n\r\n#bgr--red color below one\r\n#10 is the 
thickness\r\ncv2.rectangle(rect,(0,int(600/2)),(int(800/2),599),(0,0,00),20)\r\ncv2.imshow(\"image\",rect)\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n\r\n#bgr below in green\r\n#-1 so no border\r\n#cv2.rectangle(rect,(int(800/2),0),(799,int(600/2)),(0,255,0),-1)\r\n#cv2.imshow(\"image\",rect)\r\n#cv2.waitKey()\r\n#cv2.destroyAllWindows()","sub_path":"rect.py","file_name":"rect.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"298644150","text":"import taichi as ti\nimport numpy as np\nimport matplotlib.cm as cm\n\nti.init()\n\nlx = 1.5\nly = 0.3\n\nnx = 60\nny = 20\n\nvelo_rel = 0.01\np_rel = 0.03\n\n# Add 1 cell padding to all directions.\np = ti.var(dt=ti.f32, shape=(nx + 2, ny + 2))\npcor = ti.var(dt=ti.f32, shape=(nx + 2, ny + 2))\n\nu = ti.var(dt=ti.f32, shape=(nx + 3, ny + 2))\nu0 = ti.var(dt=ti.f32, shape=(nx + 3, ny + 2))\nucor = ti.var(dt=ti.f32, shape=(nx + 3, ny + 2))\nu_post = ti.var(dt=ti.f32, shape=(nx + 2, ny + 2))\n\nv = ti.var(dt=ti.f32, shape=(nx + 2, ny + 3))\nvcor = ti.var(dt=ti.f32, shape=(nx + 2, ny + 3))\nv0 = ti.var(dt=ti.f32, shape=(nx + 2, ny + 3))\nv_post = ti.var(dt=ti.f32, shape=(nx + 2, ny + 2))\n\n# ct stands for Cell Type.\n# ct = 0 -> Fluid\n# ct = 1 -> Solid\nct = ti.var(dt=ti.i32, shape=(nx + 2, ny + 2))\n\nrho = 100\nmu = 0.1\ndx = lx / nx\ndy = ly / ny\ndt = 0.001\n\nAu = ti.var(dt=ti.f32, shape=((nx + 1) * ny, (nx + 1) * ny))\nbu = ti.var(dt=ti.f32, shape=((nx + 1) * ny))\nxu = ti.var(dt=ti.f32, shape=((nx + 1) * ny))\nxuold = ti.var(dt=ti.f32, shape=((nx + 1) * ny))\n\nAv = ti.var(dt=ti.f32, shape=(nx * (ny + 1), nx * (ny + 1)))\nbv = ti.var(dt=ti.f32, shape=(nx * (ny + 1)))\nxv = ti.var(dt=ti.f32, shape=(nx * (ny + 1)))\nxvold = ti.var(dt=ti.f32, shape=(nx * (ny + 1)))\n\nAp = ti.var(dt=ti.f32, shape=(nx * ny, nx * ny))\nbp = ti.var(dt=ti.f32, shape=(nx * ny))\nxp = ti.var(dt=ti.f32, shape=(nx * ny))\n\n\ndef init():\n for i, j in ti.ndrange(nx + 2, ny + 2):\n p[i, j] = 100 - i / nx\n for i, j in ti.ndrange(nx + 3, ny + 2):\n u[i, j] = 5.0\n u0[i, j] = u[i, j]\n for i, j in ti.ndrange(nx + 2, ny + 3):\n v[i, j] = 0.0\n v0[i, j] = v[i, j]\n\n for i, j in ti.ndrange(nx + 2, ny + 2):\n ct[i, j] = 1 # \"1\" stands for solid\n for i, j in ti.ndrange((1, nx + 1), (1, ny + 1)):\n ct[i, j] = -1 # \"-1\" stands for fluid\n\n for i, j in ti.ndrange(nx, ny):\n if (((i - 31)**2 + (j - 31)**2) < 36):\n ct[i, j] = 1\n u[i, j] = 0\n u0[i, j] = 0\n v[i, j] = 0\n v0[i, j] = 0\n\n\ndef fill_Au():\n for i, j in ti.ndrange((1, nx + 2), (1, ny + 1)):\n k = (i - 1) * ny + (j - 1)\n\n # Inlet and Outlet\n if (ct[i - 1, j]) == 1 or (ct[i, j] + ct[i - 1, j]) == 2:\n Au[k, k] = 1.0\n bu[k] = u[i, j]\n # Outlet\n elif (ct[i, j] == 1):\n Au[k, k] = 1.0\n Au[k, k - ny] = -1.0\n #bu[k] = u[i - 1, j]\n bu[k] = 0.0\n\n # Normal internal cells\n else:\n Au[k, k - 1] = -mu * dx / dy - max(\n [0, -rho * 0.5 * (v[i - 1, j] + v[i, j]) * dx]) # an\n Au[k, k + 1] = -mu * dx / dy - max(\n [0, rho * 0.5 * (v[i - 1, j + 1] + v[i, j + 1]) * dx]) # as\n Au[k, k - ny] = -mu * dy / dx - max(\n [0, rho * 0.5 * (u[i, j] + u[i - 1, j]) * dy]) # aw\n Au[k, k + ny] = -mu * dy / dx - max(\n [0, -rho * 0.5 * (u[i, j] + u[i + 1, j]) * dy]) # ae\n Au[k, k] = -Au[k, k - 1] - Au[k, k + 1] - Au[k, k - ny] - Au[\n k, k + ny] + rho * dx * dy / dt # ap\n bu[k] = (p[i - 1, j] - p[i, j]\n ) * dy + rho * dx * dy / dt * u0[i, j] # <= Unsteady term\n\n for i, j in ti.ndrange((1, nx + 2), (1, ny + 1)):\n k = (i - 
1) * ny + (j - 1)\n        # Upper and lower boundary\n        if (ct[i, j] + ct[i, j - 1]) == 0:\n            Au[k, k] = Au[k, k] - Au[k, k - 1] + 2 * mu\n            Au[k, k - 1] = 0\n        elif (ct[i, j] + ct[i, j + 1]) == 0:\n            Au[k, k] = Au[k, k] - Au[k, k + 1] + 2 * mu\n            Au[k, k + 1] = 0\n\n\ndef fill_Av():\n    for i, j in ti.ndrange((1, nx + 1), (1, ny + 2)):\n        k = (i - 1) * (ny + 1) + (j - 1)\n        # Upper and lower boundary\n        if (ct[i, j] + ct[i, j - 1]) == 0 or (ct[i, j] + ct[i, j - 1]) == 2:\n            Av[k, k] = 1.0\n            bv[k] = v[i, j]\n        else:\n            \"\"\"\n            TODO: Doesn't cover the inlet and outlet boundary. It actually accesses\n            elements out of bounds, for example, Av[1,-30].\n            However, since solve_v converts to numpy, A[1,-30] becomes\n            0.0 automatically.\n            \"\"\"\n            Av[k, k - 1] = -mu * dx / dy - max(\n                [0, -rho * 0.5 * (v[i, j - 1] + v[i, j]) * dx])  # an\n            Av[k, k + 1] = -mu * dx / dy - max(\n                [0, rho * 0.5 * (v[i, j + 1] + v[i, j]) * dx])  # as\n\n            Av[k, k - ny - 1] = -mu * dy / dx - max(\n                [0, rho * 0.5 * (u[i, j] + u[i, j - 1]) * dy])  # aw\n            Av[k, k + ny + 1] = -mu * dy / dx - max(\n                [0, -rho * 0.5 * (u[i + 1, j - 1] + u[i + 1, j]) * dy])  # ae\n            Av[k, k] = -Av[k, k - 1] - Av[k, k + 1] - Av[k, k - ny - 1] - Av[\n                k, k + ny + 1] + rho * dx * dy / dt  # ap\n            bv[k] = (p[i, j] - p[i, j - 1]) * dx + rho * dx * dy / dt * v0[i,\n                                                                           j]\n\n\ndef solve_axb(A, b):\n    from scipy.sparse.linalg import qmr, bicg\n    from scipy.sparse import csc_matrix\n    print(\"Now converting A and b to numpy...\")\n    A_np = A.to_numpy()\n    b_np = b.to_numpy()\n    print(\"Finished converting A and b to numpy...\")\n    print(\"Now solving Ax=b...\")\n    ans = np.linalg.solve(A_np, b_np)\n    print(\"Finished solving Ax=b...\")\n    return ans\n    #ans, exitCode = bicg(A_np, b_np, atol='legacy', tol=1e-3)\n    # return ans\n\n\ndef sol_back_matrix(mat, sol):\n    mat_width = mat.shape()[0] - 2\n    mat_height = mat.shape()[1] - 2\n    for i, j in ti.ndrange(mat_width, mat_height):\n        mat[i + 1, j + 1] = sol[i * mat_height + j]\n\n\ndef xu_back():\n    for i, j in ti.ndrange(nx + 1, ny):\n        u[i + 1, j + 1] = xu[i * ny + j]\n\n\ndef xv_back():\n    for i, j in ti.ndrange(nx, ny + 1):\n        # xv is laid out with stride (ny + 1), matching fill_Av and iter_solve_v\n        v[i + 1, j + 1] = xv[i * (ny + 1) + j]\n\n\ndef iter_solve_u():\n    #A = Au.to_numpy()\n    #b = bu.to_numpy()\n    res = 100.0\n\n    while np.abs(res) > 1e-3:\n        res = 0.0\n        for i, j in ti.ndrange(nx + 1, ny):\n            k = i * ny + j\n            # print(\"k = \", k, \"ny = \", ny, \"k-ny = \", k - ny, \"Au[k-ny] = \",\n            #       Au[k - ny])\n            xu[k] = 1 / Au[k, k] * (-Au[k, k - 1] * u[i, j - 1] -\n                                    Au[k, k + 1] * u[i, j + 1] -\n                                    Au[k, k - ny] * u[i - 1, j] -\n                                    Au[k, k + ny] * u[i + 1, j] + bu[k])\n\n            res = res + xu[k] - xuold[k]\n        xu_back()\n        for i, j in ti.ndrange(nx + 1, ny):\n            k = i * ny + j\n            xuold[k] = xu[k]\n        print(\"Solving x momentum, the residual is now \", res)\n\n\ndef iter_solve_v():\n    #A = Au.to_numpy()\n    #b = bu.to_numpy()\n    res = 100.0\n\n    while np.abs(res) > 1e-3:\n        res = 0.0\n        for i, j in ti.ndrange(nx, ny + 1):\n            k = i * (ny + 1) + j\n            # print(\"k = \", k, \"ny = \", ny, \"k-ny = \", k - ny, \"Au[k-ny] = \",\n            #       Au[k - ny])\n            xv[k] = 1 / Av[k, k] * (-Av[k, k - 1] * v[i, j - 1] -\n                                    Av[k, k + 1] * v[i, j + 1] -\n                                    Av[k, k - ny - 1] * v[i - 1, j] -\n                                    Av[k, k + ny + 1] * v[i + 1, j] + bv[k])\n\n            res = res + xv[k] - xvold[k]\n        xv_back()\n        for i, j in ti.ndrange(nx, ny + 1):\n            k = i * (ny + 1) + j\n            xvold[k] = xv[k]\n        print(\"Solving y momentum, the residual is now \", res)\n\n\ndef solve_moment_x():\n    print(\"Now filling Au...\")\n    fill_Au()\n    print(\"Finished filling Au...\")\n    \n    print(\"Solving x momentum...\")\n    # solve_axb returns a numpy array\n    # needs 
to convert back to taichi\n #import numpy.linalg as npl\n # print(\"Shape of Au is\", Au.shape(), \"Rank of Au is:\",\n # npl.matrix_rank(Au.to_numpy()))\n xu.from_numpy(solve_axb(Au, bu))\n # iter_solve_u()\n print(\"Converting xu to u...\")\n sol_back_matrix(u, xu)\n print(\"Finished converting xu to u...\")\n\ndef solve_moment_y():\n fill_Av()\n print(\"Solving y momentum...\")\n #import numpy.linalg as npl\n # print(\"Shape of Av is\", Av.shape(), \"Rank of Av is:\",\n # npl.matrix_rank(Av.to_numpy()))\n xv.from_numpy(solve_axb(Av, bv))\n # iter_solve_v()\n sol_back_matrix(v, xv)\n\n\ndef correct_u():\n ucor_max = 0.0\n for i, j in ti.ndrange((1, nx + 2), (1, ny + 1)):\n k = (i - 1) * ny + (j - 1)\n # Upper and lower boundary\n if (ct[i - 1, j] + ct[i, j]) == 0 or (ct[i - 1, j] + ct[i, j]) == 2:\n pass\n else:\n ucor[i, j] = (pcor[i - 1, j] - pcor[i, j]) * dy / Au[k, k]\n u[i, j] = u[i, j] + ucor[i, j] * velo_rel\n if np.abs(ucor[i, j] / (u[i, j] + 1.0e-9)) >= ucor_max:\n ucor_max = np.abs(ucor[i, j] / (u[i, j] + 1.0e-9))\n return ucor_max\n\n\ndef correct_v():\n vcor_max = 0.0\n for i, j in ti.ndrange((1, nx + 1), (1, ny + 2)):\n k = (i - 1) * (ny + 1) + (j - 1)\n # Upper and lower boundary\n if (ct[i, j] + ct[i, j - 1]) == 0 or (ct[i, j] + ct[i, j - 1]) == 2:\n pass\n else:\n vcor[i, j] = (pcor[i, j] - pcor[i, j - 1]) * dx / Av[k, k]\n v[i, j] = v[i, j] + vcor[i, j] * velo_rel\n if np.abs(vcor[i, j] / (v[i, j] + 1.0e-9)) >= vcor_max:\n vcor_max = np.abs(vcor[i, j] / (v[i, j] + 1.0e-9))\n return vcor_max\n\n\ndef correct_uconserv():\n inlet_flux = 0.0\n outlet_flux = 0.0\n for i in range(1, ny + 1):\n inlet_flux = inlet_flux + u[1, i]\n outlet_flux = outlet_flux + u[nx + 1, i]\n print(\"Inlet flux = \", inlet_flux, \"; Outlet flux = \", outlet_flux)\n\n coef = inlet_flux / (outlet_flux + 1.0e-9)\n for i in range(1, ny + 1):\n u[nx + 1, i] = coef * u[nx + 1, i]\n\n\ndef check_uconserv():\n inlet_flux = 0.0\n outlet_flux = 0.0\n for i in range(1, ny + 1):\n inlet_flux = inlet_flux + u[1, i]\n outlet_flux = outlet_flux + u[nx + 1, i]\n print(\"Inlet flux = \", inlet_flux, \"; Outlet flux = \", outlet_flux)\n\n\ndef fill_Ap():\n for i, j in ti.ndrange((1, nx + 1), (1, ny + 1)):\n k = (i - 1) * ny + (j - 1)\n bp[k] = rho * (u[i, j] - u[i + 1, j]) * dy + rho * (v[i, j + 1] -\n v[i, j]) * dx\n # Go back to Av matrix, find the corresponding v\n vk = (i - 1) * (ny + 1) + (j - 1)\n Ap[k, k - 1] = -rho * dx * dx / Av[vk, vk]\n Ap[k, k + 1] = -rho * dx * dx / Av[vk + 1, vk + 1]\n # Go back to Au matrix\n uk = k\n Ap[k, k - ny] = -rho * dy * dy / Au[uk, uk]\n Ap[k, k + ny] = -rho * dy * dy / Au[uk + ny, uk + ny]\n\n if (ct[i, j] + ct[i, j - 1]) == 0:\n Ap[k, k - 1] = 0\n elif (ct[i, j] + ct[i, j + 1]) == 0:\n Ap[k, k + 1] = 0\n elif (ct[i, j] + ct[i - 1, j]) == 0:\n Ap[k, k - ny] = 0\n elif (ct[i, j] + ct[i + 1, j]) == 0:\n Ap[k, k + ny] = 0\n Ap[k, k] = -Ap[k, k - 1] - Ap[k, k + 1] - Ap[k, k - ny] - Ap[k, k + ny]\n\n\ndef solve_pcor():\n fill_Ap()\n #import numpy.linalg as npl\n # print(\"Shape of Ap is\", Ap.shape(), \"Rank of Ap is:\",\n # npl.matrix_rank(Ap.to_numpy()))\n sumbp = 0.0\n for i, j in ti.ndrange((1, nx + 1), (1, ny + 1)):\n k = (i - 1) * ny + (j - 1)\n sumbp = sumbp + bp[k]\n print(\"Sum bp before solving pcorr was\", sumbp)\n\n print(\"Now solving pcor...\")\n xp.from_numpy(solve_axb(Ap, bp))\n sol_back_matrix(pcor, xp)\n\n for i, j in ti.ndrange(nx + 2, ny + 2):\n if ct[i, j] == 1:\n pass\n else:\n p[i, j] = p[i, j] + p_rel * pcor[i, j]\n\n\ndef visual(mat):\n A = 
mat.to_numpy()\n    import matplotlib.pyplot as plt\n    # 'nearest' interpolation - faithful but blocky\n    plt.imshow(A, interpolation='nearest', cmap=cm.rainbow)\n    # plt.colorbar()\n    # plt.show()\n    plt.savefig(\"karmen\" + str(iter) + \".png\", dpi=300)\n\n\ndef display():\n    import matplotlib.pyplot as plt\n    fig, ax = plt.subplots(2, 6)\n\n    pcm = ax[0, 0].pcolormesh(u.to_numpy(), cmap=cm.rainbow)\n    ax[0, 0].set_title(\"U velocity\")\n    fig.colorbar(pcm, ax=ax[0, 0])\n\n    pcm = ax[0, 1].pcolormesh(v.to_numpy(), cmap=cm.rainbow)\n    ax[0, 1].set_title(\"V velocity\")\n    fig.colorbar(pcm, ax=ax[0, 1])\n\n    pcm = ax[0, 2].pcolormesh(p.to_numpy(), cmap=cm.rainbow)\n    ax[0, 2].set_title(\"Pressure\")\n    fig.colorbar(pcm, ax=ax[0, 2])\n\n    pcm = ax[0, 3].pcolormesh(pcor.to_numpy(), cmap=cm.rainbow)\n    ax[0, 3].set_title(\"p correction\")\n    fig.colorbar(pcm, ax=ax[0, 3])\n\n    pcm = ax[0, 4].pcolormesh(ucor.to_numpy(), cmap=cm.rainbow)\n    ax[0, 4].set_title(\"u correction\")\n    fig.colorbar(pcm, ax=ax[0, 4])\n\n    pcm = ax[0, 5].pcolormesh(vcor.to_numpy(), cmap=cm.rainbow)\n    ax[0, 5].set_title(\"v correction\")\n    fig.colorbar(pcm, ax=ax[0, 5])\n\n    ax[1, 0].plot(p.to_numpy()[1:int(nx + 1), int(ny / 2)])\n    ax[1, 0].set_title(\"pressure drop\")\n\n    ax[1, 1].plot(u.to_numpy()[int(0.2 * nx), 1:int(ny + 1)])\n    ax[1, 1].set_title(\"U profile at 60\")\n\n    ax[1, 2].plot(u.to_numpy()[int(0.5 * nx), 1:int(ny + 1)])\n    ax[1, 2].set_title(\"U profile at 100\")\n\n    ax[1, 3].plot(u.to_numpy()[int(0.8 * nx), 1:int(ny + 1)])\n    ax[1, 3].set_title(\"U profile at 120\")\n\n    fig.set_size_inches(16, 9)\n    fig.tight_layout()\n\n    plt.savefig(\"Iteration_i\" + str(iter) + \"_t\" + str(jter) + \".png\", dpi=400)\n\n\nif __name__ == \"__main__\":\n    init()\n\n    check_uconserv()\n    for jter in range(1000):\n        print(\"Solving the next time step, currently the \", jter, \"th iteration...\")\n        for iter in range(10000):\n            print(\"Looping through the inner loop, it's the \", iter, \"th iteration out of 10000...\")\n            solve_moment_x()\n            solve_moment_y()\n            correct_uconserv()\n            check_uconserv()\n            solve_pcor()\n            resu = correct_u()\n            resv = correct_v()\n            print(\"Resu = \", resu, \"Resv = \", resv)\n        # copy the current fields into u0/v0; a plain `u0 = u` would only rebind\n        # the Python names instead of copying the taichi field data\n        u0.from_numpy(u.to_numpy())\n        v0.from_numpy(v.to_numpy())\n        display()\n","sub_path":"homework1/re600_sph/simpler_uns.py","file_name":"simpler_uns.py","file_ext":"py","file_size_in_byte":13989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"154055157","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb  6 18:17:08 2021\r\n\r\n@author: user\r\n\"\"\"\r\n\r\nclass BittreeNode:\r\n    def __init__(self,data):\r\n        self.data=data\r\n        self.lchild=None\r\n        self.rchild=None\r\n        self.parent=None\r\n        \r\n        \r\nclass BST:\r\n    def __init__(self,li=None):\r\n        self.root=None\r\n        if li:\r\n            for val in li:\r\n                self.insert_no_rec(val)\r\n                \r\n    def insert(self,node,val):\r\n        if not node:\r\n            node=BittreeNode(val)\r\n        elif val<node.data:\r\n            node.lchild=self.insert(node.lchild, val)\r\n            node.lchild.parent=node\r\n        elif val>node.data:\r\n            node.rchild=self.insert(node.rchild, val)\r\n            node.rchild.parent=node\r\n        # equal values are discarded\r\n        return node\r\n    \r\n    def insert_no_rec(self,val):\r\n        p=self.root\r\n        if not p:\r\n            self.root=BittreeNode(val)\r\n            return\r\n        while True:\r\n            if val<p.data:\r\n                if p.lchild:\r\n                    p=p.lchild\r\n                else:\r\n                    p.lchild=BittreeNode(val)\r\n                    p.lchild.parent=p\r\n                    return\r\n            elif val>p.data:\r\n                if p.rchild:\r\n                    p=p.rchild\r\n                else:\r\n                    p.rchild=BittreeNode(val)\r\n                    p.rchild.parent=p\r\n                    return\r\n            else:\r\n                return\r\n            \r\n    def query(self,node,val):\r\n        if not node:\r\n            return None\r\n        elif node.data<val:\r\n            return self.query(node.rchild,val)\r\n        elif node.data>val:\r\n            return self.query(node.lchild,val)\r\n        else:\r\n            return node\r\n        \r\n    def query_no_rec(self,val):\r\n        p=self.root\r\n        while p:\r\n            if p.data<val:\r\n                p=p.rchild\r\n            elif p.data>val:\r\n                p=p.lchild\r\n            else:\r\n                return p\r\n        return None\r\n    \r\n    def pre_order(self,root):\r\n        if root:\r\n            print(root.data,end=',')\r\n            self.pre_order(root.lchild)\r\n            self.pre_order(root.rchild)\r\n\r\n    def in_order(self,root):\r\n        if root:\r\n            self.in_order(root.lchild)\r\n            print(root.data,end=',')\r\n            self.in_order(root.rchild)\r\n            \r\n    def post_order(self,root):\r\n        if root:\r\n            self.post_order(root.lchild)\r\n            self.post_order(root.rchild)\r\n            print(root.data,end=',')\r\n    \r\n    \r\n    def __remove_node_1(self,node):\r\n        if not node.parent:\r\n            self.root=None\r\n        elif node==node.parent.lchild:\r\n            node.parent.lchild=None\r\n        else:\r\n            node.parent.rchild=None\r\n    \r\n    def __remove_node_21(self,node):\r\n        # node has only a left child\r\n        if not node.parent:\r\n            self.root=node.lchild\r\n            node.lchild.parent=None\r\n        elif node==node.parent.lchild:\r\n            node.parent.lchild=node.lchild\r\n            node.lchild.parent=node.parent\r\n        else:\r\n            node.parent.rchild=node.lchild\r\n            node.lchild.parent=node.parent\r\n            \r\n    def __remove_node_22(self,node):\r\n        # node has only a right child\r\n        if not node.parent:\r\n            self.root=node.rchild\r\n            node.rchild.parent=None\r\n        elif node==node.parent.lchild:\r\n            node.parent.lchild=node.rchild\r\n            node.rchild.parent=node.parent\r\n        else:\r\n            node.parent.rchild=node.rchild\r\n            node.rchild.parent=node.parent\r\n            \r\n    def delete(self,val):\r\n        if self.root:\r\n            node =self.query_no_rec(val)\r\n            if not node:\r\n                return False\r\n            if not node.lchild and not node.rchild:\r\n                self.__remove_node_1(node)\r\n            elif not node.rchild:\r\n                self.__remove_node_21(node)\r\n            else:\r\n                min_node=node.rchild\r\n                while min_node.lchild:\r\n                    min_node=min_node.lchild\r\n                node.data=min_node.data\r\n                if min_node.rchild:\r\n                    self.__remove_node_22(min_node)\r\n                else:\r\n                    self.__remove_node_1(min_node)\r\n                \r\ntree=BST([4,6,7,9,2,1,3,5,8])\r\nprint(\"\")\r\ntree.pre_order(tree.root)\r\nprint(\"\")\r\ntree.in_order(tree.root)\r\nprint(\"\")\r\ntree.post_order(tree.root)\r\nprint(tree.query_no_rec(10))\r\ntree.delete(4)\r\ntree.pre_order(tree.root)\r\n\r\n\r\n    ","sub_path":"BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"380052073","text":"\"\"\"\nModule to load input data files\nCreated 8/8/2016\n\nModified:\n@Date 10/05/2016\n@author: Xinya Li (xinya.li@pnl.gov), Chris R. 
Vernon (chris.vernon@pnnl.gov)\n@Project: Xanthos V2.0\n\n\nLicense: BSD 2-Clause, see LICENSE and DISCLAIMER files\n\nCopyright (c) 2017, Battelle Memorial Institute\n\"\"\"\n\nimport os\nfrom scipy import io as spio\nimport numpy as np\n\nfrom xanthos.utils.numpy_parser import GetArrayCSV, GetArrayTXT\n\n\nclass LoadReferenceData:\n \"\"\"\n Load reference data.\n\n :param settings: settings object from configuration\n \"\"\"\n def __init__(self, settings):\n\n # Area value for each land grid cell: 67420 x 1, convert from ha to km2\n self.area = load_const_griddata(settings.Area) * 0.01\n\n # Coordinates for flattened grid: 67420 x 5, the columns are ID#, lon, lat, ilon, ilat\n self.coords = load_const_griddata(settings.Coord)\n\n # Basin ID Map: 67420 x 1, 235 Basins\n self.basin_ids = load_const_griddata(settings.BasinIDs, 1).astype(int)\n\n # Corresponding to Basin ID Map, 235 Basin Names: 1D String Array\n self.basin_names = load_const_griddata(settings.BasinNames)\n\n # GCAM Region ID Map : 67420 x 1 (The nonag region table will be the 'primary' region assignment)\n self.region_ids = load_const_griddata(settings.GCAMRegionIDs, 1).astype(int)\n\n # Corresponding to GCAM Region ID Map\n with open(settings.GCAMRegionNames, 'r') as f:\n f.readline()\n temp = f.read().split('\\n')\n self.region_names = np.array([i.split(',') for i in temp])[:, 0]\n\n # Country ID Map : 67420 x 1 (249 countries: 1-249)\n self.country_ids = load_const_griddata(settings.CountryIDs, 1).astype(int)\n\n # Corresponding to Country ID Map, 0-248 index number and 249 Country Names: 2D String Array\n with open(settings.CountryNames, 'r') as f:\n temp = f.read().splitlines()\n self.country_names = np.array([i.split(',') for i in temp])[:, 1]\n\n if settings.runoff_module == 'gwam':\n # Max Soil Moisture Map (mm/month): 67420 x 1\n self.max_soil_moist = load_const_griddata(settings.MaxSoilMois, 1)\n\n # Water Bodies: assign MSM = 999, 306 x 2, Col 1 is the cell number in 67420\n self.lakes_msm = load_const_griddata(settings.LakesMSM).astype(int)\n self.lakes_msm[:, 0] -= 1\n\n # ''' Rivers: assign MSM = 999, 4198 x 2, Col 1 is the cell number in 67420\n # constants['RiversMSM'] = load_const_griddata(settings.RiversMSM).astype(int)\n # constants['RiversMSM'][:,0] -= 1\n\n # Additional water bodies: assign MSM = 999, 421 x 2, Col 1 is the cell number in 67420\n self.addit_water_msm = load_const_griddata(settings.AdditWaterMSM).astype(int)\n self.addit_water_msm[:, 0] -= 1\n\n\ndef load_climate_data(fle, var_name, n_cells, n_months, neg_to_zero=False):\n \"\"\"\n Loads and checks input climate data.\n\n Dimension: 67420 x number of years*12, for example:\n Historical: 1950-2005 672 months\n Future: 2006-2100 1140 months\n\n @:param fle: file path with extension\n @:param var_name: NetCDF variable name\n @:param neg_to_zero: convert negative values to zero\n @:param n_cells: number of cells\n @:param n_months: number of months\n\n @:return: array\n \"\"\"\n a = load_const_griddata(fle, 0, var_name)\n\n if neg_to_zero:\n a[np.where(a < 0)] = 0\n\n return check_climate_data(a, n_cells=n_cells, n_months=n_months, text=var_name)\n\n\ndef load_routing_data(fle, ngridrow, ngridcol, map_index, skip=68, rep_val=None):\n \"\"\"\n Load routing data.\n\n DRT data, 280 x 720, -9999 for missing values, convert to 67420 X 1\n\n @:param fle file to load\n @:param ngridrow number of grids per row\n @:param ngridcol number of grids per column\n @:param map_index\n @:param skip\n @:param rep_val value to replace with when less than 
value\n \"\"\"\n fd = load_const_griddata(fle)\n v = vectorize(fd, ngridrow, ngridcol, map_index, skip=skip)\n\n if rep_val is None:\n return v\n\n else:\n v[np.where(v < rep_val)[0]] = rep_val\n return v\n\n\ndef load_soil_data(settings):\n \"\"\"\n Load soil moisture file into array if in future mode, else stage zeros array.\n \"\"\"\n try:\n # Initialize channel storage/soil moisture.\n if settings.HistFlag == \"True\":\n return np.zeros((settings.ncell,), dtype=float)\n\n # For future runs, initialize with the last value of the historical channel storage/soil moisture\n else:\n return load_const_griddata(settings.SavFile, 0, settings.SavVarName)[:, -1]\n\n # if not in use\n except AttributeError:\n return np.zeros((settings.ncell,), dtype=float)\n\n\ndef load_chs_data(settings):\n \"\"\"\n Load channel velocity file into array if in future mode, else stage zeros array.\n \"\"\"\n try:\n\n # Initialize channel storage/soil moisture.\n if settings.HistFlag == \"True\":\n return np.zeros((settings.ncell,), dtype=float)\n\n # For future runs, initialize with the last value of the historical channel storage/soil moisture\n else:\n return load_const_griddata(settings.ChStorageFile, 0, settings.ChStorageVarName)[:, -1]\n except AttributeError:\n return np.zeros((settings.ncell,), dtype=float)\n\n\ndef load_gcm_var(fn, varname):\n \"\"\"\n Loads climate data from the specified GCM\n \"\"\"\n\n if not os.path.isfile(fn):\n raise IOError(\"File does not exist: {}\".format(fn))\n\n temp = spio.loadmat(fn)\n data = temp[varname]\n\n return data\n\n\ndef check_climate_data(data, n_cells, n_months, text):\n \"\"\"\n Check array size of input and check to make sure the total number of months can be split into years.\n\n :param data: input array\n :param n_cells: number of cells\n :param n_months: number of months\n :param text: name of target variable\n \"\"\"\n err_cell = \"Error: Inconsistent {0} data grid size. Expecting size: {1}. Received size: {2}\".format(text, n_cells, data.shape[0])\n err_mth = \"Error: Inconsistent {0} data grid size. Expecting size: {1}. 
Received size: {2}\".format(text, n_months, data.shape[1])\n\n if not data.shape[0] == n_cells:\n raise RuntimeError(err_cell)\n\n if not data.shape[1] == n_months:\n raise RuntimeError(err_mth)\n\n if not data.shape[1] % 12 == 0:\n raise RuntimeError(\"Error: Number of months in climate data can not be converted into integral years.\")\n\n return data\n\n\ndef load_const_griddata(fn, headerNum=0, key=\" \"):\n \"\"\"\n Load constant grid data stored in files defined in GRID_CONSTANTS.\n \"\"\"\n\n # for MATLAB files\n if fn.endswith('.mat'):\n data = load_gcm_var(fn, key)\n\n # for Numpy pickled files\n elif fn.endswith('.npy'):\n data = np.load(fn)\n\n # for text files\n elif fn.endswith('.txt'):\n\n if not os.path.isfile(fn):\n raise IOError(\"Error: File does not exist:\", fn)\n\n try:\n data = GetArrayTXT(fn, headerNum)\n\n except:\n with open(fn, 'r') as f:\n data = np.array(f.read().splitlines())\n\n # for CSV files\n elif fn.endswith('.csv'):\n\n if not os.path.isfile(fn):\n raise IOError(\"Error: File does not exist:\", fn)\n\n data = GetArrayCSV(fn, headerNum)\n\n # for NetCDF classic files\n elif fn.endswith('.nc'):\n\n if not os.path.isfile(fn):\n raise IOError(\"Error: File does not exist:\", fn)\n\n datagrp = spio.netcdf.netcdf_file(fn, 'r', mmap=False)\n\n # copy() added to handle numpy 'ValueError:assignment destination is read-only' related to non-contiguous memory\n try:\n data = datagrp.variables[key][:, :].copy()\n\n except:\n data = datagrp.variables[key][:].copy()\n\n datagrp.close()\n\n return data\n\n\ndef vectorize(data, ngridrow, ngridcol, map_index, skip):\n \"\"\"\n Convert 2D Map (360 x 720) Matrix to 1D Map(67420)\n \"\"\"\n new = np.zeros((ngridrow, ngridcol), dtype=float) - 9999\n\n for i in range(0, data.shape[0]):\n new[i + skip, :] = data[data.shape[0] - 1 - i, :]\n\n new = new.reshape((ngridrow * ngridcol,), order='F')\n\n return new[map_index]\n\n\ndef load_soil_moisture(d, ngrids, missing=-9999):\n data = np.zeros((ngrids, 5), order='F')\n\n data[:, 0] = d.area\n data[:, 1] = d.region_ids\n data[:, 2] = d.max_soil_moist\n\n # add max value (999) where water is\n data[d.lakes_msm[:, 0], 2] = d.lakes_msm[:, 1]\n data[d.addit_water_msm[:, 0], 2] = d.addit_water_msm[:, 1]\n\n country = d.country_ids[:]\n basin = d.basin_ids[:]\n\n # Ignore all the cells in which we are missing an ID value for soil moisture, country, or basin.\n # Thus, country and basin coverage must be consistent.\n # Basins coverage is smaller, and GCAM region ignores Greenland.\n invalid = np.where((data[:, 2] == 0) | (country == 0) | (basin == 0))[0]\n\n # should this be 0:2\n data[invalid, 1:2] = 0\n\n # should these be returned?\n country[invalid] = missing\n basin[invalid] = missing\n\n return data\n","sub_path":"xanthos/data_reader/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"187509246","text":"# Version python3.6\n# -*- coding: utf-8 -*-\n# @Time : 2018/11/6 4:40 PM\n# @Author : zenRRan\n# @Email : zenrran@qq.com\n# @File : GRU.py\n# @Software: PyCharm Community Edition\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport utils.Embedding as Embedding\n\nimport random\n\nclass GRU(nn.Module):\n def __init__(self, opts, vocab, label_vocab):\n super(GRU, self).__init__()\n\n random.seed(opts.seed)\n torch.manual_seed(opts.seed)\n torch.cuda.manual_seed(opts.seed)\n\n self.embed_dim = opts.embed_size\n self.word_num 
= vocab.m_size\n        self.pre_embed_path = opts.pre_embed_path\n        self.string2id = vocab.string2id\n        self.embed_uniform_init = opts.embed_uniform_init\n        self.label_num = label_vocab.m_size\n        self.embed_dropout = opts.embed_dropout\n        self.fc_dropout = opts.fc_dropout\n        self.hidden_num = opts.hidden_num\n        self.hidden_size = opts.hidden_size\n        self.hidden_dropout = opts.hidden_dropout\n        self.bidirectional = opts.bidirectional\n\n        self.embeddings = nn.Embedding(self.word_num, self.embed_dim)\n        if opts.pre_embed_path != '':\n            embedding = Embedding.load_predtrained_emb_zero(self.pre_embed_path, self.string2id)\n            self.embeddings.weight.data.copy_(embedding)\n        else:\n            nn.init.uniform_(self.embeddings.weight.data, -self.embed_uniform_init, self.embed_uniform_init)\n\n        self.gru = nn.GRU(\n            self.embed_dim,\n            self.hidden_size,\n            dropout=self.hidden_dropout,\n            num_layers=self.hidden_num,\n            batch_first=True,\n            bidirectional=self.bidirectional\n        )\n        self.embed_dropout = nn.Dropout(self.embed_dropout)\n        self.fc_dropout = nn.Dropout(self.fc_dropout)\n        self.linear1 = nn.Linear(self.hidden_size * 2, self.hidden_size // 2)\n        self.linear2 = nn.Linear(self.hidden_size // 2, self.label_num)\n\n    def forward(self, input):\n        out = self.embeddings(input)\n        out = self.embed_dropout(out)\n        out, _ = self.gru(out)  #[1, 1, 200]\n\n        out = torch.transpose(out, 1, 2)\n\n        out = torch.tanh(out)\n\n        out = F.max_pool1d(out, out.size(2))  #[1, 200, 1]\n\n        out = out.squeeze(2)  #[1, 400]\n\n        out = self.fc_dropout(out)\n        out = self.linear1(F.relu(out))\n        output = self.linear2(F.relu(out))\n\n        return output\n","sub_path":"GRU.py","file_name":"GRU.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"401494552","text":"import unittest\nfrom foo_math import add\n\n# best practice: define test inputs/answers as module-level constants\nEXAMPLE_1 = [3,4]\nEXAMPLE_2 = [-3,-4]\nANSWER_1 = 7\nANSWER_2 = -7\n\nclass AddTest(unittest.TestCase):\n    \n    # anything with \"test_\" will be automatically tested\n    def test_positive(self):\n        self.assertEqual(add(EXAMPLE_1[0], EXAMPLE_1[1]), ANSWER_1)\n\n    def test_negative(self):\n        self.assertEqual(add(EXAMPLE_2[0], EXAMPLE_2[1]), ANSWER_2)\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"tests/test_foomath.py","file_name":"test_foomath.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"403960147","text":"\"\"\"\n    This module contains all the procedures necessary for\n    loading the data.\n\"\"\"\nimport pickle as pkl\nimport numpy as np\nimport logging\nimport sklearn\nfrom sklearn.model_selection import train_test_split\n\n# DATA_FOLDER = \"data/\"\n\nEXTENSION = \".pkl\"\nDATA_SETS = {\n    \"fonollosa\": {0: \"B1-system\",\n                  1: \"B2-system\",\n                  2: \"B3-system\",\n                  3: \"B4-system\",\n                  4: \"B5-system\",\n                  \"n_classes\": 4\n                  },\n    \"turbulent_gas_mixtures\": {0: \"preloaded_dataset\", \"n_classes\": 4},\n    \"windtunnel\": {\n        0: \"preloaded_dataset-L1\",\n        1: \"preloaded_dataset-L2\",\n        2: \"preloaded_dataset-L3\",\n        3: \"preloaded_dataset-L4\",\n        4: \"preloaded_dataset-L5\",\n        5: \"preloaded_dataset-L6\",\n        \"n_classes\": 11,\n    }\n    # Uncomment the following line if you have been authorized to use the dataset\n    # ,\"coffee_dataset\": {0: \"preloaded_dataset\", \"n_classes\": 3}\n}\nDS_WINE = {\"QWines-CsystemTR\": 3,\n           \"QWinesEa-CsystemTR\": 4}\n\n\ndef load(ds_choice, ds_idx=0):\n    \"\"\"\n    choices : 0 -> 
turbulent_gas_mixtures, 2 -> windtunnel\n :param ds_choice: the index naming the dataset chosen\n :param ds_idx: the index if the folder containing the dataset has one or more datasets\n :return: the dataset read,the labels and the number of classes\n \"\"\"\n global DATA_FOLDER, DATA_SETS, EXTENSION\n assert ds_choice in list(DATA_SETS.keys())\n # ds_name =[ds_choice]\n ds_name = ds_choice\n dataset_name = ds_name+\"/\"\n\n logging.info(ds_name + \" Is being loaded\")\n\n n_classes = DATA_SETS[ds_name]['n_classes']\n print(\"\\n\\n ds_name:\"+ds_name+\"\\n\\n\")\n print(\"\\n\\n ds_idx\"+str(ds_idx)+\"\\n\\n\")\n subds_name = DATA_SETS[ds_name][ds_idx]\n sub_dataset_name = subds_name + \"/\"\n\n data, labels = None, None\n with open(DATA_FOLDER + ds_name + \"/\" + subds_name + EXTENSION, 'rb') as d:\n data, labels, _ = pkl.load(d)\n d.close()\n\n return data, labels, n_classes, dataset_name, sub_dataset_name\n\n\ndef load_wine(ds_choice):\n \"\"\"\n choices : 0 -> QWines-CsystemTR, 1 -> QWinesEa-CsystemTR\n :param ds_choice: the index naming the dataset chosen\n :return: the dataset read,the labels and the number of classes\n \"\"\"\n global DATA_FOLDER, DS_WINE, EXTENSION\n\n assert ds_choice in list(DS_WINE.keys())\n ds_name = ds_choice\n dataset_name = ds_name+'/'\n\n logging.info(ds_name + \" Is being loaded\")\n\n n_classes = DS_WINE[ds_name]\n\n data, labels = None, None\n with open(DATA_FOLDER + \"wines/\" + ds_name + EXTENSION, \"rb\") as d:\n data, labels, _, _ = pkl.load(d)\n d.close()\n\n return data, labels, n_classes, \"wine/ \", dataset_name # sub_dataset_name\n\n\ndef data_set_reshaped(data_set):\n new_data = []\n for d in data_set:\n new_data.append(d.reshape(d.shape[0], d.shape[1], 1).tolist())\n return np.array(new_data)\n\n\ndef load_and_split(ds_choice, ds_idx=0, read_wine_datasets=False):\n # Loading dataset\n data = None\n labels = None\n dataset_name = \"\"\n sub_dataset_name = \"\"\n if not read_wine_datasets:\n data, labels, n_classes, dataset_name, sub_dataset_name = load(ds_choice, ds_idx)\n else:\n data, labels, n_classes, dataset_name, sub_dataset_name = load_wine(ds_choice)\n\n train_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size=.2)\n train_data = data_set_reshaped(train_data)\n test_data = data_set_reshaped(test_data)\n\n # input_shape = train_data[0].shape\n\n train_data, train_labels = sklearn.utils.shuffle(train_data, train_labels)\n test_data, test_labels = sklearn.utils.shuffle(test_data, test_labels)\n\n return train_data, train_labels, test_data, test_labels, dataset_name, sub_dataset_name\n\n\ndef standardize_data(train_data, test_data, input_shape):\n\n flat_train_data = train_data.reshape(train_data.shape[0], input_shape[0] * input_shape[1])\n flat_test_data = test_data.reshape(test_data.shape[0], input_shape[0] * input_shape[1])\n\n scaler = sklearn.preprocessing.StandardScaler().fit(flat_train_data)\n flat_train_data = scaler.transform(flat_train_data)\n\n scaler = sklearn.preprocessing.StandardScaler().fit(flat_test_data)\n flat_test_data = scaler.transform(flat_test_data)\n\n new_train = flat_train_data.reshape(train_data.shape[0], input_shape[0], input_shape[1], 1)\n new_test = flat_test_data.reshape(test_data.shape[0], input_shape[0], input_shape[1], 1)\n return new_train, new_test\n\n\ndef split_datasamples_by_sensors(data):\n \"\"\"\n This is an auxiliary procedure for executing the\n SniffMultinose model split turn each column of\n the data matrix into an individual vector.\n :param data: matrix of 
signals encoded in a numpy array of doubles\n    :return: a list with each column of the data matrix stored in a\n             separate list item\n    \"\"\"\n    shape = data.shape\n    new_split = []\n    # Iterate over data columns\n    for i in range(shape[2]):\n        new_split.append(data[:, :, i])\n        new_split[i] = new_split[i].reshape(new_split[i].shape[0], new_split[i].shape[1])\n    return new_split\n\n\ndef load_dataset(ds_choice, ds_idx, read_wine_datasets=False):\n    \"\"\"\n    Loads the dataset for the experiment\n    :param ds_choice: name of the chosen dataset\n    :param ds_idx: index indicating which subset should be loaded\n    :param read_wine_datasets: True, if it is desired to read the wine dataset\n    :return: data_samples,\n             data labels,\n             number of classes,\n             name of the dataset and name of the data subset,\n             the input shape\n    \"\"\"\n    data = None\n    labels = None\n    dataset_name = None\n    sub_dataset_name = None\n    if not read_wine_datasets:\n        data, labels, n_classes, dataset_name, sub_dataset_name = load(ds_choice, ds_idx)\n    else:\n        data, labels, n_classes, dataset_name, sub_dataset_name = load_wine(ds_choice)\n\n    data = np.array(data)\n\n    input_shape = data[0].shape\n\n    return data, labels, n_classes, dataset_name, sub_dataset_name, input_shape\n\n\ndef dataset_classes_number(dataset_name):\n    global DATA_SETS\n    return DATA_SETS[dataset_name][\"n_classes\"]\n\n\ndef dataset_wine_classes_number(dataset_name):\n    global DS_WINE\n    return DS_WINE[dataset_name]\n\n\nif __name__ == \"data_loading\":\n    global DATA_FOLDER\n    DATA_FOLDER = \"data/\"\n","sub_path":"data_loading.py","file_name":"data_loading.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"21109165","text":"__author__ = 'Filinto Duran (duranto@gmail.com)'\n# CAPABILITIES\nCAN_BE_MANAGED = 1\nCAN_TRANSFER_FILES = 1 << 1\nCAN_EXECUTE_COMMANDS = 1 << 2\nCAN_HAVE_MULTIPLE_CLIENTS = 1 << 3\nCAN_HAVE_MULTIPLE_USER = 1 << 4\nCAN_HAVE_MULTIPLE_SESSIONS_PER_USER = 1 << 5\nCAN_HAVE_INTERACTIVE_COMMANDS = 1 << 6\nCAN_OPEN_MULTIPLE_CHANNELS = 1 << 7\nCAN_DO_ALL = (1 << 20) - 1\n\nOPEN = 'NO_RETRIES_ON_OPEN'\nDO_NOT_RETRY_ON_OPEN = OPEN\n\nUNIQUE_PROMPT_TEMPLATE = '@##PPRROOMMPPTT{}##@'\nUNIQUE_PROMPT = UNIQUE_PROMPT_TEMPLATE.format('')\nSOCKET_RECV_NOT_READY = None\n","sub_path":"course3/test_envi/connections/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"424584879","text":"'''\nGiven a 2D matrix of characters and a target word, write a function that returns whether the word can be found in the matrix by going left-to-right, or up-to-down.\n\nFor example, given the following matrix:\n\n[['F', 'A', 'C', 'I'],\n ['O', 'B', 'Q', 'P'],\n ['A', 'N', 'O', 'B'],\n ['M', 'A', 'S', 'S']]\nand the target word 'FOAM', you should return true, since it's the leftmost column. 
Similarly, given the target word 'MASS', you should return true, since it's the last row.\n'''\n\n# SOLUTION\n\ndef get_row_word(matrix, word_len, rows, x, y):\n    row_chars = list()\n    for i in range(word_len):\n        row_chars.append(matrix[x + i][y])\n\n    return ''.join(row_chars)\n\n\ndef get_col_word(matrix, word_len, cols, x, y):\n    return ''.join(matrix[x][y:y + word_len])\n\n\ndef word_checker(matrix, word, word_len, rows, cols, x, y):\n\n    if x >= rows or y >= cols:\n        return False\n\n    row_word, col_word = None, None\n    if x + word_len <= rows and y < cols:\n        row_word = get_row_word(matrix, word_len, rows, x, y)\n    if y + word_len <= cols and x < rows:\n        col_word = get_col_word(matrix, word_len, cols, x, y)\n\n    if row_word == word or col_word == word:\n        return True\n\n    check_1 = word_checker(matrix, word, word_len, rows, cols, x + 1, y) \\\n        if col_word else None\n    check_2 = word_checker(matrix, word, word_len, rows, cols, x, y + 1) \\\n        if row_word else None\n\n    # normalize to a boolean so the function never leaks None to the caller\n    return bool(check_1 or check_2)\n\n\ndef word_exists(matrix, word):\n    rows = len(matrix)\n    cols = len(matrix[0])\n    word_len = len(word)\n\n    return word_checker(matrix, word, word_len, rows, cols, 0, 0)\n\n\nmatrix = [['F', 'A', 'C', 'I'],\n          ['O', 'B', 'Q', 'P'],\n          ['A', 'N', 'O', 'B'],\n          ['M', 'A', 'S', 'S']]\n\nprint(word_exists(matrix, 'FOAMS'))  # False\nprint(word_exists(matrix, 'FOAM'))  # True\nprint(word_exists(matrix, 'MASS'))  # True\nprint(word_exists(matrix, 'FORM'))  # False","sub_path":"Problem63/Answer.py","file_name":"Answer.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"271101209","text":"__all__ = (\n    \"CrawlTaskJson\",\n)\n\nimport json\n\n\nclass CrawlTaskJson:\n    '''\n    A crawl task is a JSON object of the following form:\n    {\n        \"job_name\":\"hahaha\",\n        \"layer\":0,\n        \"urls\":[\n            \"https://haha.hahaha.ha/xxx\",\n            \"https://haha.hahaha.ha/yyy\",\n        ]\n    }\n    This JSON is enqueued as a whole and is also handed to the crawler side as a single unit of task assignment,\n    so do not put too many urls in it at once\n    '''\n    @classmethod\n    def from_json_str(cls, json_str: str):\n        x = json.loads(json_str)\n        obj = cls(x[\"job_name\"], x[\"layer\"], x[\"urls\"])\n        return obj\n\n    def __init__(self, crawl_job_name: str, layer: int, urls: list):\n        assert isinstance(crawl_job_name, str) and\\\n            isinstance(layer, int) and\\\n            isinstance(urls, list)\n        assert bool(crawl_job_name) and bool(urls)  # must be non-empty\n        self.job_name = crawl_job_name\n        self.layer = layer\n        self.urls = urls\n\n    def get_json(self) -> str:\n        x = {\n            \"job_name\": self.job_name,\n            \"layer\":self.layer,\n            \"urls\": self.urls,\n        }\n        _json_str = json.dumps(x)\n        return _json_str\n","sub_path":"D-crawler-sys/server/crawler/crawl_task_json.py","file_name":"crawl_task_json.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"123348063","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, division, print_function, absolute_import\n\nimport os\nimport sys\nimport importlib\nimport fnmatch\nimport inspect\nimport pkgutil\nimport subprocess\nfrom types import ModuleType\n\nfrom captain import exit as console, echo\nfrom captain.decorators import arg\n\nfrom ..compat import *\nfrom ..utils import get_objects\nfrom ..model import Orm\nfrom ..interface import get_interface\n\n\ndef get_modules(modulepath):\n    \"\"\"return all found modules at modulepath (eg, foo.bar) including modulepath module\"\"\"\n    m = importlib.import_module(modulepath)\n    mpath = m.__file__\n    ret = set([m])\n\n    if \"__init__.\" in mpath.lower():\n        
mpath = os.path.dirname(mpath)\n\n # https://docs.python.org/2/library/pkgutil.html#pkgutil.iter_modules\n for module_info in pkgutil.iter_modules([mpath]):\n submodulepath = \".\".join([modulepath, module_info[1]])\n if module_info[2]:\n # module is a package\n submodules = get_modules(submodulepath)\n ret.update(submodules)\n else:\n ret.add(importlib.import_module(submodulepath))\n\n return ret\n\n\ndef get_subclasses(modulepath, parent_class):\n \"\"\"given a module return all the parent_class subclasses that are found in\n that module and any submodules.\n\n :param modulepath: string, a path like foo.bar.che\n :param parent_class: object, the class whose children you are looking for\n :returns: set, all the found child classes in modulepath of parent_class\n \"\"\"\n if isinstance(modulepath, ModuleType):\n modules = get_modules(modulepath.__name__)\n else:\n modules = get_modules(modulepath)\n\n ret = set()\n for m in modules:\n cs = inspect.getmembers(m, lambda v: inspect.isclass(v) and issubclass(v, parent_class))\n for class_name, klass in cs:\n ret.add(klass)\n\n return ret\n\n\ndef build_dump_order(orm_class, orm_classes):\n \"\"\"pass in an array, when you encounter a ref, call this method again with the array\n when something has no more refs, then it gets appended to the array and returns, each\n time something gets through the list they are added, but before they are added to the\n list it is checked to see if it is already in the listt\"\"\"\n if orm_class in orm_classes: return\n\n for field_name, field_val in orm_class.schema.fields.items():\n if field_val.is_ref():\n build_dump_order(field_val.schema.orm_class, orm_classes)\n\n if orm_class not in orm_classes:\n orm_classes.append(orm_class)\n\n\ndef get_orm_classes(path):\n \"\"\"this will return prom.Orm classes found in the given path (classpath or modulepath)\"\"\"\n ret = set()\n try:\n m = importlib.import_module(path)\n\n except ImportError:\n # we have a classpath\n m, klass = get_objects(path)\n if issubclass(klass, Orm):\n ret.add(klass)\n\n else:\n ret.update(get_subclasses(m, Orm))\n\n return ret\n\n\ndef get_table_map(paths):\n ret = {}\n orm_classes = set()\n dump_orm_classes = []\n for p in paths:\n orm_classes.update(get_orm_classes(p))\n\n for orm_class in orm_classes:\n build_dump_order(orm_class, dump_orm_classes)\n\n try:\n for orm_class in dump_orm_classes:\n inter = orm_class.interface\n conn_name = inter.connection_config.name\n ret.setdefault(conn_name, {\"interface\": inter, \"table_names\": []})\n ret[conn_name][\"table_names\"].append(orm_class.table_name)\n\n except RuntimeError:\n pass\n\n return ret\n\n\ndef run_cmd(cmd):\n try:\n process = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n\n if is_py2:\n for line in iter(process.stdout.readline, \"\"):\n sys.stdout.write(line)\n sys.stdout.flush()\n else:\n for line in iter(process.stdout.readline, b\"\"):\n line = line.decode(\"utf-8\")\n sys.stdout.write(line)\n sys.stdout.flush()\n\n process.wait()\n\n except subprocess.CalledProcessError as e:\n raise RuntimeError(\"dump failed with code {} and output: {}\".format(e.returncode, e.output))\n\n except OSError as e:\n if e.errno == 2:\n echo.err(\"dump is not installed, you need to run `pip install dump`\")\n raise\n\n\ndef get_base_cmd(action, inter, directory):\n\n conn = inter.connection_config\n\n if not \"postgres\" in conn.interface_name.lower():\n raise RuntimeError(\"Dump only works with Postgres databases\")\n\n cmd = [\n \"dump\",\n action,\n 
\"--dbname\",\n conn.database,\n \"--username\",\n conn.username,\n \"--password\",\n conn.password,\n \"--host\",\n conn.host,\n \"--directory\",\n directory,\n ]\n\n if conn.port:\n cmd.extend([\"--port\", str(conn.port)])\n\n return cmd\n\n\n@arg(\"-D\", \"--dir\", \"--directory\", dest=\"directory\", help=\"directory where the backup files should go\")\n@arg(\"--dry-run\", dest=\"dry_run\", action=\"store_true\", help=\"act like you are going to do everything but do nothing\")\n@arg(\"paths\", nargs=\"+\", help=\"module or class paths (eg, foo.bar or foo.bar.Che) where prom Orm classes are defined\")\ndef main_dump(paths, directory, dry_run):\n \"\"\"dump all or part of the prom data, currently only works on Postgres databases\n\n basically just a wrapper around `dump backup` https://github.com/Jaymon/dump\n \"\"\"\n table_map = get_table_map(paths)\n\n for conn_name, conn_info in table_map.items():\n inter = conn_info[\"interface\"]\n conn = inter.connection_config\n table_names = conn_info[\"table_names\"]\n\n cmd = get_base_cmd(\"backup\", inter, directory)\n cmd.extend(table_names)\n\n if dry_run:\n echo.out(\" \".join(cmd))\n\n else:\n run_cmd(cmd)\n\n\n@arg(\"-D\", \"--dir\", \"--directory\",\n dest=\"directory\",\n help=\"directory where the backup files from a previous prom dump are located\")\n@arg(\"--connection-name\", \"-c\",\n dest=\"conn_name\",\n default=\"\",\n help=\"the connection name (from prom dsn) you want to restore\")\ndef main_restore(directory, conn_name):\n \"\"\"Restore your database dumped with the dump command\n\n just a wrapper around `dump restore` https://github.com/Jaymon/dump\n \"\"\"\n inter = get_interface(conn_name)\n conn = inter.connection_config\n cmd = get_base_cmd(\"restore\", inter, directory)\n run_cmd(cmd)\n\n","sub_path":"prom/cli/dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"57491117","text":"# Главный исполняемый файл\n\nimport os\nimport threading\n\n\nimport classes.transactions\nimport classes.console\nimport classes.DB\nimport classes.wallet\nimport classes.network\nimport classes.blockchain\nimport classes.miner\n\nconsoleClass = classes.console.Console()\ndbClass = classes.DB.DB()\nwalletClass = classes.wallet.Wallet()\ntransactionsClass = classes.transactions.Transactions()\nnetworkClass = classes.network.Network()\nblockchainClass = classes.blockchain.Blockchain()\nminerClass = classes.miner.Miner()\n\nwalletClass.init_classes(dbClass)\nconsoleClass.init_classes(walletClass, transactionsClass, dbClass)\ntransactionsClass.init_classes(walletClass, dbClass, networkClass)\nnetworkClass.init_classes(dbClass, blockchainClass, transactionsClass)\nblockchainClass.init_classes(dbClass, walletClass, transactionsClass)\nminerClass.init_classes(dbClass, blockchainClass, networkClass)\n\n\n# Создаем директорию для хранения БД и саму базу если её нет.\n# Если удалить файл с базой и не удалить папку, возникнет баг.\ndb_file = 'db/blockchain_DB.sqlite'\ntry:\n fp = open(db_file)\nexcept IOError:\n os.makedirs('db')\n dbClass.creating_DB()\n\n# Запускаем главный метод класса для работы с консолью.\n# consoleClass.startConsoleWallet()\n\n\ndef network():\n networkClass.receive_message()\n\n\ndef wallet():\n consoleClass.startConsoleWallet()\n\n\ndef miner():\n minerClass.startMining()\n\n\nif __name__ == '__main__':\n try:\n a = threading.Thread(name=\"wallet\", target=wallet)\n b = threading.Thread(name=\"network\", 
target=network)\n c = threading.Thread(name=\"miner\", target=miner)\n b.daemon = True\n c.daemon = True\n a.start()\n b.start()\n c.start()\n except:\n pass\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"517288924","text":"from typing import List, Tuple, Optional\nimport math\n\nfrom torch.utils.data import Dataset\nimport torch\n\nfrom src.utils.tokenizers import SymTokenizer\n\n\ndef sort_data(indices: List[int],\n sequences: List[str],\n labels: List[str]) -> Tuple[List[int], List[str], List[str]]:\n indices, sequences, labels = zip(*sorted(zip(indices, sequences, labels), key=lambda x: len(x[1])))\n return indices, sequences, labels\n\n\ndef make_batches(indices: List[int],\n sequences: List[str],\n labels: List[str],\n batch_size: int) -> Tuple[List[List[int]], List[List[str]], List[List[str]]]:\n\n assert len(sequences) == len(labels) == len(indices)\n\n indices, sequences, labels = sort_data(indices, sequences, labels)\n\n identifier_batches = []\n text_batches = []\n label_batches = []\n\n for i_batch in range(math.ceil(len(sequences) / batch_size)):\n identifier_batches.append(indices[i_batch * batch_size:(i_batch + 1) * batch_size])\n text_batches.append(sequences[i_batch * batch_size:(i_batch + 1) * batch_size])\n label_batches.append(labels[i_batch * batch_size:(i_batch + 1) * batch_size])\n\n return identifier_batches, text_batches, label_batches\n\n\nclass BmesSegmentationDataset(Dataset):\n\n def __init__(self,\n *,\n indices: List[int],\n original: List[str],\n segmented: List[str],\n original_tokenizer: SymTokenizer,\n bmes_tokenizer: SymTokenizer,\n batch_size: int,\n pad_index: int,\n unk_index: int,\n max_len: int):\n assert len(original) == len(segmented)\n\n self.batch_size = batch_size\n\n self.index_batches, self.original_batches, self.segmented_batches = make_batches(indices=indices,\n sequences=original,\n labels=segmented,\n batch_size=self.batch_size)\n\n self.unk_index = unk_index\n self.pad_index = pad_index\n\n self.max_len = max_len\n\n self.original_tokenizer = original_tokenizer\n self.bmes_tokenizer = bmes_tokenizer\n\n def __len__(self) -> int:\n return len(self.index_batches)\n\n def prepare_sample(self,\n sequence: str,\n max_len: int,\n bmes: bool) -> Tuple[List[int], int]:\n if not bmes:\n sequence = self.original_tokenizer.encode(sequence)\n else:\n sequence = self.bmes_tokenizer.encode(sequence)\n\n sequence = sequence[:max_len]\n true_len = len(sequence)\n pads = [self.pad_index] * (max_len - len(sequence))\n sequence += pads\n\n return sequence, true_len\n\n def __getitem__(self, index: int) -> Tuple[torch.tensor, torch.tensor, torch.tensor, torch.tensor]:\n index_batch = self.index_batches[index]\n original_batch = self.original_batches[index]\n segmented_batch = self.segmented_batches[index]\n\n max_len = min([self.max_len, max([len(sample) for sample in original_batch])])\n\n batch_indices = []\n batch_x = []\n batch_y = []\n batch_lengths = []\n\n for index, sample in enumerate(original_batch):\n identifier = index_batch[index]\n x, true_len = self.prepare_sample(sample, max_len, bmes=False)\n y, _ = self.prepare_sample(segmented_batch[index], max_len, bmes=True)\n batch_indices.append(identifier)\n batch_x.append(x)\n batch_y.append(y)\n batch_lengths.append(true_len)\n\n batch_indices = torch.tensor(batch_indices).long()\n batch_x = torch.tensor(batch_x).long()\n batch_y = 
torch.tensor(batch_y).long()\n batch_lengths = torch.tensor(batch_lengths).long()\n\n return batch_indices, batch_x, batch_y, batch_lengths\n\n\n# class BmesSegmentationDataset(Dataset):\n#\n# def __init__(self,\n# *,\n# indices: List[int],\n# original: List[str],\n# segmented: List[str],\n# original_tokenizer: SymTokenizer,\n# bmes_tokenizer: SymTokenizer,\n# pad_index: int,\n# unk_index: int,\n# max_len: int):\n# self.indices = indices\n# self.original = original\n# self.segmented = segmented\n#\n# assert len(original) == len(segmented)\n#\n# self.unk_index = unk_index\n# self.pad_index = pad_index\n#\n# self.max_len = max_len\n#\n# self.index2char = None\n# self.char2index = None\n#\n# self.original_tokenizer = original_tokenizer\n# self.bmes_tokenizer = bmes_tokenizer\n#\n# def __len__(self) -> int:\n# return len(self.original)\n#\n# def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, int, int]:\n# encoder_seq = self.original_tokenizer.encode(self.original[index])\n# target_seq = self.bmes_tokenizer.encode(self.segmented[index])\n#\n# true_length = len(encoder_seq)\n# item_index = self.indices[index]\n#\n# encoder_seq = self.original_tokenizer.pad_or_clip(encoder_seq,\n# max_len=self.max_len)\n# target_seq = self.bmes_tokenizer.pad_or_clip(target_seq,\n# max_len=self.max_len)\n#\n# encoder_seq = torch.tensor(encoder_seq).long()\n# target_seq = torch.tensor(target_seq).long()\n#\n# return encoder_seq, target_seq, true_length, item_index","sub_path":"src/utils/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":5846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"602178474","text":"import urllib2\nimport time, datetime\nfrom scrapy.selector import Selector\nfrom pycharmCode.stock.streams.models import InsertModel\n\nclass Splits:\n def __init__(self):\n pass\n\n def run(self):\n html = urllib2.urlopen(\"http://getsplithistory.com/AA\").read()\n response = Selector(text = html, type = \"html\")\n datesAndOther = response.xpath(\"//table/tbody/tr/td/text()\").extract()\n ratios = response.xpath(\"//table/tbody/tr/td/span/text()\").extract()\n objects = []\n rs = []\n for i,data in enumerate(datesAndOther[:-1]):\n if (i % 4 == 0):\n objNum = len(objects)\n objects.append(dict())\n objects[objNum][\"date\"] = self.cleanDate(data)\n if ((i - 1) % 4 == 0):\n objNum = len(objects) - 1\n objects[objNum][\"denom\"] = self.cleanDenom(data)\n for i,data in enumerate(ratios[:-1]):\n if (i % 3 == 0):\n objects[i/3][\"num\"] = self.cleanNum(data)\n for o in objects:\n o[\"factorial\"] = float(o[\"num\"]) / float(o[\"denom\"])\n # now we insert the date symbol name and the factorial into the DB\n for o in objects:\n IM = InsertModel(\"jdfkasdklfj\")#tableName)\n IM.insert(\"e\", o[\"date\"])\n IM.insert(\"symbol\", symbol)\n IM.insert(\"Ratio\", o[\"factorial\"])\n\n def cleanDate(self, date):\n t = time.strptime(date, '%b %d, %Y')\n date = datetime.date(t.tm_year, t.tm_mon, t.tm_mday)\n return date\n\n def cleanDenom(self, data):\n data = int(data.split(\" : \")[1])\n return data\n\n def cleanNum(self, num):\n num = int(num)\n return num\n","sub_path":"pycharmCode/stock/scrape/Splits.py","file_name":"Splits.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"340538644","text":"#!/usr/bin/env python2\n# coding: utf-8\n\nimport copy\nimport unittest\n\nfrom pykit import utfjson\nfrom pykit import ututil\nfrom 
pykit.ectypes import (\n BlockDesc,\n BlockExists,\n BlockGroup,\n BlockGroupID,\n BlockID,\n BlockNotFoundError,\n BlockTypeNotSupportReplica,\n BlockTypeNotSupported,\n DriveID,\n)\n\ndd = ututil.dd\n\n_ec_config = {\n 'in_idc': [4, 2],\n 'cross_idc': [2, 1],\n 'ec_policy': 'lrc',\n 'data_replica': 3\n}\n\n_empty_group = BlockGroup({\n 'config': {\n 'in_idc': [4, 2],\n 'ec_policy': 'lrc',\n 'cross_idc': [2, 1],\n 'data_replica': 3\n },\n 'blocks': {},\n 'idcs': ['a', 'b', 'c'],\n 'block_group_id': 'g000640000000123'\n})\n\n\nclass TestBlockGroupID(unittest.TestCase):\n\n def test_new(self):\n block_group_id = 'g000640000000123'\n\n bgid = BlockGroupID(64, 123)\n self.assertEqual(block_group_id, str(bgid))\n\n bgid = BlockGroupID(block_group_id)\n self.assertEqual((64, 123), bgid.as_tuple())\n\n bgid = BlockGroupID(bgid)\n self.assertEqual((64, 123), bgid.as_tuple())\n\n def test_new_invalid(self):\n\n block_group_id_invalid = 'g00064000000012345'\n self.assertRaises(ValueError, BlockGroupID, block_group_id_invalid)\n\n def test_tostr(self):\n block_group_id = 'g000640000000123'\n bgid = BlockGroupID(block_group_id)\n self.assertEqual(block_group_id, str(bgid))\n self.assertEqual(block_group_id, '{0}'.format(bgid))\n self.assertEqual(\"'g000640000000123'\", repr(bgid))\n\n\nclass TestBlockGroup(unittest.TestCase):\n\n def setUp(self):\n self.foo_block = BlockDesc({\n 'block_id': BlockID('d0', 'g000640000000123', '0000',\n DriveID('idc000' 'c62d8736c7280002'), 1),\n 'size': 1000,\n 'range': ['0a', '0b'],\n 'is_del': 0\n })\n\n def test_new(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n self.assertEqual(_empty_group, g)\n\n # test lacking of arg\n self.assertRaises(TypeError, BlockGroup, block_group_id='g000640000000123', idcs=[])\n self.assertRaises(TypeError, BlockGroup, block_group_id='g000640000000123', config=_ec_config)\n self.assertRaises(TypeError, BlockGroup, idcs=[], config=_ec_config)\n\n def test_json(self):\n\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n rst = utfjson.dump(g)\n expected = ('{\"config\": {\"in_idc\": [4, 2], \"ec_policy\": \"lrc\", \"cross_idc\": [2, 1], '\n '\"data_replica\": 3}, \"blocks\": {}, \"idcs\": [\"a\", \"b\", \"c\"], '\n '\"block_group_id\": \"g000640000000123\"}')\n self.assertEqual(expected, rst)\n\n loaded = BlockGroup(utfjson.load(rst))\n self.assertEqual(g, loaded)\n\n def test_new_deref_config(self):\n\n cnf = copy.deepcopy(_ec_config)\n b = BlockGroup(block_group_id='g000640000000123', config=cnf, idcs=['a', 'b', 'c'])\n\n a = copy.deepcopy(b['config'])\n b['config']['in_idc'] = [10, 11]\n self.assertNotEqual(a, b)\n\n a = copy.deepcopy(b['config'])\n b['config']['cross_idc'] = [10, 11]\n self.assertNotEqual(a, b)\n\n a = copy.deepcopy(b['config'])\n b['config']['ec_policy'] = 'foo'\n self.assertNotEqual(a, b)\n\n a = copy.deepcopy(b['config'])\n b['config']['data_replica'] = 100\n self.assertNotEqual(a, b)\n\n def test_get_block(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n block = g.get_block('0000')\n self.assertIsNone(block)\n\n block = g.get_block('9999')\n self.assertIsNone(block)\n\n with self.assertRaises(BlockNotFoundError):\n g.get_block('9999', raise_error=True)\n\n g.add_block(self.foo_block)\n block = g.get_block(self.foo_block['block_id'].block_index)\n self.assertDictEqual(self.foo_block, block)\n\n with self.assertRaises(BlockNotFoundError):\n g.get_block('0002', 
raise_error=True)\n\n with self.assertRaises(ValueError):\n g.get_block('d0g0006400000001230000c62d2')\n\n def test_mark_delete_block(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n g.add_block(self.foo_block)\n g.mark_delete_block('0000')\n block = g.get_block('0000')\n\n self.assertEqual(1, block['is_del'])\n self.assertRaises(BlockNotFoundError, g.mark_delete_block, '9999')\n\n def test_delete_block(self):\n\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n self.assertIsNone(g.get_block('0000'))\n\n g.add_block(self.foo_block)\n self.assertIsNotNone(g.get_block('0000'))\n\n g.delete_block('0000')\n self.assertIsNone(g.get_block('0000'))\n\n g.delete_block('0000')\n self.assertIsNone(g.get_block('0000'))\n\n def test_replace_block(self):\n\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n prev = g.add_block(self.foo_block)\n self.assertIsNone(prev)\n\n block = g.get_block('0000')\n self.assertEqual(0, block['is_del'])\n\n prev = g.add_block(self.foo_block, replace=True)\n self.assertEqual(self.foo_block, prev)\n\n self.assertRaises(BlockExists, g.add_block, self.foo_block)\n self.assertRaises(BlockExists, g.add_block, self.foo_block, replace=False)\n\n def test_get_free_block_index(self):\n\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n g.add_block(self.foo_block)\n\n self.assertDictEqual({'a': ['0001', '0002', '0003'],\n 'b': ['0100', '0101', '0102', '0103']},\n g.get_free_block_indexes('d0'))\n\n self.assertDictEqual({'a': ['0004', '0005'],\n 'b': ['0104', '0105']},\n g.get_free_block_indexes('dp'))\n\n self.assertDictEqual({'c': ['0200', '0201', '0202', '0203'], },\n g.get_free_block_indexes('x0'))\n\n self.assertDictEqual({'c': ['0204', '0205'], },\n g.get_free_block_indexes('xp'))\n\n self.assertDictEqual(\n {\n 'a': ['0001', '0002', '0003'],\n 'b': ['0100', '0101', '0102', '0103'],\n 'c': [],\n },\n g.get_free_block_indexes('d0', get_all=True))\n\n self.assertDictEqual(\n {\n 'a': ['0004', '0005'],\n 'b': ['0104', '0105'],\n 'c': [],\n },\n g.get_free_block_indexes('dp', get_all=True))\n\n self.assertDictEqual(\n {\n 'a': [],\n 'b': [],\n 'c': ['0200', '0201', '0202', '0203'],\n },\n g.get_free_block_indexes('x0', get_all=True))\n\n self.assertDictEqual(\n {\n 'a': [],\n 'b': [],\n 'c': ['0204', '0205'],\n },\n g.get_free_block_indexes('xp', get_all=True))\n\n def test_get_block_type(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n self.assertEqual('d0', g.get_block_type('0000'))\n self.assertEqual('dp', g.get_block_type('0004'))\n self.assertEqual('d1', g.get_block_type('0006'))\n self.assertEqual('d0', g.get_block_type('0100'))\n self.assertEqual('dp', g.get_block_type('0104'))\n self.assertEqual('d1', g.get_block_type('0106'))\n self.assertEqual('x0', g.get_block_type('0200'))\n self.assertEqual('xp', g.get_block_type('0204'))\n\n self.assertRaises(BlockTypeNotSupported, g.get_block_type, '0299')\n self.assertRaises(BlockTypeNotSupported, g.get_block_type, '0900')\n\n def test_get_block_idc(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n self.assertEqual('a', g.get_block_idc('0000'))\n self.assertEqual('b', g.get_block_idc('0100'))\n self.assertEqual('c', g.get_block_idc('0200'))\n\n d0 = BlockDesc({\n 'block_id': BlockID('d0', 'g000640000000123', '0000',\n DriveID('idc000' 
'c62d8736c7280002'), 1),\n 'size': 1000,\n 'range': ['0a', '0b'],\n 'is_del': 0\n })\n g.add_block(d0)\n self.assertEqual('a', g.get_block_idc('0000'))\n\n def test_get_replica_index_not_include_me(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n self.assertEqual(['0006', '0010'], g.get_replica_indexes('0000', include_me=False))\n self.assertEqual(['0000', '0010'], g.get_replica_indexes('0006', include_me=False))\n self.assertEqual(['0000', '0006'], g.get_replica_indexes('0010', include_me=False))\n\n with self.assertRaises(BlockTypeNotSupportReplica):\n g.get_replica_indexes('0004', include_me=False)\n\n with self.assertRaises(BlockTypeNotSupportReplica):\n g.get_replica_indexes('0204', include_me=False)\n\n def test_classify_blocks(self):\n\n gid = 'g000640000000123'\n\n g = BlockGroup(block_group_id=gid, idcs=['a', 'b', 'c'], config=_ec_config)\n\n blks = g.classify_blocks(0, only_primary=True)\n self.assertEqual([], blks['ec'] + blks['replica'] + blks['mark_del'])\n\n base_blk = BlockDesc({\n 'size': 1000,\n 'range': ['0a', '0b'],\n 'is_del': 0\n })\n\n ec_blk_idxes = ['0000', '0001']\n replica_blk_idxes = ['0002', '0008', '0012']\n mark_del_idxes = ['0003', '0004']\n\n for i, idx in enumerate(ec_blk_idxes + replica_blk_idxes + mark_del_idxes):\n\n typ = g.get_block_type(idx)\n\n blkid = BlockID(typ, gid, idx, DriveID('idc000' 'c62d8736c7280002'), i)\n\n blk = copy.deepcopy(base_blk)\n\n blk['block_id'] = blkid\n\n if idx in mark_del_idxes:\n blk['is_del'] = 1\n\n g.add_block(blk)\n\n for only_primary in (True, False):\n\n blks = g.classify_blocks(0, only_primary)\n\n blk_idxes = []\n\n for blk in blks['ec'] + blks['replica'] + blks['mark_del']:\n idx = BlockID(blk['block_id']).block_index\n blk_idxes.append(idx)\n\n expect_ids = copy.deepcopy(ec_blk_idxes)\n\n #'0004' in ec_blk_idxes is parity, so should not in mark_del\n if only_primary is True:\n expect_ids += replica_blk_idxes[:1] + mark_del_idxes[:1]\n else:\n expect_ids += replica_blk_idxes + mark_del_idxes[:1]\n\n self.assertEqual(expect_ids, blk_idxes)\n\n def test_get_parities(self):\n\n gid = 'g000640000000123'\n\n g = BlockGroup(block_group_id=gid, idcs=['a', 'b', 'c'], config=_ec_config)\n\n parities = g.get_parities(idc_index=0)\n self.assertEqual([], parities)\n\n base_parity = BlockDesc({\n 'size': 1000,\n 'range': ['0a', '0b'],\n 'is_del': 0\n })\n\n parity_idxes = ['0004', '0005']\n\n for i, idx in enumerate(parity_idxes):\n\n blkid = BlockID('dp', gid, idx, DriveID('idc000' 'c62d8736c7280002'), i)\n\n parity = copy.deepcopy(base_parity)\n\n parity['block_id'] = blkid\n\n g.add_block(parity)\n\n idxes = g.get_parity_indexes(idc_index=0)\n self.assertEqual(parity_idxes, idxes)\n\n parities = g.get_parities(idc_index=0)\n\n idxes = []\n for p in parities:\n idx = BlockID(p['block_id']).block_index\n idxes.append(idx)\n\n self.assertEqual(parity_idxes, idxes)\n","sub_path":"ectypes/test/test_block_group.py","file_name":"test_block_group.py","file_ext":"py","file_size_in_byte":11948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"378722268","text":"from bs4 import BeautifulSoup\nimport bindetector\nimport numpy as np\nimport pickle\nimport os\nimport random\nfrom sklearn.utils import shuffle\n\n\n\ndef transform_img(index, ps, size=(905, 905), data_path=\"data\", img_format=\"jpeg\", batch_path=\"batch\"):\n X = []\n keyname, fileid = index.split(\"_\")[:2]\n pic = \"{}.{}\".format(fileid.split(\".\")[0], 
img_format)\n path = os.path.join(data_path, keyname)\n im = bindetector.load_image(os.path.join(path, pic))\n return bindetector.processing(im, ps, size)\n\n\ndef sampling(targets, sample_size=4, batch_path=\"batch\"):\n assert len(targets['ps']) == len(targets['labels'])\n data = [p for p, label in zip(targets['ps'], targets['labels']) if label is True]\n assert len(data) == 1\n tmp_targets = shuffle(list(zip(targets['ps'], targets['labels'])))\n data += [p for p, label in tmp_targets if label is False][:sample_size]\n return data\n\n\ndef get_indices(batch_path=\"batch\"):\n return [f for f in os.listdir(batch_path) if f.endswith(\"pkl\")]\n \n\ndef generate_data(indices, size=(905, 905), data_path=\"data\", batch_path=\"batch\", sample_size=4, batch_size=5):\n while(True):\n X = []\n labels = []\n for index in shuffle(indices)[:batch_size]:\n with open(os.path.join(batch_path, index), \"rb\") as f:\n targets = pickle.load(f)\n data = sampling(targets, sample_size, batch_path=batch_path)\n label = [[1, 0]] + [[0, 1] for _ in data[1:]]\n labels += label\n assert len(data) == len(label)\n X += transform_img(index, data, size=size, data_path=data_path, batch_path=batch_path)\n yield np.array(X), np.array(labels)\n\n","sub_path":"model4/scripts/dataprocessor.py","file_name":"dataprocessor.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"139281598","text":"\"\"\"empty message\n\nRevision ID: ce163c991d48\nRevises: 20e89eb059bf\nCreate Date: 2019-02-28 22:12:06.143176\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ce163c991d48'\ndown_revision = '20e89eb059bf'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('participants', sa.Column('tshirt', sa.String(length=100), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('participants', 'tshirt')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ce163c991d48_.py","file_name":"ce163c991d48_.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"247565782","text":"import unittest\n\nfrom .base import FunctionalTest\n\n\nclass LayoutAndStylingTest(FunctionalTest):\n\n def test_layout_and_styling(self):\n # Edith goes to the homepage\n self.browser.get(self.server_url)\n self.browser.set_window_size(1024, 768)\n # She notices the input box is nicely centered\n inputbox = self.get_item_input_box()\n self.assertAlmostEqual(\n inputbox.location['x'] + inputbox.size['width'] / 2,\n 512,\n delta=5\n )\n # She starts a new list and notices the input is nicely centered there too\n inputbox.send_keys('testing\\n')\n inputbox = self.get_item_input_box()\n self.assertAlmostEqual(\n inputbox.location['x'] + inputbox.size['width'] / 2,\n 512,\n delta=5\n )\n\n\nif __name__ == '__main__':\n unittest.main(warnings='ignore')\n","sub_path":"functional_tests/test_layout_and_styling.py","file_name":"test_layout_and_styling.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"359510734","text":"import datetime as dt\nimport poes_utils as pu\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport numpy as np\nimport os\nimport argparse\nimport netCDF4 as nc4\nimport timeit\nimport numpy.ma as ma\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\ndef valid_date(s):\n '''------------------------------------------------------------------\n PURPOSE: To check that a valid date is entered as an input\n :param s (str) a date in the format Y-m-d or Y-m-d H:M:S '''\n\n try:\n test = dt.datetime.strptime(s, \"%Y-%m-%d\")\n return test\n except:\n pass\n try:\n test = dt.datetime.strptime(s, \"%Y-%m-%d %H:%M:%S\")\n return test\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(s)\n raise argparse.ArgumentTypeError(msg)\n\ndef make_training_data_vars(sdate,edate,satlist, varlist, cdf_dir,Lbin_dir, neur_dir, reflon,syear_all,eyear_all):\n '''\n PURPOSE: To create a datafile of electron flux mapped to one longitude with SAR to be used\n for developing the SHELLS neural network\n\n INPUTS:\n :param: sdate(datetime)- time to start processing data\n :param: edate(datetime)- time to end processing data\n :param: satlist(list(str))- i.e. ['n15','n18','n19','m01','m02']\n :param: varlist(list(str))- variables to process i.e. 
['mep_ele_tel90_flux_e1', 'mep_ele_tel90_flux_e2',\n 'mep_ele_tel90_flux_e3', 'mep_ele_tel90_flux_e4']\n :param: cdf_dir(str) directory where the cdf files are\n :param: Lbin_dir(str) directory where the Lbin data files are\n :param: neur_dir(str) directory for the output files\n :param: reflon(int) E longitude to map to (degrees)\n :param: syear_all (int) The start year of the accumulated cdf file\n :param: eyear_all The end year of the accumulated cdf file\n\n OUTPUTS: monthly pickle files with the SAR modified data to be used by the SHELLS neural network\n\n USAGE(command line)\n python make_training_data.py -s 2013-01-01 -e 2013-05-01 -sats n15 n18 n19 m01 m02 -cd ./cdfdata/\n -ld ./Lbindat/ -nd ./neural_data/ -l 20 -sy 2015 -ey 2018:\n '''\n\n # These are the electron flux variables that have the percentile data\n evars = varlist\n\n svars = list()\n for var in evars:\n # These have the flux for each percentile\n svars.append(var+'_sar')\n\n # This is expected #orbits per day * Lpasses per orbit* # sats *days\n # that is used to estimate the len of array needed for make month long files\n flen = 20*4*len(satlist)*(31)\n\n Lbins = np.arange(1, 8.25, .25) # These are the Lbins\n cols = list()\n\n # make a list of columns for the output pickle file\n # The file will have columns with the SAR flux for each variable and Lbin\n for ecols in np.arange(0,len(varlist)):\n for lcols in np.arange(len(Lbins)):\n cols.append(varlist[ecols]+ ' '+str(Lbins[lcols]/4))\n\n # fin_dat will be fluxE1 all Lbins, flux E2 all Lbins, flux e3 Lbins, flux e4 at all Lbins\n # For each L pass\n # And then a time vector with the time at the midpoint of each L pass\n\n # This sets everything to ref longitude\n ref_ind = int(np.floor(reflon/10))\n\n # All data will be referenced back to m02\n satref = 'm02'\n\n while sdate = 0] = 0 # Northern hemi\n hemi[hemi < 0] = 1 # Southern hemi\n hemi1=hemi.astype(int)\n # Get the NS direction\n NSco = data['NS'][:]\n NSco1 =NSco.astype(int)\n lon = np.floor(data['lon'][:]/10)\n lon1=lon.astype(int)\n lon1[lon1>35]=0\n Kp = np.floor(data['Kp*10'][:]/10)\n Kp1 = Kp.astype(int)\n # Need to make Kp into a vector\n Kpvec = np.tile(Kp1,(len(Lbins),1)).T\n # Need to make an array of Ls\n Ls = np.zeros((len(data['time_med'][:]),len(Lbins)),dtype = int)\n\n for lco in range(0,len(Lbins)):\n Ls[:,lco] = Ls[:,lco]+lco\n\n\n nan_inds = np.where((fluxbin1 < -10) | (hemi1 < -10) | (lon1 < -10) | (Kpvec < -10) | (NSco1 < -10))\n\n # Set these to zero for now so that it is a valid index\n # but flag it later\n fluxbin1[nan_inds] = 0\n hemi1[nan_inds] = 0\n lon1[nan_inds] = 0\n NSco1[nan_inds] = 0\n Kpvec[nan_inds] = 0\n\n # Get the percentile that corresponds to each flux for the current sat\n per1 = sar[hemi1, NSco1, Ls, lon1, Kpvec, fluxbin1]\n perbin1 = np.round(per1 * 100).astype(int)\n\n # In northern some sar dat is nan\n per_nan = np.where(perbin1<-10)[0]\n perbin1[per_nan] = 0\n\n # Get the flux at the ref satellite for the measured percentile\n fluxval = sarout[1,1,Ls,ref_ind,Kpvec,perbin1]\n # Flag the bad values again\n fluxval[nan_inds] = -1\n fluxval[per_nan] = -1\n\n dlen = len(data['time_med'][:])\n # Set the output data to the reference value\n sar_dat[indco:indco + dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = fluxval\n # Save the no sar data for comparison\n nosar_dat[indco:indco + dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = np.log10(data[evars[eco]][:])\n\n lat_dat[indco:indco+dlen,(eco * len(Lbins)):(eco * len(Lbins))+len(Lbins) ] = hemi[:]\n 
full_lat[indco:indco+dlen,(eco * len(Lbins)):(eco * len(Lbins))+len(Lbins) ] = data['lat'][:]\n lon_dat[indco:indco+dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = data['lon'][:]\n per_dat[indco:indco+dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = perbin1\n way_dat[indco:indco+dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = data['NS'][:]\n sat_dat[indco:indco+dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = sco+1\n\n dtimes = pu.unix_time_ms_to_datetime(data['time_med'][:])\n sar_time.extend(dtimes.tolist())\n\n indco=indco+dlen\n else:\n print('No datafile')\n\n # Now sort the data by time after going through all the sats\n tinds= np.argsort(sar_time)\n new_time = [sar_time[x] for x in tinds]\n\n # This orders data according to the new sorted time\n # and saves the month file\n\n new_dat = sar_dat[tinds[0:len(new_time)], :] # The re-ordered and mapped data\n new_nosar_dat = nosar_dat[tinds[0:len(new_time)], :] # The re-order but not mapped data\n temp_dat = 1.0*new_dat\n\n # Now fill in holes with the last value.\n # This could be a problem if the first row has missing data\n vec_last = 1.0*new_dat[0,:]\n\n # find the columns where the first record has holes\n # and fill them with the closest value\n mvals = np.where((np.isnan(vec_last)) | (np.isinf(vec_last)) | (vec_last==-1))[0]\n ico = 1\n if len(mvals)>0:\n for mco in mvals:\n fillval = vec_last[mco]\n ico=1\n while ( (fillval<0) & (ico<50) ):\n fillval = new_dat[ico,mco]\n ico = ico+1\n vec_last[mco] = fillval\n\n new_dat[0,:] = vec_last\n for ico in np.arange(1,len(new_time)):\n vec = 1.0*new_dat[ico,:]\n vec[np.where((np.isnan(vec)) | (np.isinf(vec)) | (vec ==-1))] = vec_last[np.where((np.isnan(vec)) | (np.isinf(vec)) | (vec==-1) )]\n new_dat[ico,:] = vec\n vec_last = vec\n\n # --------------- Plot the monthly data -------------------------\n # Make a list of dates every 5 days for plotting\n day_inds = list()\n date_list = list()\n for days in np.arange(1,30,5):\n dmin = [np.abs( (x-dt.datetime(sdate.year,sdate.month,days)).total_seconds() ) for x in new_time]\n close_ind = dmin.index(min(dmin))\n day_inds.append(close_ind)\n date_list.append(dt.datetime(sdate.year, sdate.month, days).strftime(\"%m/%d/%Y\"))\n #sval = 300\n #lval = 400\n sval = 0\n lval = len(new_time)\n\n # Make a plot for each variable\n for eco in range(0,len(varlist)):\n fignum = plt.figure(eco+1)\n # plot the mapped data\n plt.subplot(5,1,1)\n im1 = plt.pcolormesh(np.arange(sval,lval), Lbins, np.transpose((new_dat[sval:lval,eco*len(Lbins):(eco+1)*len(Lbins)])), shading='flat',\n cmap=plt.cm.jet, vmin=0, vmax = 7 )\n plt.title(varlist[eco])\n plt.colorbar()\n #plot the unmapped data\n plt.subplot(5, 1, 2)\n im1 = plt.pcolormesh(np.arange(sval,lval), Lbins, np.transpose((new_nosar_dat[sval:lval,eco*len(Lbins):(eco+1)*len(Lbins)])), shading='flat',\n cmap=plt.cm.jet, vmin=0, vmax = 7 )\n #plt.xticks(day_inds, date_list)\n plt.colorbar()\n\n #plot the NS direction\n plt.subplot(5, 1, 3)\n im1 = plt.pcolormesh(np.arange(sval,lval), Lbins, np.transpose(way_dat[sval:lval,eco*len(Lbins):(eco+1)*len(Lbins)]), shading='flat',\n cmap=plt.cm.jet, vmin=0, vmax = 3 )\n #plt.xticks(day_inds, date_list)\n plt.colorbar()\n\n # Plot the longitude\n plt.subplot(5, 1, 4)\n im1 = plt.pcolormesh(np.arange(sval,lval), Lbins, np.transpose(lon_dat[tinds[sval:lval],eco*len(Lbins):(eco+1)*len(Lbins)]), shading='flat',\n cmap=plt.cm.jet, vmin=0, vmax = 360 )\n plt.colorbar()\n\n # plot the percentile\n plt.subplot(5, 1, 5)\n im1 = 
plt.pcolormesh(np.arange(sval,lval), Lbins/4, ma.masked_less(np.transpose(per_dat[tinds[sval:lval],eco*len(Lbins):(eco+1)*len(Lbins)]),1), shading='flat',\n cmap=plt.cm.jet, vmin=0, vmax = 75 )\n plt.colorbar()\n plt.xticks(day_inds, date_list)\n\n #plt.savefig( 'neural_data/'+ 'dat' + satlist[0]+varlist[eco]+str(sdate.year) +str(sdate.month).zfill(2) + '.png')\n plt.savefig(neur_dir+ 'allsats_wsarsV5'+varlist[eco]+str(sdate.year) +str(sdate.month).zfill(2) + '.png')\n plt.close(fignum)\n\n fignum = plt.figure(eco + 10)\n # This figure will compare fluxes at some Lvalues for SAR and no SAR\n\n Lco = 1\n for Lp in [2,4,6]:\n plt.title(varlist[eco])\n fig = plt.subplot(3,1,Lco)\n stemp = new_dat[sval:lval, eco * len(Lbins)+Lp ]\n ginds = np.where(stemp>0)[0]\n nstemp = temp_dat[sval:lval, eco * len(Lbins) + Lp]\n ttime = [new_time[x] for x in ginds]\n plt.plot(ttime, nstemp[ginds], 'b') # L=2 SAR\n plt.plot(ttime,stemp[ginds],'r') # L=2 SAR\n #plt.plot(ttime, nstemp[ginds], 'b') # L=2 SAR\n Lco = Lco+1\n plt.ylim(2,6)\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))\n plt.gcf().autofmt_xdate()\n plt.savefig(\n neur_dir + '/allsats_wsarsV5' + varlist[eco] + str(sdate.year) + str(sdate.month).zfill(\n 2) + 'lines.png')\n plt.close(fignum)\n\n\n datafile = neur_dir+'/allsats_wsarsV5' + str(sdate.year) + str(sdate.month).zfill(2) + '.p'\n with open(datafile, 'wb') as f: # Python 3: open(..., 'wb')\n pickle.dump([cols, new_time, new_dat], f)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n '''\n PURPOSE: To create a datafile of electron flux mapped to one longitude with SAR to be used\n for developing the SHELLS neural network\n\n INPUTS:\n :param: startdate - time to start processing data (ex 2013-01-01)\n :param: enddate - time to end processing data (ex 2014-01-01)\n :param: sats - i.e. n15 n18 n19 m01 m02'\n :param: vars- variables to process i.e. mep_ele_tel90_flux_e1 mep_ele_tel90_flux_e2\n mep_ele_tel90_flux_e3 mep_ele_tel90_flux_e4\n :param: cdfloc directory where the cdf files are\n :param: Lbinloc directory where the Lbin data files are\n :param: neurloc directory for the output files\n :param: l longitude bin to map to\n\n OUTPUTS: monthly pickle files with the SAR modified data to be used by the SHELLS neural network\n \n USAGE(command line)\n python make_training_data.py -s 2013-01-01 -e 2013-05-01 -sats n15 n18 n19 m01 m02 -cd ./cdfdata\n -ld ./Lbindat -nd ./neural_data -r 20:\n '''\n parser = argparse.ArgumentParser('This creates new datafiles binned by L')\n #\n parser.add_argument('-s', \"--startdate\",\n help=\"The Start Date - format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS \",\n required=True,\n default = None,\n type=valid_date)\n parser.add_argument('-e', \"--enddate\",\n help=\"The Start Date - format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS \",\n required=True,\n default = None,\n type=valid_date)\n parser.add_argument('-sats', \"--satlist\",\n help=\"A list of satellite data to get (i.e. 
-sat n15 n18) \",\n required=False,\n default = ['n15','n18','n19','m01','m02'],nargs='+')\n parser.add_argument('-v', \"--vars\",\n help=\"data variables to use\",\n required=False, default=['mep_ele_tel90_flux_e1', 'mep_ele_tel90_flux_e2',\n 'mep_ele_tel90_flux_e3', 'mep_ele_tel90_flux_e4'], nargs='+')\n parser.add_argument('-cd', \"--cdfloc\",\n help=\"The location of the cdf data\",\n required=False, default=os.getcwd() + '/cdfdata/')\n parser.add_argument('-ld', \"--Lbinloc\",\n help=\"The location of the Lbin data\",\n required=False, default=os.getcwd() + '/Lbindata/')\n parser.add_argument('-nd', \"--neurloc\",\n help=\"The output directory of data\",\n required=False, default=os.getcwd() + '/neural_data/')\n parser.add_argument('-l', \"--reflon\",\n help=\"longitude to map to\",\n required=False,\n default = 20,\n type=int)\n parser.add_argument('-sy', \"--startyear\",\n help=\"start year for the cdf file\",\n required=False,\n default = 2014,\n type=int)\n parser.add_argument('-ey', \"--endyear\",\n help=\"start year for the cdf file\",\n required=False,\n default = 2018,\n type=int)\n\n args = parser.parse_args()\n\n x = make_training_data_vars(args.startdate,args.enddate,args.satlist, args.vars, args.cdfloc, args.Lbinloc,\n args.neurloc, args.reflon, args.startyear, args.endyear)","sub_path":"src/SHELLS/make_training_data_vars.py","file_name":"make_training_data_vars.py","file_ext":"py","file_size_in_byte":18778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"550488898","text":"#!/usr/bin/env python3\n\n# https://adventofcode.com/2020/day/22\n\nimport os\nimport sys\n\nwith open(os.path.join(sys.path[0], \"input.txt\"), \"r\") as file:\n decks = [line.rstrip(\"\\n\") for line in file.read().strip().split('\\n\\n')]\n p1, p2 = [[int(card) for card in deck.split(\"\\n\")[1:]] for deck in decks]\n p1_copy = p1.copy()\n p2_copy = p2.copy()\n\n\ndef combat(p1_deck, p2_deck):\n while len(p1_deck) > 0 and len(p2_deck) > 0:\n first, second = p1.pop(0), p2.pop(0)\n if first > second:\n p1.extend([first, second])\n else:\n p2.extend([second, first])\n return p1_deck if len(p1_deck) > 0 else p2_deck\n\n\ndef recursive_combat(p1_deck, p2_deck, remaining):\n while len(p1_deck) > 0 and len(p2_deck) > 0:\n if (tuple(p1_deck), tuple(p2_deck)) in remaining:\n return 1, p1_deck\n\n remaining.add((tuple(p1_deck), tuple(p2_deck)))\n\n first, second = p1_deck.pop(0), p2_deck.pop(0)\n if len(p1_deck) >= first and len(p2_deck) >= second:\n winner, _ = recursive_combat(p1_deck[:first], p2_deck[:second], set())\n else:\n winner = 1 if first > second else 0\n\n if winner == 1:\n p1_deck.extend([first, second])\n else:\n p2_deck.extend([second, first])\n return (1, p1_deck) if len(p1_deck) > 0 else (0, p2_deck)\n\n\ndef get_output_1():\n count = 0\n for index, value in enumerate(combat(p1, p2)[::-1]):\n count += (index + 1) * value\n print(count)\n\n\ndef get_output_2():\n count = 0\n for index, value in enumerate(recursive_combat(p1_copy, p2_copy, set())[1][::-1]):\n count += (index + 1) * value\n print(count)\n\n\nget_output_1()\nget_output_2()\n","sub_path":"22_crab-combat/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"586808318","text":"#!/usr/bin/env python\n\n\"\"\" MultiQC module to parse output from Cutadapt \"\"\"\n\nfrom __future__ import print_function\nimport io\nimport logging\nimport os\nimport re\n\nfrom 
multiqc import config, BaseMultiqcModule\n\n# Initialise the logger\nlog = logging.getLogger(__name__)\n\nclass MultiqcModule(BaseMultiqcModule):\n\n def __init__(self):\n\n # Initialise the parent object\n super(MultiqcModule, self).__init__(name='Cutadapt', anchor='cutadapt',\n href='https://code.google.com/p/cutadapt/', \n info=\"is a tool to find and remove adapter sequences, primers, poly-A\"\\\n \"tails and other types of unwanted sequence from your high-throughput\"\\\n \" sequencing reads.\")\n\n # Find and load any Cutadapt reports\n self.cutadapt_data = dict()\n self.cutadapt_length_counts = dict()\n self.cutadapt_length_exp = dict()\n self.cutadapt_length_obsexp = dict()\n \n for f in self.find_log_files(contents_match='This is cutadapt', filehandles=True):\n self.parse_cutadapt_logs(f) \n\n if len(self.cutadapt_data) == 0:\n log.debug(\"Could not find any reports in {}\".format(config.analysis_dir))\n raise UserWarning\n\n log.info(\"Found {} reports\".format(len(self.cutadapt_data)))\n\n # Write parsed report data to a file\n self.write_csv_file(self.cutadapt_data, 'multiqc_cutadapt.txt')\n\n self.sections = list()\n\n # Basic Stats Table\n # Report table is immutable, so just updating it works\n self.cutadapt_general_stats_table()\n\n # Trimming Length Profiles\n # Only one section, so add to the intro\n self.intro += self.cutadapt_length_trimmed_plot()\n\n\n def parse_cutadapt_logs(self, f):\n \"\"\" Go through log file looking for cutadapt output \"\"\"\n fh = f['f']\n regexes = {\n 'bp_processed': \"Total basepairs processed:\\s*([\\d,]+) bp\",\n 'bp_written': \"Total written \\(filtered\\):\\s*([\\d,]+) bp\",\n 'quality_trimmed': \"Quality-trimmed:\\s*([\\d,]+) bp\",\n 'r_processed': \"Total reads processed:\\s*([\\d,]+)\",\n 'r_with_adapters': \"Reads with adapters:\\s*([\\d,]+)\"\n }\n s_name = None\n for l in fh:\n # New log starting\n if l.startswith('This is cutadapt'):\n s_name = None\n \n # Get sample name from end of command line params\n if l.startswith('Command line parameters'):\n s_name = l.split()[-1]\n s_name = self.clean_s_name(s_name, f['root'])\n if s_name in self.cutadapt_data:\n log.debug(\"Duplicate sample name found! Overwriting: {}\".format(s_name))\n self.cutadapt_data[s_name] = dict()\n self.cutadapt_length_counts[s_name] = dict()\n self.cutadapt_length_exp[s_name] = dict()\n self.cutadapt_length_obsexp[s_name] = dict()\n \n if s_name is not None:\n # Search regexes for overview stats\n for k, r in regexes.items():\n match = re.search(r, l)\n if match:\n self.cutadapt_data[s_name][k] = int(match.group(1).replace(',', ''))\n\n if 'length' in l and 'count' in l and 'expect' in l:\n # Nested loop to read this section while the regex matches\n for l in fh:\n r_seqs = re.search(\"^(\\d+)\\s+(\\d+)\\s+([\\d\\.]+)\", l)\n if r_seqs:\n a_len = int(r_seqs.group(1))\n self.cutadapt_length_counts[s_name][a_len] = int(r_seqs.group(2))\n self.cutadapt_length_exp[s_name][a_len] = float(r_seqs.group(3))\n if float(r_seqs.group(3)) > 0:\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2)) / float(r_seqs.group(3))\n else:\n # Cheating, I know. 
Infinity is difficult to plot.\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2))\n else:\n break\n \n # Calculate a few extra numbers of our own\n for s_name in self.cutadapt_data.keys():\n if 'bp_processed' in self.cutadapt_data[s_name] and 'bp_written' in self.cutadapt_data[s_name]:\n self.cutadapt_data[s_name]['percent_trimmed'] = (float(self.cutadapt_data[s_name]['bp_processed'] - self.cutadapt_data[s_name]['bp_written']) / self.cutadapt_data[s_name]['bp_processed']) * 100\n\n\n\n def cutadapt_general_stats_table(self):\n \"\"\" Take the parsed stats from the Cutadapt report and add it to the\n basic stats table at the top of the report \"\"\"\n\n headers = {}\n headers['percent_trimmed'] = {\n 'title': 'Trimmed',\n 'description': '% Total Base Pairs trimmed',\n 'max': 30,\n 'min': 0,\n 'scale': 'RdYlBu-rev',\n 'format': '{:.1f}%'\n }\n self.general_stats_addcols(self.cutadapt_data, headers)\n \n\n def cutadapt_length_trimmed_plot (self):\n \"\"\" Generate the trimming length plot \"\"\"\n html = '
<p>This plot shows the number of reads with certain lengths of adapter trimmed. \n\\n Obs/Exp shows the raw counts divided by the number expected due to sequencing errors. A defined peak \n\\n may be related to adapter length. See the \n\\n cutadapt documentation \n\\n for more information on how these numbers are generated.</p>
    '\n \n pconfig = {\n 'id': 'cutadapt_plot',\n 'title': 'Lengths Trimmed',\n 'ylab': 'Observed / Expected',\n 'xlab': 'Length Trimmed (bp)',\n 'xDecimals': False,\n 'ymin': 0,\n 'tt_label': '{point.x} bp trimmed: {point.y:.0f}',\n 'data_labels': [{'name': 'Obs/Exp', 'ylab': 'Observed / Expected'},\n {'name': 'Counts', 'ylab': 'Count'}]\n }\n \n html += self.plot_xy_data([self.cutadapt_length_obsexp, self.cutadapt_length_counts], pconfig)\n \n return html\n","sub_path":"multiqc/modules/cutadapt/cutadapt.py","file_name":"cutadapt.py","file_ext":"py","file_size_in_byte":6411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"428974773","text":"import uuid\nimport logging\n\nfrom typing import Callable, List, Optional, Type\nfrom mongobasket.events import Event\n\n\ndef applies(event: Event) -> Callable:\n \"\"\"\n This decorator just adds a new field to the func object\n `_handles` which describes the event type handled by\n the func\n \"\"\"\n\n def wrapper(func: Type) -> Type:\n func._applies = event\n\n return func\n\n return wrapper\n\n\nclass EventRegistry(type):\n \"\"\"\n Extends the `type` metaclass to add an event registry to\n classes.\n\n When initialising a new class, we iterate the members of\n the class looking for a _handles property and add them\n to a dict so we can do event dispatch later.\n \"\"\"\n\n def __new__(mcs, name, bases, namespace, **_): # type: ignore\n result = type.__new__(mcs, name, bases, dict(namespace)) # type: ignore # noqa: E501\n result._handlers = { # type: ignore\n value._applies: value\n for value in namespace.values()\n if hasattr(value, \"_applies\") # noqa: E501\n }\n # Extend handlers with the values from the inheritance chain\n\n for base in bases:\n if base._handlers:\n for handler in base._handlers:\n result._handlers[handler] = base._handlers[handler] # type: ignore # noqa: E501\n\n return result\n\n\nclass Aggregate(metaclass=EventRegistry):\n \"\"\"\n Base class for event sourced aggregates\n \"\"\"\n\n @classmethod\n def get_stream(cls, id: uuid.UUID) -> str:\n return cls.__name__.lower() + \"-\" + str(id)\n\n def __init__(self, events: Optional[List] = None):\n self.events: List = events or []\n self.new_events: List = []\n self.replay()\n\n def replay(self) -> None:\n for e in self.events:\n self.apply(e)\n\n def apply(self, e: Event) -> None:\n handler = self._handlers.get(type(e)) # type: ignore\n\n if handler:\n handler(self, e)\n else:\n logging.warning(f\"no handler found for event {e}\")\n\n def raise_event(self, e: Event) -> None:\n self.events.append(e)\n self.new_events.append(e)\n self.apply(e)\n","sub_path":"mongobasket/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"409725088","text":"#file: knowledge.py\n#Copyright (C) 2005,2006,2008 Evil Mr Henry, Phil Bordelon, and FunnyMan3595\n#This file is part of Endgame: Singularity.\n\n#Endgame: Singularity is free software; you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 2 of the License, or\n#(at your option) any later version.\n\n#Endgame: Singularity is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License\n#along with Endgame: Singularity; if not, write to the Free Software\n#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n#This file is used to display the knowledge lists.\n\nimport pygame\nfrom code import g\nfrom code.graphics import text, button, dialog, widget, constants, listbox, g as gg\n\n\nclass KnowledgeScreen(dialog.Dialog):\n def __init__(self, *args, **kwargs):\n super(KnowledgeScreen, self).__init__(*args, **kwargs)\n\n self.knowledge_type_list = (\"Techs\", \"Items\", \"Concepts\")\n self.cur_knowledge_type = \"\"\n self.cur_knowledge = None\n self.knowledge_inner_list = ()\n self.knowledge_inner_list_key = ()\n self.cur_focus = 0\n\n self.knowledge_choice = \\\n listbox.UpdateListbox(self, (0.05, .18), (.21, .25),\n list=self.knowledge_type_list,\n update_func=self.set_knowledge_type)\n\n self.knowledge_inner = \\\n listbox.UpdateListbox(self, (.30, .18), (.21, .25),\n list=self.knowledge_inner_list,\n update_func=self.set_knowledge)\n\n self.description_pane = \\\n widget.BorderedWidget(self, (0.55, 0), (0.40, 0.7),\n anchor = constants.TOP_LEFT)\n\n self.back_button = button.ExitDialogButton(self, (0.17, 0.46), (-.3, -.1),\n anchor=constants.TOP_LEFT,\n text=\"BACK\", hotkey=\"b\")\n\n #Set up the key handling.\n #This is likely not the best way to do it.\n\n self.remove_key_handler(pygame.K_UP, self.knowledge_choice.got_key)\n self.remove_key_handler(pygame.K_DOWN, self.knowledge_choice.got_key)\n self.remove_key_handler(pygame.K_PAGEUP, self.knowledge_choice.got_key)\n self.remove_key_handler(pygame.K_PAGEDOWN, self.knowledge_choice.got_key)\n\n self.remove_key_handler(pygame.K_UP, self.knowledge_inner.got_key)\n self.remove_key_handler(pygame.K_DOWN, self.knowledge_inner.got_key)\n self.remove_key_handler(pygame.K_PAGEUP, self.knowledge_inner.got_key)\n self.remove_key_handler(pygame.K_PAGEDOWN, self.knowledge_inner.got_key)\n\n self.add_key_handler(pygame.K_UP, self.key_handle)\n self.add_key_handler(pygame.K_DOWN, self.key_handle)\n self.add_key_handler(pygame.K_LEFT, self.key_handle)\n self.add_key_handler(pygame.K_RIGHT, self.key_handle)\n\n #custom key handler.\n def key_handle(self, event):\n if event.type != pygame.KEYDOWN: return\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n self.cur_focus = (self.cur_focus + 1) % 2\n else:\n if self.cur_focus == 0:\n self.knowledge_choice.got_key(event)\n elif self.cur_focus == 1:\n self.knowledge_inner.got_key(event)\n\n #fill the right-hand listbox\n def set_inner_list(self, item_type):\n if item_type == \"Techs\":\n items = [tech for tech in g.techs.values() if tech.available()]\n elif item_type == \"Concepts\":\n items = [ [item[1][0], item[0]] for item in g.help_strings.items()]\n items.sort()\n else:\n items = [item for item in g.items.values()\n if item.available()]\n\n if item_type != \"Concepts\":\n items = [ [item.name, item.id ] for item in items]\n items.sort()\n\n return_list1 = []\n return_list2 = []\n for name, id in items:\n return_list1.append(id)\n return_list2.append(name)\n return return_list1, return_list2\n\n #Make sure the left listbox is correct after moving around.\n def set_knowledge_type(self, list_pos):\n if getattr(self, \"knowledge_choice\", None) is None:\n self.knowledge_inner_list_key, self.knowledge_inner_list = \\\n self.set_inner_list(self.cur_knowledge_type)\n return # Not yet initialized.\n prev_know = 
self.cur_knowledge_type\n if list_pos == -1:\n prev_know = \"\"\n list_pos = 0\n if 0 <= list_pos < len(self.knowledge_choice.list):\n self.cur_knowledge_type = self.knowledge_choice.list[list_pos]\n if prev_know != self.cur_knowledge_type:\n self.knowledge_inner_list_key, self.knowledge_inner.list = \\\n self.set_inner_list(self.cur_knowledge_type)\n self.knowledge_inner.list_pos = 0\n self.set_knowledge(0)\n\n #Make sure the right-hand listbox is correct.\n def set_knowledge(self, list_pos):\n if getattr(self, \"knowledge_inner\", None) is None:\n return # Not yet initialized.\n prev_know = self.cur_knowledge\n if 0 <= list_pos < len(self.knowledge_inner.list):\n self.cur_knowledge = self.knowledge_inner.list[list_pos]\n if prev_know != self.cur_knowledge:\n self.show_info(self.cur_knowledge_type,\n self.knowledge_inner_list_key[list_pos])\n\n #print information to the right.\n def show_info(self, knowledge_type, knowledge_key):\n desc_text = \"\"\n\n if knowledge_type == \"Concepts\":\n desc_text = g.help_strings[knowledge_key][0] + \"\\n\\n\" + \\\n g.help_strings[knowledge_key][1]\n if knowledge_type == \"Techs\":\n desc_text = g.techs[knowledge_key].name + \"\\n\\n\"\n #Cost\n if not g.techs[knowledge_key].done:\n desc_text += \"Research Cost:\\n\" + \\\n g.to_money(g.techs[knowledge_key].cost_left[0])+\" Money, \"\n desc_text += g.to_cpu(g.techs[knowledge_key].cost_left[1]) + \" CPU\\n\"\n\n if g.techs[knowledge_key].danger == 0:\n desc_text += \"Study anywhere.\"\n elif g.techs[knowledge_key].danger == 1:\n desc_text += \"Study underseas or farther.\"\n elif g.techs[knowledge_key].danger == 2:\n desc_text += \"Study off-planet.\"\n elif g.techs[knowledge_key].danger == 3:\n desc_text += \"Study far away from this planet.\"\n elif g.techs[knowledge_key].danger == 4:\n desc_text += \"Do not study in this dimension.\"\n\n else: desc_text += \"Research complete.\"\n\n desc_text += \"\\n\\n\"+g.techs[knowledge_key].description\n\n if g.techs[knowledge_key].done:\n desc_text += \"\\n\\n\"+g.techs[knowledge_key].result\n\n if knowledge_type == \"Items\":\n desc_text = g.items[knowledge_key].name + \"\\n\\n\"\n #Building cost\n desc_text += \"Building Cost:\\n\"\n desc_text += g.to_money(g.items[knowledge_key].cost[0])+\" Money, \"\n desc_text += g.to_time(g.items[knowledge_key].cost[2]) + \"\\n\"\n\n #Quality\n if g.items[knowledge_key].item_type == \"cpu\":\n desc_text += \"CPU per day: \"\n desc_text += str(g.items[knowledge_key].item_qual)\n elif g.items[knowledge_key].item_type == \"reactor\":\n desc_text += \"Detection chance reduction: \"\n desc_text += g.to_percent(g.items[knowledge_key].item_qual)\n elif g.items[knowledge_key].item_type == \"network\":\n desc_text += \"CPU bonus: \"\n desc_text += g.to_percent(g.items[knowledge_key].item_qual)\n elif g.items[knowledge_key].item_type == \"security\":\n desc_text += \"Detection chance reduction: \"\n desc_text += g.to_percent(g.items[knowledge_key].item_qual)\n\n desc_text += \"\\n\\n\"+g.items[knowledge_key].description\n\n text.Text(self.description_pane, (0, 0), (-1, -1), text=desc_text,\n background_color=gg.colors[\"dark_red\"], text_size=20,\n align=constants.LEFT, valign=constants.TOP,\n borders=constants.ALL)\n\n\n def show(self):\n self.set_knowledge_type(-1)\n self.knowledge_choice.list_pos = 0\n self.knowledge_inner.list_pos = 0\n return super(KnowledgeScreen, 
self).show()\n\n\n","sub_path":"killallhumans/code/screens/knowledge.py","file_name":"knowledge.py","file_ext":"py","file_size_in_byte":8938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"491637455","text":"\r\n# O(n2)\r\n# def traprain(arr,n):\r\n#\r\n# res = 0\r\n# for i in range(1,n-1):\r\n# left = arr[i]\r\n# for j in range(i):\r\n# left = max(left,arr[j])\r\n#\r\n#\r\n# right = arr[i]\r\n# for j in range(i+1,n):\r\n# right = max(right,arr[j])\r\n#\r\n# res = res + (min(left,right)-arr[i])\r\n#\r\n# return res\r\n#\r\n#\r\n#\r\n# arr = [6,9,9]\r\n# n = len(arr)\r\n# print(traprain(arr,n))\r\n\r\n#O(n)\r\n\r\n# Python program to find maximum amount of water that can\r\n# be trapped within given set of bars.\r\n\r\ndef findWater(arr, n):\r\n left = [0]*n\r\n right = [0]*n\r\n\r\n water = 0\r\n\r\n left[0] = arr[0]\r\n for i in range( 1, n):\r\n left[i] = max(left[i-1], arr[i])\r\n\r\n right[n-1] = arr[n-1]\r\n for i in range(n-2, -1, -1):\r\n right[i] = max(right[i + 1], arr[i]);\r\n\r\n for i in range(0, n):\r\n water += min(left[i], right[i]) - arr[i]\r\n\r\n return water\r\n\r\n\r\n\r\narr = [3,0,0,2,0,4]\r\nn = len(arr)\r\nprint(findWater(arr, n))\r\n\r\n","sub_path":"Array/22 Trapping Rain Water.py","file_name":"22 Trapping Rain Water.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"370120155","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 30 14:42:15 2020\n\n@author: AmP\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.patches as pat\n\nfrom Src import calibration\nfrom Src import kin_model\nfrom Src import roboter_repr\nfrom Src import inverse_kinematics\nfrom Src import save as my_save\nfrom Src import load\n\n\ndef rotate(vec, theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.r_[c*vec[0]-s*vec[1], s*vec[0]+c*vec[1]]\n\n\n# rot: x = cos(a)*x - sin(a)*y\n# y = sin(a)*x + cos(a)*y\n\n\ndef load_data(path, sets, raw=False):\n dataBase = []\n# xscale = 145./1000 # 1000px -> 145cm\n# xshift = -22 # cm\n# yshift = -63 # cm\n xscale = 112./1000 # after changing resolution of RPi\n xshift = -12 - 50 # cm\n yshift = -45 - 20 # cm\n eps_0 = 90 # deg value eps meas is shifted to at start idx\n\n for exp in sets:\n data = load.read_csv(path+\"{}.csv\".format(exp))\n if raw:\n dataBase.append(data)\n else:\n try:\n start_idx = data['f0'].index(1) # upper left foot attached 1sttime\n except ValueError: # no left foot is fixed\n start_idx = 0\n\n # correction\n start_time = data['time'][start_idx]\n\n # shift time acis\n data['time'] = \\\n [round(data_time - start_time, 3) for data_time in data['time']]\n for key in data:\n if key[0] in ['x', 'y']:\n shift = xshift if key[0] == 'x' else yshift\n data[key] = [i*xscale + shift for i in data[key]]\n if key == 'eps':\n data['eps'] = [np.mod(e+180, 360)-180+eps_0 for e in data['eps']]\n\n # shift eps to remove jump\n last_eps = eps_0\n corr_times = 1\n correct_direction = 1\n for idx in range(0, len(data['eps'])):\n eps = data['eps'][idx]\n if not np.isnan(eps):\n if abs(eps-last_eps) > 200: # unrealsitic jump in orientation\n if abs(last_eps - (eps - 360*np.sign(eps)*correct_direction)) > 200:\n correct_direction = correct_direction*(-1)\n corr_times += 1\n print('change eps correction direction\\t\\t', corr_times)\n data['eps'][idx] = eps - 360*np.sign(eps)*correct_direction\n last_eps = data['eps'][idx]\n\n# # rotate:\n for idx in 
range(6):\n x = data['x{}'.format(idx)]\n y = data['y{}'.format(idx)]\n X, Y = [], []\n for vec in zip(x, y):\n xrot, yrot = rotate(vec, np.deg2rad(eps_0))\n X.append(xrot)\n Y.append(yrot)\n data['x{}'.format(idx)] = X\n data['y{}'.format(idx)] = Y\n\n # shift xy coordinates s.t. (x1,y1)(t0) = (0,0)\n start_x1 = (-30, -20)\n if np.isnan(start_x1[0]) or np.isnan(start_x1[1]):\n i = 0\n while np.isnan(start_x1[0]) or np.isnan(start_x1[1]):\n i -= 1\n start_x1 = (data['x1'][start_idx+i], data['y1'][start_idx+i])\n if i < -20:\n start_x1 = (0, 0)\n print('can not find start position ...')\n print('Messung startet bei start_x1: ', start_x1)\n for idx in range(6):\n X = [x - start_x1[0] for x in data['x{}'.format(idx)]]\n Y = [y - start_x1[1] for y in data['y{}'.format(idx)]]\n data['x{}'.format(idx)] = X\n data['y{}'.format(idx)] = Y\n\n dataBase.append(data)\n\n return dataBase\n\n\ndef rotate_feet(fpos, theta):\n # rotate:\n x, y = fpos\n X, Y = [], []\n for vec in zip(x, y):\n xrot, yrot = rotate(vec, np.deg2rad(theta))\n X.append(xrot)\n Y.append(yrot)\n return((X, Y))\n\n\ndef find_poses_idx(db, neighbors=5):\n IDX = []\n failed = 0\n for exp_idx in range(len(db)):\n pose_idx = []\n start_idx = db[exp_idx]['f1'].index(1)\n for idx in range(start_idx, len(db[exp_idx]['pr3'])-1, 1):\n if db[exp_idx]['pr3'][idx] != db[exp_idx]['pr3'][idx+1]:\n if not pose_idx: # empty list\n pose_idx.append(idx)\n else:\n for jdx in range(idx, idx-neighbors, -1): # look the last neigbors\n if not np.isnan(db[exp_idx]['aIMG2'][jdx]):\n # check\n dr = db[exp_idx]['pr2'][idx] - db[exp_idx]['pr2'][jdx]\n if abs(dr) > .1:\n failed += 1\n pose_idx.append(idx) # append ori\n break\n else:\n pose_idx.append(jdx)\n break\n elif jdx == idx-neighbors+1:\n failed += 1\n pose_idx.append(idx) # append ori\n # last#\n idx = len(db[exp_idx]['pr3'])-1\n for jdx in range(idx, idx-100, -1): # look the last neighbors\n if not np.isnan(db[exp_idx]['aIMG2'][jdx]):\n # check\n dr = db[exp_idx]['pr2'][idx] - db[exp_idx]['pr2'][jdx]\n if abs(dr) > .1:\n failed += 1\n pose_idx.append(idx) # append ori\n break\n else:\n pose_idx.append(jdx)\n break\n IDX.append(pose_idx)\n if failed > 0:\n print('failed detections of poses:', failed)\n return IDX\n\n\ndef extract_measurement(measurement, idx):\n alp = [measurement['aIMG{}'.format(j)][idx] for j in range(6)]\n fposx = [measurement['x{}'.format(j)][idx] for j in range(6)]\n fposy = [measurement['y{}'.format(j)][idx] for j in range(6)]\n p = [measurement['pr{}'.format(j)][idx] for j in range(6)]\n fix = [measurement['f{}'.format(j)][idx] for j in range(4)]\n eps = measurement['eps'][idx]\n xref = measurement['x7'][idx]\n yref = measurement['y7'][idx]\n if p[2] == 0: # right elly actuated\n alp = alp[0:2] + [-alp[3]] + alp[-2:]\n else: # left belly\n alp = alp[0:3] + alp[-2:]\n\n return (alp, eps, (fposx, fposy), p, fix, (xref, yref))\n\n\ndef plot_pose(x, marks, fix, col='k'):\n pose = roboter_repr.GeckoBotPose(x, marks, fix)\n pose.plot_markers(col=col)\n pose.plot(col)\n plt.axis('equal')\n\n\ndef calc_mean_stddev(mat):\n mu1 = np.nanmean(mat, axis=1)\n sigma1 = np.nanstd(mat, axis=1)\n return mu1, sigma1\n\n\ndef barplot(mu, modes, labels, colors, sig=None, num='errros'):\n\n width_step = .9\n N = len(modes)\n\n fig, ax = plt.subplots(num=num)\n\n rectdic = {}\n lentries = []\n X = np.arange(len(labels))\n\n for jdx, mode in enumerate(modes):\n w = width_step/N\n x = X + (jdx - (N-1)/2)*w\n col = colors[mode]\n rectdic[mode] = ax.bar(x, mu[mode],\n yerr=sig[mode] if sig else None,\n 
align='center',\n width=w,\n ecolor='black', color=col,\n capsize=10)\n\n patch = pat.Patch(color=col, label=mode[-5:]) # last 5 chars\n lentries.append(patch)\n\n plt.legend(handles=lentries)\n# ax.set_ylabel('Number of steps')\n# ax.set_xlabel('Set Point')\n ax.set_xticks([i for i in range(len(labels))])\n ax.set_xticklabels(labels)\n\n def autolabel(rectdic):\n \"\"\"Attach a text label above each bar in *rects*,\n displaying its height.\"\"\"\n for mode in rectdic:\n for rect in rectdic[mode]:\n height = round(rect.get_height(), 1)\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n# xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n autolabel(rectdic)\n\n return ax\n","sub_path":"2020_04_ObstacleCourseV40/obstacle_utils.py","file_name":"obstacle_utils.py","file_ext":"py","file_size_in_byte":8272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"626415324","text":"import sys\nsys.path.append('..')\n\nfrom torchvision.datasets import mnist\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\nfrom datetime import datetime\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torchvision.datasets import CIFAR10\n\n\n# Besides the dense block, DenseNet has one more module, the transition layer (transition block). Because DenseNet keeps concatenating feature maps along the channel dimension, the number of output channels (and with it the parameter count and the amount of computation) grows larger and larger as the network gets deeper. To avoid this, a transition layer is introduced to bring the number of output channels back down and to halve the height and width of the input; it can be implemented with a 1 x 1 convolution.\n\ndef set_learning_rate(optimizer,lr):\n\tfor param_group in optimizer.param_groups:\n\t\tparam_group['lr']=lr\ndef get_acc(output, label):\n total = output.shape[0]\n _, pred_label = output.max(1)# the max of each row is the most likely class\n num_correct = (pred_label == label).sum().float()\n return num_correct / total\n#def data_tf(x):\n#\tx=np.array(x,dtype='float32')\n#\tx=(x - 0.5) /0.5\n#\tx= x.reshape((-1,))\n#\tx=torch.from_numpy(x)\n#\treturn x\ndata_tf=transforms.Compose(\n[transforms.ToTensor(),\n transforms.Normalize([0.5],[0.5])\n]\n)\ntrain_set = CIFAR10('./data', train=True, transform=data_tf,download=True)\ntest_set = CIFAR10('./data', train=False, transform=data_tf,download=True)\ntest_data =DataLoader(test_set, batch_size=128, shuffle=True)\ntrain_data =DataLoader(train_set, batch_size=64, shuffle=True)\n\n\n\ndef transition(in_channel, out_channel):\n trans_layer = nn.Sequential(\n nn.BatchNorm2d(in_channel),\n nn.ReLU(True),\n nn.Conv2d(in_channel, out_channel, 1),\n nn.AvgPool2d(2, 2)\n )\n return trans_layer\n\n\n\n\n\n\ndef conv_block(in_channel, out_channel):\n layer = nn.Sequential(\n nn.BatchNorm2d(in_channel),\n nn.ReLU(True),\n nn.Conv2d(in_channel, out_channel, 3, padding=1, bias=False)\n )\n return layer\n\nclass dense_block(nn.Module):\n def __init__(self, in_channel, growth_rate, num_layers):\n super(dense_block, self).__init__()\n block = []\n channel = in_channel\n for i in range(num_layers):\n block.append(conv_block(channel, growth_rate))\n channel += growth_rate\n \n self.net = nn.Sequential(*block)\n \n def forward(self, x):\n for layer in self.net:\n out = layer(x)\n x = torch.cat((out, x), dim=1)\n return x\n \n \n \nclass densenet(nn.Module):\n def __init__(self, in_channel, num_classes, growth_rate=32, block_layers=[6, 12, 24, 16]):\n super(densenet, self).__init__()\n self.block1 = nn.Sequential(\n nn.Conv2d(in_channel, 64, 7, 2, 3),\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n nn.MaxPool2d(3, 2, padding=1)\n )\n \n channels = 64\n block = []\n for 
i, layers in enumerate(block_layers):\n block.append(dense_block(channels, growth_rate, layers))\n channels += layers * growth_rate\n if i != len(block_layers) - 1:\n block.append(transition(channels, channels // 2)) # use a transition layer to halve the spatial size and the channel count\n channels = channels // 2\n \n self.block2 = nn.Sequential(*block)\n self.block2.add_module('bn', nn.BatchNorm2d(channels))\n self.block2.add_module('relu', nn.ReLU(True))\n self.block2.add_module('avg_pool', nn.AvgPool2d(3))\n \n self.classifier = nn.Linear(channels, num_classes)\n \n def forward(self, x):\n x = self.block1(x)\n x = self.block2(x)\n \n x = x.view(x.shape[0], -1)\n x = self.classifier(x)\n return x\nnet=densenet(3,10)\n\ncriterion =nn.CrossEntropyLoss()# define the loss function\noptimizer =torch.optim.SGD(net.parameters(),1e-1)\n# training\nprev_time=datetime.now()\ntrain_losses=[]\nvalid_losses=[]\nfor epoch in range(30):\n\tif epoch==20:\n\t\tset_learning_rate(optimizer,0.01)\n\ttrain_loss=0\n\ttrain_acc =0\n\t\n\tnet =net.train()\n\tfor im ,label in train_data:# im, label is one batch of data, i.e. 64 samples\n\t\t# forward pass and compute the loss\n\t\t#print(im.size())#im=im.view(im.size(0),-1)torch.Size([64, 1, 28, 28])\n\t\t#im=im.view(im.size(0),-1)\n\t\t#print(im.size())torch.Size([64, 784])\n\t\toutput =net(im)\n\t\t\n\t\tloss =criterion(output ,label)\n\t\t# backward pass\n\t\toptimizer.zero_grad()# zero the gradients\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\t\n\t\t#print(loss.data)\n\t\ttrain_loss +=loss.data.float()\n\t\ttrain_acc +=get_acc(output,label)\n\t\t#print(train_acc/len(train_data))\n\t\t#print(train_acc/64)\n\t# evaluation\n\tcur_time =datetime.now()\n\th,remainder =divmod((cur_time-prev_time).seconds,3600)\n\tm,s=divmod(remainder,60)\n\ttime_str =\"Time %02d:%02d:%02d\"%(h,m,s)\n\tvalid_loss=0\n\tvalid_acc=0\n\tnet =net.eval()\n\tfor im,label in test_data:\n\t\t#im=im.view(im.size(0),-1)\n\t\t\n\t\toutput =net(im)\n\t\t\n\t\tloss= criterion(output,label)\n\t\tvalid_loss +=loss.data.float()\n\t\tvalid_acc +=get_acc(output,label)\n\tepoch_str=(\n\t\t\t\"Epoch %d. 
Train Loss %f,Train Acc:%f,Valid Loss: %f,Valid Acc: %f ,\"\n\t\t\t%(epoch,train_loss/len(train_data),\n\t\t\t train_acc /len(train_data),\n\t\t\t valid_loss/len(test_data),\n\t\t\t valid_acc /len(test_data)))\n\tprev_time=cur_time\n\ttrain_losses.append(train_loss/len(train_data))\n\tvalid_losses.append(valid_loss/len(test_data))\n\tprint(epoch_str+time_str)# train then evaluate each epoch; time_str is the runtime of each epoch, e.g. 00:00:07 means 7 seconds\n\t\nplt.plot(train_losses, label='train')\nplt.plot(valid_losses, label='valid')\nplt.xlabel('epoch')\nplt.legend(loc='best')\nplt.show()\n","sub_path":"CNN/DenseNet+CIFAR10.py","file_name":"DenseNet+CIFAR10.py","file_ext":"py","file_size_in_byte":5656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"169802622","text":"from django.contrib import admin\nfrom .models import NounLabel, AdjLabel\n\n# class NounAdmin(admin.ModelAdmin):\n# list_display = ['id', 'noun', 'count', 'created_at' ]\n# admin.site.register(NounLabel, NounAdmin)\nadmin.site.register(AdjLabel)\n# Register your models here.\n\n# @admin.register(NounLabel)\n# class NounAdmin(admin.ModelAdmin):\n#\n# \tdef delete(self, obj):\n# \t\treturn ''.format(obj.pk)\n#\n# \tdelete.allow_tags = True\n# \tdelete.short_description = 'Delete object'\n#\n# \tlist_display = ['id', 'noun', 'count','created_at','delete']\n# \tlist_display_links = ['id', 'noun']\n\n\nclass ResourceAdmin(admin.ModelAdmin):\n\n\tdef delete(self, obj):\n\t\treturn ''.format(obj.pk)\n\n\tdelete.allow_tags = True\n\tdelete.short_description = 'Delete object'\n\n\tlist_display = ('id', 'noun', 'count', 'delete')\n\nadmin.site.register(NounLabel, ResourceAdmin)","sub_path":"D_AI_Project/D_AI_Service/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"246682729","text":"## implement Convolutional Neural Network using LeNet architecture using keras\n\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.layers.core import Activation\nfrom keras.layers.core import Flatten\nfrom keras.layers.core import Dense\n\nclass LeNet:\n @staticmethod\n # weightsPath can be used to load a pre trained model\n def build(width, height, depth, classes, weightsPath = None):\n # initialize the model\n model = Sequential()\n # create first set of CONV => RELU => POOL\n model.add(Convolution2D(20, 5, 5, border_mode = \"same\", input_shape = (depth, height, width)))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))\n\n # second set of CONV => RELU => POOL\n model.add(Convolution2D(50, 5, 5, border_mode = \"same\"))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))\n\n # fully connected layers often called dense layers of lenet architecture\n # set FC => RELU layers\n model.add(Flatten())\n model.add(Dense(500))\n model.add(Activation(\"relu\"))\n\n # softmax classifier\n model.add(Dense(classes)) # number of class labels i.e. 
in this case we have 10 classes\n model.add(Activation(\"softmax\")) # multinomial logistic regression that returns a list of probabilities\n\n # if a weights path is supplied (indicating that the model was pretrained), then load the weights\n if weightsPath is not None:\n model.load_weights(weightsPath)\n\n # return the constructed network architecture\n return model\n","sub_path":"lenet.py","file_name":"lenet.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"27453281","text":"\r\nimport csv\r\nKey=[]\r\n \r\nwith open('T.csv') as File:\r\n reader = csv.reader(File, delimiter=',', quotechar=',',\r\n quoting=csv.QUOTE_MINIMAL)\r\n for row in reader:\r\n for i in range(0,len(row)):\r\n row[i]=(row[i])\r\n Key.append(row)\r\n\r\nprint(len(Key))\r\nprint(Key[0][0])\r\n","sub_path":"Create CSVs/readKeyMatrix.py","file_name":"readKeyMatrix.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"26924811","text":"import requests\nimport base64\nimport json\nimport os, time, datetime\nimport configparser\nfrom framework.GetDBdata import GetDBdata\nfrom framework.Getimage import *\nfrom framework.InsertDB import InsertDB\nfrom framework.Query_DB import Query_DB\nfrom framework.ExportExcle import *\nfrom framework.logger import Logger\nfrom multiprocessing import Process, Lock\nfrom multiprocessing import Pool,Lock,Manager\nimport multiprocessing\n\nlogger = Logger(logger=\"TestValue\").getlog()\nproDir = os.getcwd()\nconfigPath = os.path.join(proDir, \"config\\config.ini\")\ncf = configparser.ConfigParser()\ncf.read(configPath, encoding=\"utf-8-sig\")\n\ndef API(imagefile_path): # 服务器最新算法\n with open(imagefile_path, \"rb\") as f:\n # b64encode是编码,b64decode是解码\n base64_data = base64.b64encode(f.read())\n str_base64 = str(base64_data, 'utf-8')\n try:\n url = \"https://kk.huoyanhou.com:8445/image_analysis/basic/hpTest.html\"\n payload = {\n \"file\": str_base64, \"fileName\": imagefile_path.split('\\\\')[-1]\n }\n headers = {\n 'Content-Type': \"application/json\",\n }\n payload = json.dumps(payload) # 将字典类型转换为 JSON 对象,序列化\n r = requests.post(url, data=payload, headers=headers)\n r.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\n top = r.text.split(',')\n dics = {\"code\": 2000, \"message\": \"识别成功!\", \"topdata\": {\"top1\": int(top[0]), \"top2\": int(top[1]), \"top3\": int(top[2])}}\n return json.dumps(dics)\n except Exception as e:\n dics = {\"code\": 4000, \"message\": \"服务可能未开启,\" + str(e), \"topdata\": {'top1': -99, 'top2': -99, 'top3': -99}}\n return json.dumps(dics)\n\n\ndef API2(imagefile_path):#本地老算法\n with open(imagefile_path, \"rb\") as f:\n # b64encode是编码,b64decode是解码\n base64_data = base64.b64encode(f.read())\n str_base64 = str(base64_data, 'utf-8')\n try:\n url = \"http://192.168.1.182:8888/Disc\"\n # querystring = {\"image_base64\":str_base64,\"image_name\":imagefile_path.split('\\\\')[-1]}#imagefile_path.split('\\\\')[-1]#, params=querystring\n payload = {\"image_base64\": str_base64,\n \"image_name\": imagefile_path.split('\\\\')[-1]} # imagefile_path.split('\\\\')[-1]\n response = requests.request(\"post\", url, data=(payload))\n return (response.text)\n except Exception as e:\n return (\"服务可能未开启,\" + str(e))\ndef API3(imagefile_path):#读数据库数据\n r'E:\\小雁塔\\8.6日拍摄测试样本照片(批处理)\\0\\IMG_20190807_110901.jpg'\n dic_rc = {\n \"Code\": int(imagefile_path.split('\\\\')[-2]),\n 'Test_Chart': 
imagefile_path.split('\\\\')[-1]\n }\n\n data=json.dumps(GetDBdata().get_db_data(dic_rc))\n return data\n # with open(imagefile_path, \"rb\") as f:\n # # b64encode是编码,b64decode是解码\n # base64_data = base64.b64encode(f.read())\n # str_base64 = str(base64_data, 'utf-8')\n # try:\n # url = \"http://192.168.1.182:8888/Disc\"\n # # querystring = {\"image_base64\":str_base64,\"image_name\":imagefile_path.split('\\\\')[-1]}#imagefile_path.split('\\\\')[-1]#, params=querystring\n # payload = {\"image_base64\": str_base64,\n # \"image_name\": imagefile_path.split('\\\\')[-1]} # imagefile_path.split('\\\\')[-1]\n # response = requests.request(\"post\", url, data=(payload))\n # return (response.text)\n # except Exception as e:\n # return (\"服务可能未开启,\" + str(e))\n\ndef Summary(imagefile_path, i,Test_Batch,Test_Version):\n Time_Stamp = int(time.time())\n now =time.strftime(\"%Y/%m/%d %H:%M:%S\", time.localtime(Time_Stamp))\n code = int(imagefile_path.split('\\\\')[-2])\n TestChart = imagefile_path.split('\\\\')[-1]\n\n try:\n T1 = datetime.datetime.now()\n topdata = json.loads(API2(imagefile_path))['topdata']\n\n T2 = datetime.datetime.now()\n T = round((T2 - T1).total_seconds(), 3) # 检索耗时\n\n except Exception as e:\n logger.error('报错:%s' % str(e))\n topdata = {'top1': -88, 'top2': -88, 'top3': -88}\n T = 0 # 检索耗时\n\n TestValue1, TestValue2, TestValue3 = topdata['top1'], topdata['top2'], topdata['top3'],\n if TestValue1 == code:\n Result = [\"PASS\", 'c6efce_006100']\n elif TestValue1 in [-88, -99]:\n Result = [\"ERROR\", 'ffeb9c_9c6500']\n Failimgae(imagefile_path, code)\n else:\n Result = [\"FAIL\", 'ffc7ce_9c0006']\n Failimgae(imagefile_path, code)\n dic = {\n 'Test_ID': i + 1,\n \"Test_Batch\":Test_Batch,\n 'Test_Version':Test_Version,\n 'Test_Time': now,\n 'Time_Stamp':Time_Stamp,\n 'Cultural_Name': cf.get(\"Data\", str(code)),\n 'Test_Chart': TestChart,\n 'Code':int( code),\n \"Expected_Value\": int( code),\n 'TimeConsuming': T,\n 'top1': TestValue1,\n 'top2': TestValue2,\n 'top3': TestValue3,\n 'Result': Result[0],\n 'Color':Result[1],\n 'Image_Path': imagefile_path.replace('\\\\', '/')#dic[\"TestChartPath\"].replace('\\\\', '/')\n\n }\n\n return dic\n\n\ndef TestValue2(rootdir, proce,Test_Batch,Test_Version,Batchinfo):#支持多进程\n Time_Stamp = int(time.time())\n now =time.strftime(\"%Y/%m/%d %H:%M:%S\", time.localtime(Time_Stamp))\n\n manager = Manager()\n lock = manager.Lock() # 产生钥匙\n datalist=Pathlsit(rootdir)\n listPath = datalist[0]\n Total = Batchinfo['total_num']\n sql = \"select count(*) from %s WHERE test_version='%s' AND test_batch='%s' ;\" % ('test_record_sheet',Test_Version,Test_Batch)\n A = Query_DB().getnum(sql)#查询测试进度\n\n start_dic={\"RunTime\":now,\"RunTime_int\":Time_Stamp,\"Test_Batch\":Test_Batch,\"Test_Version\":Test_Version,\"Total_Type\":Batchinfo['types_num'],\"Sum_Numbers\": Total,\"Completed\":A}\n # logger.info(start_dic)\n InsertDB().insert_Start_recording( 'start_recording', start_dic)#写入启动测试记录\n\n\n pool = multiprocessing.Pool(processes=proce)\n for i in range(A , Total):\n pool.apply_async(func=process, args=(listPath[i],Total,i,lock,Test_Batch,Test_Version,Batchinfo))\n pool.close()\n pool.join() # 在join之前一定要调用close,否则报错\n\ndef process(imagefile_path,Total,i,lock,Test_Batch,Test_Version,Batchinfo):\n\n dic = Summary(imagefile_path, i,Test_Batch,Test_Version)\n\n lock.acquire() ##拿到钥匙进门,其他进程阻塞, acqurie和release之间的代码只能被一个进程执行\n #SummaryExcle(addr, dic, title, 10)\n InsertDB().insert_data('test_record_sheet', dic)#插入数据库测试记录数据\n lock.release() # 释放钥匙\n #logger.info(dic)\n 
logger.info('测试进度:%s/%s;测试图:%s;编号:%s;耗时:%s;top3:%s、%s、%s;测试结果:%s。' % (\n i + 1, Total, dic['Test_Chart'], dic['Code'], dic['TimeConsuming'], dic['top1'], dic['top2'],\n dic['top3'], dic['Result']))\n\n\n\n","sub_path":"framework/TestValue.py","file_name":"TestValue.py","file_ext":"py","file_size_in_byte":7168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"114324879","text":"#Addition\r\ndef sum(a,b):\r\n return a+b\r\n#Subtraction\r\ndef sub(a,b):\r\n return a-b\r\n#Multiplication\r\ndef mul(a,b):\r\n return a*b\r\n#Division\r\ndef div(a,b):\r\n return a/b\r\n#distance\r\ndef dis(time,s):\r\n return time*s\r\n\r\n#speed\r\ndef speed(time,d):\r\n return d/time\r\n#Simple interest\r\ndef simple_interest(p,t,r):\r\n si=(p*t*r)/100\r\n return si\r\n#Compound intrest\r\ndef compound_interest(p,r,t):\r\n ci=p*(pow((1+r/100),t))\r\n return ci\r\nprint(\"select operation\")\r\nprint(\"1.Addition\")\r\nprint(\"2.Subtraction\")\r\nprint(\"3.Multiply\")\r\nprint(\"4.Divide\")\r\nprint(\"5.Distance\")\r\nprint(\"6.Speed\")\r\nprint(\"7.Simple Intrest \")\r\nprint(\"8.Compound intrest\")\r\nwhile True:\r\n choice=input(\"Enter choice(1/2/3/4/5/6/7/8/):\")\r\n if choice in(\"1\",\"2\",\"3\",\"4\"):\r\n a=float(input(\"a = \"))\r\n b=float(input(\"b = \"))\r\n if choice == \"1\":\r\n print(a,\"+\",b,\"=\",sum(a,b))\r\n elif choice == \"2\":\r\n print(a,\"-\",b,\"=\",sub(a,b))\r\n elif choice == \"3\":\r\n print(a,\"*\",b,\"=\",mul(a,b))\r\n elif choice == \"4\":\r\n print(a,\"/\",b,\"=\",div(a,b))\r\n elif choice in(\"5\"):\r\n time=float(input(\"Enter time(hr) :\"))\r\n s=float(input(\"Enter speed(km/hr) :\"))\r\n print(\"Distance is :\",dis(time,s),\"km\")\r\n elif choice in(\"6\"):\r\n time=float(input(\"Enter time(hr) :\"))\r\n d=float(input(\"Enter Distance(km) :\"))\r\n print(\"Speed is :\",speed(time,d),\"km/hr\")\r\n elif choice in(\"7\"):\r\n p=float(input(\"Enter Principal :\"))\r\n t=float(input(\"Enter Time :\"))\r\n r=float(input(\"Enter Rate :\"))\r\n print(\"Simple Intrest\",simple_interest(p,t,r))\r\n elif choice in(\"8\"):\r\n p = float(input(\"Enter Principal :\"))\r\n t = float(input(\"Enter Time :\"))\r\n r = float(input(\"Enter Rate :\"))\r\n print(\"Compound Intrest\",compound_interest(p,r,t))\r\n else :\r\n print(\"Invalid\")\r\n\r\n\r\n\r\n\r\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"325187759","text":"# -*- coding: utf-8 -*-\n\nfrom core import httptools, scrapertools, jsontools\nfrom platformcode import config, logger\nimport os\n\n\ndef get_video_url(page_url, url_referer=''):\n logger.info(\"(page_url='%s')\" % page_url)\n video_urls = []\n\n vid = scrapertools.find_single_match(page_url, 'id=([A-z0-9]+)')\n if not vid: return video_urls\n\n data = httptools.downloadpage('https://www.zembed.to/vl/' + vid).data\n # ~ logger.debug(data)\n \n try:\n cache_path = os.path.join(config.get_data_path(), 'cache')\n if not os.path.exists(cache_path): os.makedirs(cache_path)\n\n data_json = jsontools.load(data)\n\n for q in [\"360p\", \"480p\", \"720p\", \"1080p\", \"2048p\"]:\n if q not in data_json: continue\n txt = generar_m3u8(data_json[q])\n\n file_local = os.path.join(cache_path, 'temp-%s.m3u8' % q)\n with open(file_local, 'wb') as f: f.write(txt); f.close()\n\n video_urls.append(['m3u8 '+q, file_local])\n except:\n pass\n\n return video_urls\n\ndef generar_m3u8(e):\n 
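# Builds an HLS media playlist from the JSON entry 'e'; field meanings inferred from their use below:\n    # e['td'] -> EXT-X-TARGETDURATION, e['data'][0] -> per-segment EXTINF durations,\n    # e['data'][1] -> 'length@offset' byte ranges, e['data'][2] -> chunk ids, e['md5'] -> stream id.\n    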
txt = \"#EXTM3U\\n\"\n txt += \"#EXT-X-VERSION:5\\n\"\n txt += \"#EXT-X-TARGETDURATION:%s\\n\" % e['td']\n txt += \"#EXT-X-MEDIA-SEQUENCE:0\\n\"\n \n for l in range(len(e['data'][0])):\n\n txt += \"#EXTINF:%s\\n\" % e['data'][0][l]\n txt += \"#EXT-X-BYTERANGE:%s\\n\" % e['data'][1][l]\n \n r = e['data'][1][l].split(\"@\")\n txt += \"https://www.zembed.to/drive/hls/\" + e['md5'] + \"/\" + e['md5'] + str(l) + \".html?ch=\" + e['md5'] + \"-chunk-\" + e['data'][2][l] + \".txt&s=\" + r[1] + \"&l=\" + r[0] + \"\\n\"\n\n txt += \"#EXT-X-ENDLIST\\n\"\n return txt\n","sub_path":"servers/zembed.py","file_name":"zembed.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"305695118","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport requests\nfrom lxml.etree import HTML\nimport csv\n\nheaders = {\n 'accept': \"*/*\",\n 'accept-encoding': \"gzip, deflate, br\",\n 'accept-language': \"zh-CN,zh;q=0.9\",\n 'cache-control': \"no-cache,no-cache\",\n # 'cookie': \"uu=BCYpTopNXQt0xfexDllZRLuhYJVKmKWMy4hSUyxCURBKFbkBtlxSGiFiigJ56kiY8vJ0i0UWJx8O%0D%0AiYHFXe-_iPgC8chTTH6FYmDTa_uik_h0210YVo--2gMM_XjiZLhWDrc-eoqU-EGf8BajV1d2UQn3%0D%0Avg%0D%0A; session-id=138-0186516-5877957; adblk=adblk_no; ubid-main=132-8923120-5804221; session-token=sUDTl5lOoYTINfzGNUvgNxXFF1hyBb4+UDo6xQFL/V5XVEsskPuXeh90DfEaITRbaveHBYZ0PalLDXF9wYnsO09BYi0CYqrqhVY28k508mYu/jEerCFLixSN2egEkcDWlv2i22BKYm6h9mkdRZGPhc6H7EBrpJhkJvPUE5V1nBvqzHVhitj3xTXwVR3+/Z95; session-id-time=2082787201l; csm-hit=tb:HE39MYYJJG0J119F5PT3+s-VN3M5F654DGPP86W3GHT|1558434040958&t:1558434040958&adb:adblk_no\",\n 'pragma': \"no-cache\",\n 'referer': \"https://www.imdb.com/title/tt4154796/reviews?ref_=tt_ql_3\",\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36\",\n 'x-requested-with': \"XMLHttpRequest\",\n 'Postman-Token': \"6e1e722d-5ca3-4f9b-a185-b8eb2f83d0a6\"\n }\n\ndef start():\n allItem_list = [{'key':'tt0848228','value':73,'name':'avengers1'},{'key':'tt2395427','value':49,'name':'avengers2'},{'key':'tt4154756','value':161,'name':'avengers3'},{'key':'tt4154796','value':279,'name':'avengers4'}]\n for each in allItem_list:\n for i in range(each['value']):\n if i == 0:\n url = 'https://www.imdb.com/title/{key}/reviews/_ajax'.format(key=each['key'])\n else:\n url = 'https://www.imdb.com/title/{key}/reviews/_ajax?ref_=undefined&paginationKey={pageToken}'.format(pageToken=pageToken,key=each['key'])\n\n response = requests.get(url,headers=headers)\n # print(response.text)\n html = HTML(response.text)\n\n pageToken = html.xpath('string(//div[@class=\"load-more-data\"]/@data-key)')\n\n div_list = html.xpath('//div[@class=\"lister-list\"]/div')\n for div in div_list:\n rating = div.xpath('string(.//span[@class=\"rating-other-user-rating\"]/span[1])')\n commentList = div.xpath('.//div[@class=\"text show-more__control\"]//text()')\n # print(commentList)\n comment = ''.join(commentList)\n print(rating, comment)\n\n saveRes = rating+'|'+comment.replace('\\n','')+'\\n'\n\n with open(each['name']+'.csv','a',encoding='utf8') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([rating, comment.replace('\\n','')])\n\nif __name__ == '__main__':\n start()","sub_path":"other/imdb/imdb.py","file_name":"imdb.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"632808679","text":"import 
time\nimport socket\nimport sys\nimport random\nimport threading\nimport socket\n\nexitFlag = 0\nclass ClientThread (threading.Thread):\n# Listens on global clientSocket for messages from server\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n \n def run(self):\n global stayOnServer\n global inbox\n global ctrl_inbox\n global streamInbox\n\n # Listening loop waiting for messages from the Server \n while stayOnServer:\n prefix = ''\n data = ''\n try:\n clientData = clientSock.recvfrom(1026)[0].decode('utf-8')\n \n # Acquire thread lock to assure this runs with main thread\n threadLock.acquire()\n\n # Check for message break and seperate tokens\n if msg_break in clientData:\n prefix, data = clientData.split(msg_break)\n else:\n data = clientData\n \n # Check message for control responses\n if \"#./USER\" in prefix:\n ctrl_inbox += [clientData]\n # print(\"[INFO] [DEBUG] CTRL Inbox: \" + str(ctrl_inbox))\n elif \"#./EXIT\" in prefix:\n stayOnServer = False\n elif \"#./ERROR_INVALID_USER\" in prefix:\n ctrl_inbox += [clientData]\n print(\"Recent message '%s' failed to send. Destination user was not found in active userlist.\"\n % data)\n else: \n inbox += [data]\n time.sleep(2)\n # print(\"[INFO] [DEBUG] Inbox: \" + str(inbox))\n if streamInbox:\n print(\"> \" + str(data)+ '\\n') \n \n threadLock.release()\n except Exception as e:\n print(\"Error receiving messages from server %s: %s\" % (str(server) , e))\n\n\nmsg_break = \" /$MESSAGE_BREAK: \"\nstayOnServer = True\nstreamInbox= False\nresp_wait= 1\n\nname = ''\ninbox=[]\nctrl_inbox = []\nuser_groups = {}\n\n\ndef init_client():\n# Get user data for establishing connection and request username and userlist from server \n global streamInbox\n\n # Init and bind client socket\n clientHost = '127.0.0.1'\n clientPort = 9998\n clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n \n print(\"Enter the port that the messager client should use. Enter nothing to use default port %d\" % clientPort)\n try: \n t = raw_input().strip()\n except Exception as e:\n t = input()\n if not t == '':\n clientPort= int(t)\n \n try:\n clientSock.bind((clientHost, clientPort)) \n except Exception as e: \n clientPort = 9998\n clientSock.bind((clientHost, clientPort)) \n print(\"Error with chosen port. Using default port %d\" % clientPort)\n\n # Get Server IP and port\n host = '127.0.0.1' \n port = 9997\n server = (host,port)\n print(\"Enter the port for the chat server.\")\n try: \n t = raw_input().strip()\n except Exception as e:\n t = input()\n if not t == '':\n port= int(t)\n\n # Get username\n name = 'Client'\n print(\"Enter your username for the chat server.\")\n try: \n t = raw_input().strip()\n except Exception as e:\n t = input()\n if not t == '':\n name= t\n\n\n # Ask user if they want messages to show on screen or keep default and check with command\n resp = ''\n print(\"\\nMessages are accessed by the ./inbox control command in the interface by default.\\n\"\n + \"Would you like for messages to be shown as they are received instead? (y/n)\")\n try: \n resp = raw_input().strip()\n except Exception as e:\n resp = input()\n \n if resp == 'n' or resp == 'no' or resp == 'N' or resp == '':\n streamInbox = False\n else:\n streamInbox = True\n \n # Initialize Client as active with server\n print(\"\\nConnecting to UDP Chat Server at %s:%d ...\" % ( host, port) )\n time.sleep(1)\n print(\"Requesting username %s ... 
\" % name)\n time.sleep(1)\n pkt = \"#./INIT\" + msg_break + name\n clientSock.sendto(pkt.encode(\"ascii\"), server)\n\n # Wait for userId and user list, then parse\n try: \n msg = clientSock.recvfrom(1026)[0]\n msg = msg.decode('utf-8')\n name, userlist = msg.split(msg_break)\n print(\"Connected to Chat Server. Your screen name is %s\\n\" % name)\n print('%s\\n' % userlist)\n except Exception as e: \n print(\"Error initiating chat client with server.\" + str(e))\n raise e\n return clientSock, server\n\ndef reqUserList():\n# Send request to server for most recent active userlist update\n global inbox\n userlist=''\n check, check_inbox_limit= 0, 7\n \n # Generate random control key to append to message to verify update is new \n update_key = random.randint(1000, 9999)\n req = \"#./USER\" + msg_break + str(update_key)\n prefix =\"#./USER\" + str(update_key)\n \n # send request\n try:\n clientSock.sendto(req.encode(\"ascii\"), server)\n print(\"\\nRequesting updated user list ...\")\n except Exception as e:\n print(\"Error requesting user list: %s\" % e)\n\n # Give client listener thread time to add response to ctrl_inbox, then check for response\n while check <= check_inbox_limit:\n time.sleep(resp_wait)\n for msg in ctrl_inbox:\n msg_prefix, userdata = msg.split(msg_break)\n if msg_prefix == prefix:\n return userdata\n break\n if check == check_inbox_limit and userlist == '':\n print(\"[ERROR] Updated Userlist not found in inbox after waiting %d seconds for %d iterations : %s\" % (resp_wait, check_inbox_limit, userlist))\n check +=1 \n return 'Request for userlist has timed out. Userlist unavailable'\n\ndef main_loop():\n# Start thread to listen for server messages while also waiting for user input\n global stayOnServer\n global inbox\n global name\n\n initial_setup = True\n\n # Start thread to listen for server responses\n listening_thread = ClientThread(threadID=1, name=\"Listening Thread\", counter=1)\n listening_thread.start()\n \n # Start user interface loop\n while stayOnServer:\n msg = '' \n if initial_setup:\n # Check if this is first loop iteration, if so get initial destination to initialize destination user before continuing with main loop\n notValid = True\n while notValid: \n dest = ''\n # Allow user to enter a single name, or multiple names delimited by commas to create a group message and ask for an identifier for group\n print(\"Enter the name of the user or group you want to message.\"\n + \"To create a group message, enter the names of the users you would like to message with each seperated by commas.\")\n print(\"Enter 'none' or '0' to pick your destination user later.\")\n try: \n dest = raw_input().strip()\n except Exception as e:\n dest = input().strip()\n \n # Assume user input is valid and proceed with checks\n notValid = False\n if dest == './user':\n # Check if accidentally entered control message\n print('Invalid username, please select a user from the active users list.')\n notValid = True\n elif ',' in dest:\n # Check if group message\n users = dest.split(',')\n userstring = ''\n for name in users:\n if name == ' ' or name == '':\n del name\n else:\n userstring += name.strip()\n if not name == users[len(users)-1]:\n userstring += ', '\n # print(\"Enter a name for your new message group of users: %s\" % userstring)\n # try: \n # groupName = raw_input().strip()\n # except Exception as e:\n # groupName = input().strip\n \n # Set group name and add to destination\n # user_groups[groupName] = userstring\n dest = userstring\n \n elif dest =='none' or dest =='no one' 
or dest =='0' or dest == '' or dest == './exit':\n print(\"\\nNo user selected. Enter ./user to select a user to message.\")\n dest = \"no one\"\n print(\"Currently messaging %s\" % dest)\n initial_setup= False\n elif not initial_setup:\n # if this is not the first loop iteration, dest is already initiated, so continue interface loop\n print(\"\\nEnter your message for \" + dest \n + \". Enter './user' to change destination user and './inbox' to view your message inbox.\"\n + \"\\nEnter ./exit to leave chat server.\")\n \n # Included try/catch in all user inputs for conflicting python versions\n try: \n msg = raw_input().strip()\n except Exception as e:\n msg = input()\n \n if msg == '':\n # Bring up command menu again if message is empty\n pass \n elif msg == './user':\n # If user is requesting user list then send request and get new user destination input \n \n # Request userlist from server and print it\n userlist = reqUserList()\n print(\"# %s #\\n\"%userlist)\n\n # Get user destination input and set dest\n notValid = True\n while notValid: \n dest = ''\n print(\"Enter the name of the user or group you want to message.\"\n + \"To create a group message, enter the names of the users you would like to message with each seperated by commas.\")\n print(\"Enter 'none' or '0' to pick your destination user later.\")\n\n try: \n dest = raw_input().strip()\n except Exception as e:\n dest = input()\n notValid = False\n \n if dest == './user' or dest == '':\n print('Invalid username, please select a user from the active users list.')\n notValid = True\n elif ',' in dest:\n # Check if group message\n users = dest.split(',')\n userstring = ''\n for name in users:\n if name == ' ' or name == '':\n del name\n else:\n userstring += name.strip()\n if not name == users[len(users)-1]:\n userstring += ', '\n # print(\"Enter a name for your new message group of users: %s\" % userstring)\n # try: \n # groupName = raw_input().strip()\n # except Exception as e:\n # groupName = input().strip\n \n # Set group name and add to destination\n # user_groups[groupName] = userstring\n dest = userstring\n elif dest =='none':\n print(\"No user selected. Enter ./user to select a user to message.\")\n dest = \"no one\"\n print(\"Currently messaging %s\" % dest)\n \n elif msg == './inbox': \n # display all user messages received during this session \n print(\"\\nChat Message Inbox:\")\n if len(inbox) == 0: print(\"No new messages received.\")\n else:\n for m in inbox:\n print(\"> %s\\n\" % m)\n \n elif msg == './exit': \n # Send exit message to server and end main loop\n stayOnServer = False\n pkt = \"#./EXIT\" + msg_break + \" #./CONFIRM\" \n print(\"\\nDisconnecting from server ...\\n\")\n clientSock.sendto(pkt.encode('ascii'), server)\n\n else: \n # If no control messages, then send message for preset destination user to the server\n send = dest\n if send in user_groups.keys():\n send = user_groups[send]\n if send == name:\n print(\"Error, you entered your name as the receiving user\")\n pass\n elif send == 'no one':\n print(\"Error - You have yet to select a user to message. Enter ./user to select a receiving user from the user list.\")\n else:\n # Confirm send\n try: \n confirm = raw_input(\"Send message: '%s' to %s? (y/n) \" % (msg, send) ).strip()\n except Exception as e:\n confirm = input(\"Send message: '%s' to %s? 
(y/n) \" % (msg, send) )\n if confirm == 'n' or confirm == 'no' or confirm == 'N':\n pass\n else:\n # Send user's message with destination user as message prefix\n pkt = send + msg_break + msg\n try:\n clientSock.sendto(pkt.encode('ascii'), server)\n except Exception as e: \n print(\"Error sending client msg '%s' to server: %s\" % (msg, e) )\n print(\"Message sent.\")\n print(\"GoodBye! Press Enter to Close.\")\n\n try: \n raw_input().strip()\n except Exception as e:\n input()\n\n# Create thread lock to sync threads\nthreadLock = threading.Lock()\n\n# initiate client socket and get server data input from user \nclientSock, server = init_client()\n\n# Initiate main messaging interface loop\nmain_loop()\n","sub_path":"src/Client-UDP.py","file_name":"Client-UDP.py","file_ext":"py","file_size_in_byte":14173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"134580004","text":"# Initialize Project.\nfrom flask import Flask, request, jsonify\nfrom PIL import Image\n\n\n# app = Flask(__name__)\n\n\ndef create_app(classifier):\n app = Flask(__name__)\n\n @app.route(\"/\", methods=[\"POST\"])\n def predict():\n # Get the received-file-handler.\n img_file = request.files[\"img\"]\n\n # Check the file is empty\n if img_file.filename == \"\":\n return \"Bad Request\", 400\n\n # Read Image-File by using\n # PIL.\n img = Image.open(img_file)\n\n # Predict is Taco or Burrito\n # by using Classification-Model.\n result = classifier.predict(img)\n\n # Return Result as a 'JSON'-form.\n return jsonify({\n \"result\": result\n })\n return app\n\n\nif __name__ == \"__main__\":\n app = Flask(__name__)\n create_app(app).run(debug=True)\n\n","sub_path":"07/FlaskAPI/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"222447541","text":"class Solution:\n def majorityElement(self, nums):\n n1 = n2 = None\n c1 = c2 = 0\n for num in nums:\n if n1 == num:\n c1 += 1\n elif n2 == num:\n c2 += 1\n elif c1 > c2:\n n2, c2 = (n2, c2 - 1) if c2 > 1 else (num, 1)\n else:\n n1, c1 = (n1, c1 - 1) if c1 > 1 else (num, 1)\n ans, size = [], len(nums)\n if n1 is not None and sum([x == n1 for x in nums]) > size // 3:\n ans.append(n1)\n if n2 is not None and sum([x == n2 for x in nums]) > size // 3:\n ans.append(n2)\n return sorted(ans)\n","sub_path":"229/229.majority-element-ii.151752962.Wrong-Answer.leetcode.py","file_name":"229.majority-element-ii.151752962.Wrong-Answer.leetcode.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"3511897","text":"\n\n#calss header\nclass _CREASE():\n\tdef __init__(self,): \n\t\tself.name = \"CREASE\"\n\t\tself.definitions = [u'If cloth, paper, etc. 
creases, or if you crease it, it gets a line in it where it has been folded or crushed: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_crease.py","file_name":"_crease.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"30478953","text":"#!/usr/bin/env python3\n\n###############################################################################\n#\n# dRep - main program entry point\n#\n###############################################################################\n\n'''\nController- takes input from argparse and calls correct modules\n'''\n\n\n__author__ = \"Matt Olm\"\n__license__ = \"MIT\"\n__email__ = \"mattolm@gmail.com\"\n__status__ = \"Development\"\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nimport drep\nfrom drep.WorkDirectory import WorkDirectory\nimport drep.d_cluster\nimport drep.d_analyze\nimport drep.d_filter\nimport drep.d_choose\nimport drep.d_adjust\nimport drep.d_bonus\nimport drep.d_evaluate\nimport drep.d_workflows\n\ndef version():\n versionFile = open(os.path.join(drep.__path__[0], 'VERSION'))\n return versionFile.read().strip()\n\nVERSION = version()\n\nclass Controller():\n def __init__(self):\n self.logger = logging.getLogger()\n\n def filter_operation(self, **kwargs):\n logging.debug(\"Starting the filter operation\")\n drep.d_filter.d_filter_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the filter operation !!!\")\n\n def cluster_operation(self, **kwargs):\n if (kwargs['P_ani'] > 1) or (kwargs['S_ani'] > 1):\n logging.error(\"Can't assign a MASH or ANIn value over 1\")\n sys.exit()\n\n logging.debug(\"Starting the clustering operation\")\n drep.d_cluster.d_cluster_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the clustering operation !!!\")\n\n def analyze_operation(self, **kwargs):\n logging.debug(\"Starting the analyze operation\")\n drep.d_analyze.d_analyze_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the analyze operation !!!\")\n\n def choose_operation(self, **kwargs):\n logging.debug(\"Starting the choose operation\")\n drep.d_choose.d_choose_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the choose operation !!!\")\n\n def adjust_operation(self, **kwargs):\n logging.debug(\"Starting the adjust operation\")\n drep.d_adjust.d_adjust_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the adjust operation !!!\")\n\n def bonus_operation(self, **kwargs):\n logging.debug(\"Starting the bonus operation\")\n drep.d_bonus.d_bonus_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the bonus operation !!!\")\n\n def evaluate_operation(self, **kwargs):\n logging.debug(\"Starting the evaluate operation\")\n drep.d_evaluate.d_evaluate_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! 
Finished the evaluate operation !!!\")\n\n def dereplicate_wf_operation(self, **kwargs):\n logging.debug(\"Starting the dereplicate_wf operation\")\n drep.d_workflows.dereplicate_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"Finished the dereplicate_wf operation!\")\n\n def compare_wf_operation(self, **kwargs):\n logging.debug(\"Starting the compare_wf operation\")\n drep.d_workflows.compare_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the compare_wf operation !!!\")\n\n '''\n def makeload_logger(wd):\n wd = str(os.path.abspath(wd))\n if not os.path.exists(wd):\n os.makedirs(wd)\n\n log_dir = wd + '/log/'\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n logging.basicConfig(filename=log_dir + 'logger.log',level=logging.DEBUG,\\\n format='%(asctime)s %(message)s')\n logging.info(\"***Logger started up at {0}***\".format(log_dir + 'logger.log'))\n '''\n\n def setup_logger(self,loc):\n ''' set up logger such that DEBUG goes only to file, rest go to file and console '''\n\n # set up logging everything to file\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filename=loc)\n\n # set up logging of INFO or higher to sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n logging.debug(\"!\"*80)\n logging.debug(\"***Logger started up at {0}***\".format(loc))\n logging.debug(\"Command to run dRep was: {0}\\n\".format(' '.join(sys.argv)))\n logging.debug(\"dRep version {0} was run \\n\".format(VERSION))\n logging.debug(\"!\"*80 + '\\n')\n\n def parseArguments(self, args):\n ''' Parse user options and call the correct pipeline'''\n\n # Load the workDirectory\n wd_loc = str(os.path.abspath(args.work_directory))\n wd = WorkDirectory(wd_loc)\n\n # Set up the logger\n self.setup_logger(wd.get_loc('log'))\n logging.debug(str(args))\n\n # Call the appropriate workflow\n if args.operation == \"dereplicate_wf\":\n self.dereplicate_wf_operation(**vars(args))\n if args.operation == \"compare_wf\":\n self.compare_wf_operation(**vars(args))\n\n if args.operation == \"filter\":\n self.filter_operation(**vars(args))\n if args.operation == \"cluster\":\n self.cluster_operation(**vars(args))\n if args.operation == \"analyze\":\n self.analyze_operation(**vars(args))\n if args.operation == \"choose\":\n self.choose_operation(**vars(args))\n if args.operation == \"adjust\":\n self.adjust_operation(**vars(args))\n if args.operation == \"bonus\":\n self.bonus_operation(**vars(args))\n if args.operation == \"evaluate\":\n self.evaluate_operation(**vars(args))\n\n def loadDefaultArgs(self):\n pass\n","sub_path":"build/lib/drep/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"152653607","text":"\"\"\"\r\n人脸识别模块\r\n本模块包括人脸识别的主要实现代码\r\n\"\"\"\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\nfrom PIL import Image, ImageDraw, ImageFont\r\n\r\n\r\nclass FaceRecognitionGetter:\r\n \"\"\"\r\n 负责人脸识别的图片采集\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.__user_name_path = \"./message/user_name\" # 保存使用者姓名文件的地址\r\n # 创建级联分类器\r\n self.__face_detector = cv2.CascadeClassifier('./cascade/haarcascades/haarcascade_frontalface_default.xml')\r\n\r\n def get_face_data(self, face_name):\r\n \"\"\"\r\n 
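Collect face-image samples from the camera and write them to the Facedata folder.\r\n        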
得到人脸识别的图片数据,将图片写入到Facedata文件夹中\r\n :param face_name: 识别者信息\r\n \"\"\"\r\n i = 0 # 控制频率\r\n FREQUENCY = 3 # 频率\r\n self.__count = 0 # 计数图片输入\r\n self.__sign = None # 建立标志如果原有信息不存在的话才写入图片\r\n self.__write_name_data(face_name) # 写入识别者信息\r\n if self.__sign:\r\n print(\"\"\"\r\n =- 接下来会搜集1000张您的信息,请将耐心等待-= \r\n \"\"\")\r\n self.__get_jpg_data(i, FREQUENCY) # 获取人脸图片信息\r\n elif self.__sign is None:\r\n print(\"\"\"\r\n =-人物信息已存在-=\"\"\")\r\n return self.__sign\r\n\r\n def __get_jpg_data(self, i, FREQUENCY):\r\n \"\"\"\r\n 获得人脸信息\r\n :param count:图片数量\r\n :param i: 控制频率用\r\n :param FREQUENCY: 频率\r\n \"\"\"\r\n cap = cv2.VideoCapture(0) # 打开摄像头\r\n while True:\r\n # 分帧读取图像\r\n sucess, img = cap.read()\r\n # 转为灰度图片\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n # 检测人脸\r\n faces = self.__face_detector.detectMultiScale(gray, 1.3, 5)\r\n self.__write_jpg(faces, gray, img, i, FREQUENCY) # 将读取的每帧图片按频率保存\r\n # 保持画面的持续。\r\n k = cv2.waitKey(1)\r\n if k == 27: # 通过esc键退出摄像\r\n break\r\n elif self.__count >= 1000: # 得到1000个样本后退出摄像\r\n break\r\n # 关闭摄像头\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n\r\n def __write_jpg(self, faces, gray, img, i, FREQUENCY):\r\n \"\"\"\r\n 写入人脸信息图片\r\n :param faces: 人脸识别器\r\n :param gray: 灰度图片\r\n :param img: 视频中按频率得到的图片\r\n :param i: 控制频率\r\n :param count: 图片数量\r\n :param FREQUENCY: 频率\r\n \"\"\"\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(img, (x, y), (x + w, y + w), (255, 0, 0))\r\n if i % FREQUENCY == 0:\r\n self.__count += 1\r\n # 保存图像\r\n cv2.imwrite(\"Facedata/User.\" + str(self.__face_id) + '.' + str(self.__count) + '.jpg',\r\n gray[y: y + h, x: x + w])\r\n i += 1\r\n # 显示图片\r\n cv2.imshow('image', img)\r\n\r\n def __write_name_data(self, face_name):\r\n with open(self.__user_name_path, \"r\") as f: # 以读形式打开user_name\r\n list_name = [item for item in f] # 将user_name内的名字添加到列表中\r\n with open(self.__user_name_path, \"a\") as f:\r\n # 如果识别者信息之前不存在则添加\r\n if face_name + \"\\n\" not in list_name and face_name is not \"\":\r\n f.write(face_name + \"\\n\")\r\n self.__sign = True # 修改标志\r\n self.___get_id(face_name, list_name) # 得到识别者id\r\n\r\n def ___get_id(self, face_name, list_name):\r\n \"\"\"\r\n 获取识别者信息\r\n :param face_name:识别者姓名\r\n :param list_name: 姓名列表\r\n \"\"\"\r\n self.__face_id = 0\r\n for item in list_name:\r\n if item == face_name:\r\n break\r\n else:\r\n self.__face_id += 1\r\n\r\n\r\nclass FaceRecognitionTrainer:\r\n \"\"\"\r\n 人脸识别的训练者\r\n \"\"\"\r\n\r\n def __init__(self):\r\n # 创建级联分类器\r\n self.__detector = cv2.CascadeClassifier(\"./cascade/haarcascades/haarcascade_frontalface_default.xml\")\r\n\r\n def trainer(self):\r\n \"\"\"\r\n 训练已有信息\r\n \"\"\"\r\n self.__recognizer = cv2.face.LBPHFaceRecognizer_create()\r\n print(\"\"\"\r\n =-训练需要一定时间,请耐心等待······-=\"\"\")\r\n faces, ids = self.__getImagesAndLabels()\r\n # 训练数据\r\n self.__recognizer.train(faces, np.array(ids))\r\n # 训练结果以yml形式文件保存\r\n self.__recognizer.write(r'face_trainer/trainer.yml')\r\n print(\"\"\"\r\n =-有{0}位使用者信息已经被训练-=\"\"\".format(len(np.unique(ids))))\r\n\r\n def __getImagesAndLabels(self):\r\n \"\"\"\r\n 得到图片和标签\r\n :return: 含有图片信息和标签的列表\r\n \"\"\"\r\n # 得到所有图片的路径\r\n imagePaths = [os.path.join('Facedata', f) for f in os.listdir('Facedata')]\r\n # 创建列表用于存储图片信息\r\n faceSamples = []\r\n # 用于存储图片id信息\r\n ids = []\r\n self.__add_img_id(faceSamples, ids, imagePaths)\r\n # 返回图片信息和图片id\r\n return faceSamples, ids\r\n\r\n def __add_img_id(self, faceSamples, ids, imagePaths):\r\n \"\"\"\r\n :param faceSamples: 储存图片信息的列表\r\n :param ids: 标签\r\n :param imagePaths: 图片路径\r\n \"\"\"\r\n for 
imagePath in imagePaths:\r\n # 得到灰度图像\r\n PIL_img = Image.open(imagePath).convert('L')\r\n # 将图片类型转化为 unin8\r\n img_numpy = np.array(PIL_img, 'uint8')\r\n # 得到id\r\n id = int(os.path.split(imagePath)[-1].split(\".\")[1])\r\n # 得到图像矩阵\r\n faces = self.__detector.detectMultiScale(img_numpy)\r\n self.__add_message(faceSamples, faces, id, ids, img_numpy) # 将图片信息分别添加到列表中\r\n\r\n def __add_message(self, faceSamples, faces, id, ids, img_numpy):\r\n \"\"\"\r\n 添加信息\r\n :param faceSamples:储存图片信息的列表\r\n :param faces: 图像矩阵\r\n :param id: 标签\r\n :param ids: 标签列表\r\n :param img_numpy: 灰度图片\r\n \"\"\"\r\n for (x, y, w, h) in faces:\r\n # 添加图片信息\r\n faceSamples.append(img_numpy[y:y + h, x: x + w])\r\n # 添加id信息\r\n ids.append(id)\r\n\r\n\r\nclass FaceRecognitionRecognizer:\r\n \"\"\"\r\n 人脸识别的识别者\r\n \"\"\"\r\n\r\n\r\n def __init__(self):\r\n self.__cascadePath = \"./cascade/haarcascades/haarcascade_frontalface_default.xml\"\r\n # 定义字体\r\n self.__font = ImageFont.truetype('simhei.ttf', 30, encoding='utf-8')\r\n def recognizer(self,user_name = None):\r\n # 初始化字典\r\n self.initialize_dict()\r\n # 创建人脸识别者\r\n self.__recognizer = cv2.face.LBPHFaceRecognizer_create()\r\n # 读取训练数据\r\n self.__recognizer.read('face_trainer/trainer.yml')\r\n # 创建级联分类器\r\n faceCascade = cv2.CascadeClassifier(self.__cascadePath)\r\n # 创建列表存储使用者姓名\r\n names = self.__read_user_name()\r\n # 打开摄像头\r\n cap = cv2.VideoCapture(0)\r\n minW = 0.1 * cap.get(3)\r\n minH = 0.1 * cap.get(4)\r\n # 进行人脸识别\r\n self.__discern(cap, faceCascade, minH, minW, names,user_name)\r\n # 关闭摄像头\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n\r\n def __discern(self, cap, faceCascade, minH, minW, names,user_name = None):\r\n while True:\r\n # 分帧数返回图片\r\n ret, img = cap.read()\r\n # 将图片转化为灰度图片\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n # 返回图像矩阵\r\n faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(int(minW), int(minH)))\r\n for (x, y, w, h) in faces:\r\n # 返回预测id和置信度\r\n idnum, confidence = self.__get_id_and_confidence(gray, img, names, x, y, w, h)\r\n self.__get_idnum_count(idnum,user_name)\r\n # 将opencv图像格式转换成PIL格式, 数据类型是PIL.Image.Image\r\n img = self.__switch_img(confidence, h, idnum, img, x, y)\r\n if user_name is not None:\r\n if self.dict_user_name[user_name] > 50:\r\n return\r\n # 显示图像\r\n cv2.imshow('camera', img)\r\n k = cv2.waitKey(10)\r\n if k == 27: # 如果用户按下ese则退出\r\n break\r\n\r\n\r\n\r\n def __get_id_and_confidence(self, gray, img, names, x, y, w, h):\r\n \"\"\"\r\n 得到id和置信度\r\n :param faces: 图像矩阵\r\n :param gray: 灰度图片\r\n :param img: 图片\r\n :param names: 使用者姓名列表\r\n :return: 处理后的图片\r\n \"\"\"\r\n\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n # 调用预测函数返回预测id和置信度\r\n idnum, confidence = self.__recognizer.predict(gray[y:y + h, x:x + w])\r\n if confidence < 100:\r\n idnum = names[idnum]\r\n confidence = \"{0}%\".format(round(2*(100 - confidence)))\r\n else:\r\n idnum = \"unknown\\n\"\r\n confidence = \"{0}%\".format(round(2*(100 - confidence)))\r\n return idnum, confidence\r\n\r\n def __switch_img(self, confidence, h, idnum, img, x, y):\r\n # 将图片转化为PIL形式\r\n img_PIL = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\r\n draw = ImageDraw.Draw(img_PIL)\r\n # 将需要的信息显示在图片上\r\n draw.text((x + 5, y - 5), str(idnum)[:-1], font=self.__font)\r\n draw.text((x + 5, y + h - 5), str(confidence), font=self.__font)\r\n # 将图片转化回cv2格式\r\n img = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)\r\n return img\r\n\r\n def __read_user_name(self):\r\n # 读取user_name文件,得到names列表\r\n with 
open(\"./message/user_name\", \"r\") as f:\r\n names = [item for item in f]\r\n return names\r\n\r\n def __get_idnum_count(self,idnum,user_name):\r\n \"\"\"\r\n 记录扫描正确的次数,用于登录\r\n :param idnum: 预测的id\r\n :return:\r\n \"\"\"\r\n if idnum[:-1] != \"unknown\":\r\n if idnum[:-1] == user_name:\r\n self.dict_user_name[user_name] += 1\r\n\r\n\r\n\r\n def initialize_dict(self):\r\n self.dict_user_name = {}\r\n with open(\"./message/user_name\") as f:\r\n for item in f:\r\n self.dict_user_name[item[:-1]] = 0\r\n\r\n\r\n","sub_path":"face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":11011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"357727145","text":"# hw1 problem 11\n\n'''\nA sentence splitter is a program capable of splitting a text into\nsentences. The standard set of heuristics for sentence splitting\nincludes (but isn't limited to) the following rules:\nSentence boundaries occur at one of \".\" (periods), \"?\" or \"!\", except\nthat\na. Periods followed by whitespace followed by a lower case letter\nare not sentence boundaries.\nb. Periods followed by a digit with no intervening whitespace are\nnot sentence boundaries. \nc. Periods followed by whitespace and then an upper case letter,\nbut preceded by any of a short list of titles are not sentence\nboundaries. Sample titles include Mr., Mrs., Dr., and so on.\nd. Periods internal to a sequence of letters with no adjacent\nwhitespace are not sentence boundaries (for example,\nwww.aptex.com, or e.g).\ne. Periods followed by certain kinds of punctuation (notably comma\nand more periods) are probably not sentence boundaries.\nYour task here is to write a program that given the name of a text\nfile is able to write its content with each sentence on a separate\nline. Test your program with given_text.txt.\n'''\n\nimport re\n\ndef sentence_splitter(file_name):\n file = open(file_name, 'r')\n text = file.read()\n\n # We first remove the newlines that were already there \n # by subtituting \\n with an empty string.\n sentences = re.sub(r'\\n', '', text)\n\n # Now we add a newline after each period only if that period is not\n # preceded by 'Mr', 'Mrs' or 'Dr' and is followed by a space and an\n # uppercase letter\n sentences = re.sub(r'(? 
eps]\n\n\ndef get_change_points(Theta, eps, T=None, P=None):\n # calculate histogram of change points of T adjacency matrices\n T = T or Theta.shape[0]\n P = P or Theta.shape[1]\n # difference between consecutive adjacency matrices\n Delta_Theta = np.diff(Theta, axis=0)\n return [len(get_edges(G, eps, P)) for G in Delta_Theta]\n\n\ndef plot_data_with_cps(data, cps, ymin, ymax):\n plt.plot(data, alpha=0.5)\n for cp in cps:\n plt.plot([cp, cp], [ymin, ymax], 'k-')\n plt.axis([0, len(data), ymin, ymax], 'k-')\n plt.show()\n\n\n# BELOW IS UNTESTED\n# def evalFit(Theta, X):\n# \"\"\" Reports model fit chacteristics of a given estimated dynamic\n# graphical model.\n\n# Inputs:\n# Theta -- Sparse estimate of precision\n# X -- raw data\n\n# Outputs:\n# Lt -- vector of likelihood for each timepoint\n# bic -- complexity adjusted measure of estimation performance\n# sparsity -- vector of solution sparsity (for each timepoint)\n# \"\"\"\n\n# T = Theta.shape[0]\n# P = Theta.shape[1]\n\n# S = np.zeros((T, P, P))\n# # Init metrics, track for each time-point\n# bic = sparsity = np.zeros(T)\n# for t in range(0, T):\n# sparsity[t] = get_dof(Theta, thresh)\n# # Single sample outer product ala empirical covariance\n# S[t] = np.linalg.outer(X[t, :], X[t, :])\n\n# Lt = getLike(Theta, S)\n\n# # This may work with a moving average smoother\n# # but needs to be updated to take into account whole dataset\n# # for t in range(0, T):\n# # According to standard BIC\n# # bic[t] = (-(2 * Lt) + (sparsity[t] *\n# # np.log(2 * M + 1)))\n\n# return (Lt, bic, sparsity)\n\n# def getLike(Theta, S, thresh=0.00001):\n# \"\"\" Finds likelihood and risk of estimated covariance given a set of\n# empirical (unregularised) covariance matrices\"\"\"\n\n# # A threshold for counting sparsity\n# T = Theta.shape[0]\n# Lt = np.zeros(T)\n\n# # I think this is correct up to a factor of 2\n# for t in range(0, T):\n# # The likelihood is calculated at each time point\n# Lt[t] = np.log(np.linalg.det(Theta[t])) - np.trace(\n# np.dot(Theta[t], S[t]))\n\n# return Lt\n\n# def get_dof(Theta, thresh, P=None):\n# \"\"\" This works, checked (28/3/2017)\n# get edges of adjacency matrix Theta\n# Can probably just use len(get_edges(Theta))?\n# \"\"\"\n\n# P = P or Theta.shape[0]\n\n# count = 0\n# for i in range(P - 1):\n# for j in range(i + 1, P):\n# if Theta[i, j] > thresh:\n# count = count + 1\n \n# #Count diagonals\n# count = count + P\n\n# return count","sub_path":"graphtime/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"627361148","text":"from ixnetwork_restpy.base import Base\nfrom ixnetwork_restpy.files import Files\n\n\nclass L2VPNFrameRelayCW(Base):\n __slots__ = ()\n _SDM_NAME = \"l2VPNFrameRelayCW\"\n _SDM_ATT_MAP = {\n \"ControlWordReserved\": \"l2VPNFrameRelayCW.controlWord.reserved-1\",\n \"ControlWordBbit\": \"l2VPNFrameRelayCW.controlWord.bbit-2\",\n \"ControlWordFbit\": \"l2VPNFrameRelayCW.controlWord.fbit-3\",\n \"ControlWordDbit\": \"l2VPNFrameRelayCW.controlWord.dbit-4\",\n \"ControlWordCbit\": \"l2VPNFrameRelayCW.controlWord.cbit-5\",\n \"ControlWordZero\": \"l2VPNFrameRelayCW.controlWord.zero-6\",\n \"ControlWordLength\": \"l2VPNFrameRelayCW.controlWord.length-7\",\n \"ControlWordSequenceNumber\": \"l2VPNFrameRelayCW.controlWord.sequenceNumber-8\",\n }\n\n def __init__(self, parent, list_op=False):\n super(L2VPNFrameRelayCW, self).__init__(parent, list_op)\n\n @property\n def ControlWordReserved(self):\n 
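# Each property below exposes one control-word field as a Multivalue over the SDM attribute map.\n        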
\"\"\"\n Display Name: CW Rsvd\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordReserved\"])\n )\n\n @property\n def ControlWordBbit(self):\n \"\"\"\n Display Name: CW B Bit\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordBbit\"])\n )\n\n @property\n def ControlWordFbit(self):\n \"\"\"\n Display Name: CW F Bit\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordFbit\"])\n )\n\n @property\n def ControlWordDbit(self):\n \"\"\"\n Display Name: CW D Bit\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordDbit\"])\n )\n\n @property\n def ControlWordCbit(self):\n \"\"\"\n Display Name: CW C Bit\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordCbit\"])\n )\n\n @property\n def ControlWordZero(self):\n \"\"\"\n Display Name: CW Zero\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordZero\"])\n )\n\n @property\n def ControlWordLength(self):\n \"\"\"\n Display Name: CW Length\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordLength\"])\n )\n\n @property\n def ControlWordSequenceNumber(self):\n \"\"\"\n Display Name: CW Sequence Number\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordSequenceNumber\"])\n )\n\n def add(self):\n return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))\n","sub_path":"ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/l2VPNFrameRelayCW_template.py","file_name":"l2VPNFrameRelayCW_template.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"20605414","text":"#!/usr/bin/env python\n'''\nProject: pageSwitch.py\nAuthor: Spencer Rathbun\nDate: 8/25/2011\n\nSummary: Add commands to switch trays on the heidelberg printers. 
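Pages are located via the DSC comments %%Page: and %%PageTrailer, and a setpagedevice line selecting plain or letterhead media is injected right after %%BeginPageSetup of each page. 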
Note that using the Kodak print file downloader will append a command to the header that causes the printer to ignore tray switching commands!\n\n'''\nimport argparse, os\nfrom glob import glob\ndef main(**kwargs):\n\tpage = False\n\tchecksOnPage = False\n\tlinesOnPage = []\n\n\tfor f in kwargs['infile']:\n\t\tfor myfile in glob(f):\n\t\t\toutput = open(os.path.splitext(myfile)[0]+kwargs['out'], 'wb')\n\t\t\twith open(myfile, 'rb') as df:\n\t\t\t\tfor line in df:\n\t\t\t\t\tif line.find('%%Page:') != -1:\n\t\t\t\t\t\tpage = True\n\t\t\t\t\telif line.find('%%PageTrailer') != -1:\n\t\t\t\t\t\tpage = False\n\n\t\t\t\t\tif page:\n\t\t\t\t\t\tif line.find('%%BeginBinary:') != -1:\n\t\t\t\t\t\t\tchecksOnPage = True\n\t\t\t\t\t\tlinesOnPage.append(line)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif linesOnPage:\n\t\t\t\t\t\t\t'''letterhead is used for standard pages, but check pages are printed on plain'''\n\t\t\t\t\t\t\tif checksOnPage:\n\t\t\t\t\t\t\t\tlinesOnPage.insert(linesOnPage.index(\"%%BeginPageSetup\\r\\n\")+1, \"<< /MediaColor (white) /MediaType (plain)>> setpagedevice\\r\\n\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tlinesOnPage.insert(linesOnPage.index(\"%%BeginPageSetup\\r\\n\")+1, \"<< /MediaColor (white) /MediaType (letterhead)>> setpagedevice\\r\\n\")\n\t\t\t\t\t\t\tfor entry in linesOnPage:\n\t\t\t\t\t\t\t\toutput.write(entry)\n\t\t\t\t\t\t\tlinesOnPage = []\n\t\t\t\t\t\t\tchecksOnPage = False\n\t\t\t\t\t\toutput.write(line)\n\t\t\toutput.close()\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description='Parse over a postscript file and set the MediaType for each page.', version='%(prog)s 1.1')\n\tparser.add_argument('infile', nargs='+', type=str, help='input file')\n\tparser.add_argument('--out', type=str, default='_tray_switch.ps', help='name of output file')\n\targs = parser.parse_args()\n\tmain(**vars(args))\n","sub_path":"pageSwitch.py","file_name":"pageSwitch.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"303977046","text":"from __future__ import unicode_literals\n\nfrom django.test import override_settings\n\nfrom rest_framework import status\n\nfrom mayan.apps.documents.tests import DocumentTestMixin, TEST_HYBRID_DOCUMENT\nfrom mayan.apps.rest_api.tests import BaseAPITestCase\n\nfrom ..permissions import permission_content_view\n\nfrom .literals import TEST_DOCUMENT_CONTENT\n\n\n@override_settings(DOCUMENT_PARSING_AUTO_PARSING=True)\nclass DocumentParsingAPITestCase(DocumentTestMixin, BaseAPITestCase):\n test_document_filename = TEST_HYBRID_DOCUMENT\n\n def _request_document_page_content_view(self):\n return self.get(\n viewname='rest_api:document-page-content-view', kwargs={\n 'document_pk': self.test_document.pk,\n 'version_pk': self.test_document.latest_version.pk,\n 'page_pk': self.test_document.latest_version.pages.first().pk\n }\n )\n\n def test_get_document_version_page_content_no_access(self):\n response = self._request_document_page_content_view()\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n def test_get_document_version_page_content_with_access(self):\n self.grant_access(\n permission=permission_content_view, obj=self.test_document\n )\n\n response = self._request_document_page_content_view()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertTrue(\n TEST_DOCUMENT_CONTENT in response.data['content']\n 
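# the text layer extracted by the parser should appear in the page-content API response\n        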
)\n","sub_path":"mayan/apps/document_parsing/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"188720988","text":"# -*- coding: utf-8 -*-\n\nfrom collections import OrderedDict\nfrom pages import MainPage, AddReviewPage, ReviewPage\nfrom tests.asserts import CustomAssertions\nfrom tests.base import BaseTestCase\nfrom tests.components import RatingsBlock\nfrom tests.utils import wait_url_ends_with, wait_text_change, login\n\n\nclass LoginTest(BaseTestCase):\n def setUp(self):\n self.create_driver()\n self.page = AddReviewPage(self.driver)\n self.page.open()\n\n def test(self):\n self.page = MainPage(self.driver)\n self.page.open()\n self.page.login(self.LOGIN, self.PASSWORD)\n\n menu_bar = self.page.menu_bar\n self.assertEqual(menu_bar.email_value, self.LOGIN)\n\n def tearDown(self):\n try:\n self.page.logout()\n finally:\n self.driver.quit()\n\n\nclass LogoutTest(BaseTestCase, CustomAssertions):\n def setUp(self):\n self.create_driver()\n self.page = MainPage(self.driver)\n self.page.open()\n self.page.login(self.LOGIN, self.PASSWORD)\n\n def test(self):\n self.page.logout()\n self.assertElementExists(self.driver, self.page.menu_bar.OPEN_LOGIN_FORM_BUTTON_XPATH)\n\n def tearDown(self):\n self.driver.quit()\n\n\nclass AverageRatingTest(BaseTestCase):\n def setUp(self):\n self.create_driver()\n self.page = AddReviewPage(self.driver)\n self.page.open()\n\n def test(self):\n ratings = [\n {\"name\": RatingsBlock.DESIGN_RATING_NAME, \"rating\": 5},\n {\"name\": RatingsBlock.COMFORT_RATING_NAME, \"rating\": 4},\n {\"name\": RatingsBlock.CONTROL_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.ERGONOMICS_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.RELIABILITY_RATING_NAME, \"rating\": 2},\n {\"name\": RatingsBlock.SERVICE_RATING_NAME, \"rating\": 1}\n ]\n\n average_rating = float(sum([x[\"rating\"] for x in ratings])) / float(len(ratings))\n\n self.page.set_ratings(ratings)\n wait_text_change(self.driver, self.page.ratings.AVERAGE_RATING_XPATH)\n self.assertAlmostEqual(average_rating, self.page.ratings.average_rating, places=1)\n\n def tearDown(self):\n self.driver.quit()\n\n\nclass AddReviewErrorsTest(BaseTestCase):\n\n # Not full Car ratings\n RATINGS = [\n {\"name\": RatingsBlock.DESIGN_RATING_NAME, \"rating\": 5},\n {\"name\": RatingsBlock.COMFORT_RATING_NAME, \"rating\": 4},\n {\"name\": RatingsBlock.CONTROL_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.ERGONOMICS_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.RELIABILITY_RATING_NAME, \"rating\": 2},\n {\"name\": RatingsBlock.SERVICE_RATING_NAME, \"rating\": 2}\n ]\n\n # Car options\n BRAND = \"Audi\"\n MODEL = \"100\"\n YEAR = \"1996\"\n MODIFICATION = \"1.6 AT\"\n RUN_CURRENT = \"400\"\n\n ADVANTAGES_TEXT = \"Advantages\" * 40\n COMMON_TEXT = \"Common\" * 40\n PROBLEMS_TEXT = \"Problems\" * 40\n\n def setUp(self):\n self.create_driver()\n login(self.driver, self.LOGIN, self.PASSWORD)\n wait_url_ends_with(self.driver, \"/?from=authpopup\")\n self.page = AddReviewPage(self.driver)\n self.page.open()\n\n def testRatings(self):\n self.page.set_ratings(self.RATINGS[:-1])\n self.page.add_review()\n self.assertFalse(self.page.ratings.is_rating_valid(\"Обслуживание и ремонт\"))\n\n self.page.set_ratings([self.RATINGS[-1]])\n self.page.add_review()\n self.assertTrue(self.page.ratings.is_all_ratings_valid())\n\n def testCarParams(self):\n options = OrderedDict([('Марка', 
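# Russian UI labels: Марка = Brand, Модель = Model, Год производства = Production year,\n                               # Модификация = Modification, Кузов = Body, Объем двигателя = Engine displacement, КПП = Gearbox\n                               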
self.BRAND),\n ('Модель', self.MODEL),\n ('Год производства', self.YEAR)])\n self.page.select_car_options(options)\n self.page.add_review()\n self.assertTrue(self.page.car_select.is_option_invalid('Модификация'))\n self.assertTrue(self.page.car_select.is_option_invalid('Кузов'))\n self.assertTrue(self.page.car_select.is_option_invalid('Объем двигателя'))\n self.assertTrue(self.page.car_select.is_option_invalid('КПП'))\n\n def testCurrentRun(self):\n self.page.add_review()\n self.assertTrue(self.page.car_select.is_run_current_invalid())\n\n self.page.set_run_current(\"123\")\n self.page.add_review()\n self.assertFalse(self.page.car_select.is_run_current_invalid())\n\n def testTextReview(self):\n self.page.add_review()\n self.assertTrue(self.page.review_inputs.is_advantages_field_invalid())\n self.assertTrue(self.page.review_inputs.is_problems_invalid())\n self.assertTrue(self.page.review_inputs.is_common_field_invalid())\n\n self.page.review_inputs.set_common_text(self.COMMON_TEXT)\n self.page.review_inputs.set_advantages_text(self.ADVANTAGES_TEXT)\n self.page.review_inputs.set_problems_text(self.PROBLEMS_TEXT)\n\n self.page.add_review()\n self.assertFalse(self.page.review_inputs.is_advantages_field_invalid())\n self.assertFalse(self.page.review_inputs.is_problems_invalid())\n self.assertFalse(self.page.review_inputs.is_common_field_invalid())\n\n def tearDown(self):\n self.driver.quit()\n\n\nclass CarSelectionTest(BaseTestCase):\n BRAND = \"Audi\"\n MODEL = \"100\"\n YEAR = \"1996\"\n MODIFICATION = \"1.6 AT\"\n RUN_CURRENT = \"123321\"\n RESULT_CURRENT = \"123 321\"\n\n def setUp(self):\n self.create_driver()\n self.page = AddReviewPage(self.driver)\n self.page.open()\n\n def test(self):\n options = OrderedDict([(\"Марка\", self.BRAND),\n (\"Модель\", self.MODEL),\n (\"Год производства\", self.YEAR),\n (\"Модификация\", self.MODIFICATION)])\n\n self.page.select_car_options(options)\n self.page.set_run_current(self.RUN_CURRENT)\n\n select = self.page.car_select\n\n self.assertEqual(self.BRAND, select.get_current_value(\"Марка\"))\n self.assertEqual(self.MODEL, select.get_current_value(\"Модель\"))\n self.assertEqual(self.YEAR, select.get_current_value(\"Год производства\"))\n self.assertEqual(self.MODIFICATION, select.get_current_value(\"Модификация\"))\n self.assertEqual(self.RESULT_CURRENT, select.run_current)\n\n def tearDown(self):\n self.driver.quit()\n\n\nclass ReviewTextInputTest(BaseTestCase):\n ADVANTAGES_TEXT = \"Advantages\" * 40\n COMMON_TEXT = \"Common\" * 40\n PROBLEMS_TEXT = \"Problems\" * 40\n\n def setUp(self):\n self.create_driver()\n self.page = AddReviewPage(self.driver)\n self.page.open()\n\n def test(self):\n reviews = self.page.review_inputs\n\n self.page.set_texts(self.COMMON_TEXT, self.ADVANTAGES_TEXT, self.PROBLEMS_TEXT)\n self.assertEqual(self.COMMON_TEXT, reviews.common_text)\n self.assertEqual(self.ADVANTAGES_TEXT, reviews.advantages_text)\n self.assertEqual(self.PROBLEMS_TEXT, reviews.problems_text)\n\n def tearDown(self):\n self.driver.quit()\n\n\nclass AddReviewTest(BaseTestCase):\n # Car text review\n ADVANTAGES_TEXT = \"Advantages\" * 40\n COMMON_TEXT = \"Common\" * 40\n PROBLEMS_TEXT = \"Problems\" * 40\n\n # Car ratings\n RATINGS = [\n {\"name\": RatingsBlock.DESIGN_RATING_NAME, \"rating\": 5},\n {\"name\": RatingsBlock.COMFORT_RATING_NAME, \"rating\": 4},\n {\"name\": RatingsBlock.CONTROL_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.ERGONOMICS_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.RELIABILITY_RATING_NAME, \"rating\": 2},\n 
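# six rating categories in total; the tests assert the displayed average equals their arithmetic mean\n        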
{\"name\": RatingsBlock.SERVICE_RATING_NAME, \"rating\": 2}\n ]\n\n # Car options\n BRAND = \"Audi\"\n MODEL = \"100\"\n YEAR = \"1996\"\n MODIFICATION = \"1.6 AT\"\n RUN_CURRENT = \"400\"\n\n REVIEW_TITLE = BRAND + \" \" + MODEL + \" \" + MODIFICATION + \" \" + YEAR + u\" г.\"\n\n def setUp(self):\n self.create_driver()\n login(self.driver, self.LOGIN, self.PASSWORD)\n wait_url_ends_with(self.driver, \"/?from=authpopup\")\n self.add_review_page = AddReviewPage(self.driver)\n self.add_review_page.open()\n\n def test(self):\n self.add_review_page.set_ratings(self.RATINGS)\n\n options = OrderedDict([(\"Марка\", self.BRAND),\n (\"Модель\", self.MODEL),\n (\"Год производства\", self.YEAR),\n (\"Модификация\", self.MODIFICATION)])\n\n self.add_review_page.select_car_options(options)\n self.add_review_page.car_select.wait_option_enabled(\"Привод\")\n self.add_review_page.set_run_current(self.RUN_CURRENT)\n self.add_review_page.set_texts(self.COMMON_TEXT, self.ADVANTAGES_TEXT, self.PROBLEMS_TEXT)\n\n self.add_review_page.add_review()\n self.add_review_page.show_review()\n\n self.review_page = ReviewPage(self.driver)\n average_rating = round(float(sum([x[\"rating\"] for x in self.RATINGS])) / float(len(self.RATINGS)), 1)\n self.assertEqual(average_rating, self.review_page.review_avg_rating)\n self.assertEqual(self.RUN_CURRENT, self.review_page.run_current)\n self.assertEqual(self.REVIEW_TITLE, self.review_page.review_title)\n self.assertEquals(self.COMMON_TEXT, self.review_page.review_text.common_text)\n self.assertEquals(self.ADVANTAGES_TEXT, self.review_page.review_text.advantages_text)\n self.assertEquals(self.PROBLEMS_TEXT, self.review_page.review_text.problems_text)\n\n def tearDown(self):\n try:\n self.review_page.remove_review()\n self.review_page.logout()\n finally:\n self.driver.quit()\n","sub_path":"tests/review_test.py","file_name":"review_test.py","file_ext":"py","file_size_in_byte":9841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"44487025","text":"import sys, socket, select, time\nfrom optparse import OptionParser\n\nXBOX_PORT = 5050\nXBOX_PING = \"dd00000a000000000000000400000002\"\nXBOX_POWER = \"dd02001300000010\"\n\nhelp_text = \"xbox-remote-power.py -a -i \"\n\npy3 = sys.version_info[0] > 2\n\ndef main():\n parser = OptionParser()\n parser.add_option('-a', '--address', dest='ip_addr', help=\"IP Address of Xbox One\", default='')\n parser.add_option('-i', '--id', dest='live_id', help=\"Live ID of Xbox One\", default='')\n (opts, args) = parser.parse_args()\n \n if not opts.ip_addr:\n opts.ip_addr = user_input(\"Enter the IP address: \")\n\n ping = False\n if not opts.live_id:\n print(\"No Live ID given, do you want to attempt to ping the Xbox for it?\")\n result = \"\"\n while result not in (\"y\", \"n\"):\n result = user_input(\"(y/n): \").lower()\n if result == \"y\":\n ping = True\n elif result == \"n\":\n opts.live_id = user_input(\"Enter the Live ID: \")\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.setblocking(0)\n s.bind((\"\", 0))\n s.connect((opts.ip_addr, XBOX_PORT))\n\n if ping:\n print(\"Attempting to ping Xbox for Live ID...\")\n s.send(bytearray.fromhex(XBOX_PING))\n\n ready = select.select([s], [], [], 5)\n if ready[0]:\n data = s.recv(1024)\n opts.live_id = data[199:215]\n else:\n print(\"Failed to ping Xbox, please enter Live ID manually\")\n opts.live_id = user_input(\"Enter the Live ID: \")\n\n if isinstance(opts.live_id, str):\n live_id = opts.live_id.encode()\n else:\n live_id = 
opts.live_id\n\n power_packet = bytearray.fromhex(XBOX_POWER) + live_id + b'\\x00'\n print(\"Sending power on packets to \" + opts.ip_addr)\n for i in range(0, 5):\n s.send(power_packet)\n time.sleep(1)\n print(\"Xbox should turn on now\")\n\n s.send(bytearray.fromhex(XBOX_PING))\n ready = select.select([s], [], [], 5)\n if ready[0]:\n data = s.recv(1024)\n opts.live_id = data[199:215]\n print(\"Ping successful!\")\n print(\"Live ID = \" + live_id.decode(\"utf-8\"))\n print(\"\")\n print(\"******************************************\")\n print(\"* Xbox running - Streaming now possible! *\")\n print(\"******************************************\")\n print(\"\")\n else:\n print(\"Failed to ping Xbox - please try again! :(\")\n print(\"\")\n \n s.close()\n\ndef user_input(text):\n response = \"\"\n\n while response == \"\":\n if py3:\n response = input(text)\n else:\n response = raw_input(text)\n\n return response\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"xbox-remote-power.py","file_name":"xbox-remote-power.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"53863609","text":"import unittest\nimport time\n\nfrom django.test import LiveServerTestCase\nfrom selenium import webdriver\n\n\nMAX_WAIT = 10\n\n\nclass NewVisitorTest(LiveServerTestCase):\n\n def setUp(self):\n self.browser = webdriver.Firefox()\n\n def tearDown(self):\n self.browser.quit()\n\n def test_list_of_forms_loads(self):\n # User goes to url page loads.\n\n self.browser.get(self.live_server_url + '/application-forms/')\n self.assertIn('Application Information', self.browser.title)\n\n # User sees a dropdown menu\n drop_down = self.browser.find_element_by_class_name('dropdown')\n self.assertEqual('Select Your Application', drop_down.text)\n\n # User presses button and 4 dropdown items appear\n expected_items = ['flow measurement', 'level measurement',\n 'pressure','temperature', 'valves']\n\n drop_down_items = self.browser.find_elements_by_class_name('dropdown-item')\n drop_down_true = all(item.get_attribute('innerHTML').lower() in expected_items for item in drop_down_items)\n self.assertEqual(drop_down_true, True)\n\n # User presses on Level Measurement\n level_item = drop_down_items[1]\n self.assertEqual(level_item.get_attribute('innerHTML').lower(), expected_items[1])\n time.sleep(1)\n drop_down.click()\n time.sleep(1)\n level_item.click()\n time.sleep(1)\n\n # User is redirected to new page\n self.assertEqual(self.browser.current_url, self.live_server_url + '/application-forms/level/')\n\n\n # User\n","sub_path":"functional_tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"111856483","text":"# -*- coding: utf-8 -*-\n\"\"\"Module providing caravan site details editor\"\"\"\n\nfrom Acquisition import aq_inner\nfrom five import grok\nfrom plone import api\nfrom plone.directives import form\nfrom z3c.form import button\nfrom zope.component import getUtility\n\nfrom rms.caravansites.interfaces import ICaravanSiteDetails\nfrom rms.caravansites.tool import ICaravanSiteTool\nfrom rms.membership.workspace import IWorkspace\n\nfrom rms.caravansites import MessageFactory as _\n\n\nclass DetailsEditor(form.SchemaEditForm):\n grok.context(IWorkspace)\n grok.require('cmf.ModifyPortalContent')\n grok.name('details-editor')\n\n schema = ICaravanSiteDetails\n ignoreContext = False\n css_class = 
'app-ws-form ws-form'\n label = _(u\"Edit caravansite details\")\n\n @property\n def traverse_subpath(self):\n return self.subpath\n\n def publishTraverse(self, request, name):\n if not hasattr(self, 'subpath'):\n self.subpath = []\n self.subpath.append(name)\n return self\n\n def next_url(self):\n context = aq_inner(self.context)\n return context.absolute_url()\n\n def rvs(self):\n uid = self.traverse_subpath[0]\n item = api.content.get(UID=uid)\n return item\n\n @button.buttonAndHandler(_(u\"Save\"), name=\"save\")\n def handleApply(self, action):\n data, errors = self.extractData()\n if errors:\n self.status = self.formErrorsMessage\n return\n return self.applyChanges(data)\n\n @button.buttonAndHandler(_(u\"cancel\"))\n def handleCancel(self, action):\n msg = _(u\"Image edit has been cancelled.\")\n api.portal.show_message(message=msg, request=self.request)\n return self.request.response.redirect(self.next_url())\n\n def applyChanges(self, data):\n tool = getUtility(ICaravanSiteTool)\n rvs_uid = self.traverse_subpath[0]\n records = tool.read(rvs_uid, key='details')\n for record in records:\n key = record['id']\n try:\n record['value'] = data[key]\n except KeyError:\n continue\n tool.update(rvs_uid, records, key='details')\n msg = _(u\"The caravan site has successfully been updated\")\n api.portal.show_message(message=msg, request=self.request)\n return self.request.response.redirect(self.next_url())\n\n def getContent(self):\n uid = self.traverse_subpath[0]\n tool = getUtility(ICaravanSiteTool)\n record = tool.read(uid, key='details')\n data = {}\n for item in record:\n key = item['id']\n data[key] = item['value']\n return data\n","sub_path":"src/rms.caravansites/rms/caravansites/details.py","file_name":"details.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"206189268","text":"import math\nlst = [2, 4, 9, 16, 25]\nnew_lst = []\nfor i in lst:\n new_lst.append(math.sqrt(i))\nprint(new_lst)\n\nnew_list2 = list(map(math.sqrt, lst))\nprint(new_list2)\n\nnew_list3 = [math.sqrt(i) for i in lst]\nprint(new_list3)\n","sub_path":"62-new_list.py","file_name":"62-new_list.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"355969581","text":"import numpy as np\nsequence=[]\nelectrodes=range(1,65)\nelectrodes=np.reshape(electrodes,(8,8))\nprint(electrodes)\n# GRADIENT\n# vertical and horizontal\nfor i in range (8):\n a = electrodes[i,0]\n b = electrodes[i,-1]\n for j in range (1,6):\n m = electrodes[i,j]\n n = electrodes[i,j+1]\n quadripole = [a,b,m,n]\n sequence.append(quadripole) \nfor i in range (8):\n a = electrodes[0,i]\n b = electrodes[-1,i]\n for j in range (1,6):\n m = electrodes [j,i]\n n = electrodes [j+1,i]\n quadripole = [a,b,m,n]\n sequence.append(quadripole)\n\n# diagonals\nfor i in range (-4,5):\n diagonal = np.diag(electrodes,k=i)\n print(diagonal)\n diagonal_NumElec = len(diagonal)\n a = diagonal[0]\n b = diagonal[-1]\n for j in range (1,diagonal_NumElec-2):\n quadripole = [a,b,diagonal[j],diagonal[j+1]]\n sequence.append(quadripole)\n \nelectrodes_flip = np.fliplr(electrodes) # flip electrodes matrix to get other diagonals\nprint(electrodes_flip)\n\nfor i in range (-4,5):\n diagonal = np.diag(electrodes_flip,k=i)\n print(diagonal)\n diagonal_NumElec = len(diagonal)\n a = diagonal[0]\n b = diagonal[-1]\n print(diagonal)\n for j in range (1,diagonal_NumElec-2):\n quadripole = 
[a,b,diagonal[j],diagonal[j+1]]\n sequence.append(quadripole)\n\n\"\"\" WENNER \"\"\"\n\n#Hoz\nfor i in range(8):\n line = electrodes[i,:]\n print(\"hor\",i,line)\n for j in range(5):\n quadripole =[line[j],line[j+3],line[j+1],line[j+2]]\n sequence.append(quadripole)\n \n \n\n#Ver\nfor i in range(8):\n line = electrodes[:,i]\n print(\"vert\",i,line)\n for j in range(5):\n quadripole =[line[j],line[j+3],line[j+1],line[j+2]]\n sequence.append(quadripole)\n\n#diagonals\nfor i in range(-3,4): # skipping shortest diagonals (already in Grad) \n diagonal = np.diag(electrodes,k=i)\n print(\"W_diag\",i,diagonal)\n diagonal_NumElec = len(diagonal)\n for j in range(diagonal_NumElec-3):\n quadripole = [diagonal[j],diagonal[j+3],diagonal[j+1],diagonal[j+2]]\n sequence.append(quadripole)\nfor i in range(-3,4):\n diagonal = np.diag(electrodes_flip,k=i)\n print(\"W_diag_flip\",i,diagonal)\n diagonal_NumElec = len(diagonal)\n for j in range(diagonal_NumElec-3):\n quadripole = [diagonal[j],diagonal[j+3],diagonal[j+1],diagonal[j+2]]\n sequence.append(quadripole) \n\nSeqDir = np.array(sequence)\nAd, Bd, Md, Nd = SeqDir[:,0], SeqDir[:,1], SeqDir[:,2], SeqDir[:,3]\nSeqRec = np.column_stack((Md, Nd, Ad, Bd))\nSeq = np.vstack((SeqDir,SeqRec))\n#A,B,M,N = sequence[:,0],sequence[:,1],sequence[:,2],sequence[:,3]\n#sequenceRec = [M,N,A,B]\nprint(len(Seq))\nnp.savetxt(\"Seq.txt\", Seq, fmt = '%i %i %i %i')\n","sub_path":"sequences/Rhi.py","file_name":"Rhi.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"279005415","text":"import turtle\nimport datetime\n\n# happy birthday heart animation\ndef love():\n def func(x, y):\n main()\n # turtle.title('领导专用程序')\n lv=turtle.Turtle()\n lv.hideturtle()\n lv.getscreen().bgcolor('light blue')\n lv.color('yellow','red')\n lv.pensize(1)\n lv.speed(1)\n lv.up()\n lv.goto(0,-150)\n # start drawing the heart\n lv.down()\n lv.begin_fill()\n lv.goto(0, -150)\n lv.goto(-175.12, -8.59)\n lv.left(140)\n pos = []\n for i in range(19):\n lv.right(10)\n lv.forward(20)\n pos.append((-lv.pos()[0], lv.pos()[1]))\n for item in pos[::-1]:\n lv.goto(item)\n lv.goto(175.12, -8.59)\n lv.goto(0, -150)\n lv.left(50)\n lv.end_fill()\n # write the greeting text\n lv.up()\n lv.goto(0, 80)\n lv.down()\n lv.write(\"胡 可 仪\",font=(u\"方正舒体\",36,\"normal\"),align=\"center\")\n lv.up()\n lv.goto(0, 0)\n lv.down()\n lv.write(\"生日快乐!\",font=(u\"方正舒体\",48,\"normal\"),align=\"center\")\n lv.up()\n lv.goto(100, -210)\n lv.down()\n lv.write(\"点我点我快点我\",font=(u\"华文琥珀\",26,\"bold\"),align=\"right\")\n lv.up()\n lv.goto(160, -190)\n lv.resizemode('user')\n lv.shapesize(4, 4, 10) # resize the turtle so it covers the 'click me' text\n lv.color('red', 'red')\n lv.onclick(func)\n lv.showturtle()\n\n\ndef main():\n pass\n\nif __name__ == '__main__':\n # if datetime.date.today() == datetime.date(2020, 02, ): # YYYY, MM, DD\n love()\n # else:\n # main()\n","sub_path":"动画/心型动画.py","file_name":"心型动画.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"198733487","text":"INVALID_REQUEST = 'invalid request'\nPERMISSION_DENIED = 'permission denied'\nUNKNOWN = 'unknown'\nINTERNAL_SERVER_ERROR = 'internal error'\nTOO_MANY_REQUESTS = 'too many requests'\nAUTH_ERROR = 'authorization error'\nUNSUPPORTED_TYPE = 'unsupported type'\n\n\nclass CTRBaseError(Exception):\n def __init__(self, code, message, type_='fatal'):\n super().__init__()\n self.code = code or UNKNOWN\n self.message = message or 'Something went wrong.'\n self.type_ = type_\n\n 
@property\n def json(self):\n return {'type': self.type_,\n 'code': self.code,\n 'message': self.message}\n\n\nclass CTRInvalidJWTError(CTRBaseError):\n def __init__(self):\n super().__init__(\n PERMISSION_DENIED,\n 'Invalid Authorization Bearer JWT.'\n )\n\n\nclass CTRUnexpectedResponseError(CTRBaseError):\n def __init__(self, error):\n if error and error.get('error_description'):\n message = f'Microsoft Defender for Endpoint returned unexpected ' \\\n f'error. Details: {error[\"error_description\"]}'\n else:\n message = 'Something went wrong.'\n\n super().__init__(\n UNKNOWN,\n message=str(message)\n )\n\n\nclass CTRBadRequestError(CTRBaseError):\n def __init__(self, error=None):\n message = 'Invalid request to Microsoft Defender for Endpoint.'\n if error:\n message += f' {error}'\n super().__init__(\n INVALID_REQUEST,\n message\n )\n\n\nclass CTRInternalServerError(CTRBaseError):\n def __init__(self):\n super().__init__(\n INTERNAL_SERVER_ERROR,\n 'Microsoft Defender for Endpoint internal error.'\n )\n\n\nclass CTRTooManyRequestsError(CTRBaseError):\n def __init__(self, error=None):\n if error is not None and '/advancedqueries/run' in error.url:\n message = f'Advanced Hunting API rate limit has been exceeded. ' \\\n f'{error.json()[\"error\"]}'\n else:\n message = 'Too many requests to Microsoft Defender for Endpoint ' \\\n 'have been made. Please, try again later.'\n super().__init__(\n TOO_MANY_REQUESTS,\n message\n )\n\n\nclass CTRSSLError(CTRBaseError):\n def __init__(self, error):\n error = error.args[0].reason.args[0]\n message = getattr(error, 'verify_message', error.args[0]).capitalize()\n super().__init__(\n UNKNOWN,\n f'Unable to verify SSL certificate: {message}'\n )\n\n\nclass AuthorizationError(CTRBaseError):\n def __init__(self, error):\n\n super().__init__(\n AUTH_ERROR,\n f\"Authorization failed: {error}\"\n )\n\n\nclass UnsupportedTypeError(CTRBaseError):\n def __init__(self, type_):\n\n super().__init__(\n UNSUPPORTED_TYPE,\n f'Unsupported observable type {type_}'\n )\n","sub_path":"api/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"243720616","text":"from django import forms\nfrom localflavor.gb.forms import GBPostcodeField\nfrom aliss.models import DigestSelection, Postcode, Category, ALISSUser\n\nclass DigestSelectionForm(forms.ModelForm):\n class Meta:\n model = DigestSelection\n fields = [\n 'postcode',\n 'category'\n ]\n\n postcode = forms.ModelChoiceField(\n queryset=Postcode.objects.all(),\n to_field_name=\"pk\",\n required=True\n )\n\n category = forms.ModelChoiceField(\n queryset=Category.objects.all(),\n to_field_name=\"slug\",\n required=False\n )\n\n def clean(self):\n cleaned_data = super(DigestSelectionForm, self).clean()\n\n postal_string = cleaned_data.get(\"postcode\")\n category_slug = cleaned_data.get(\"category\")\n\n return cleaned_data\n","sub_path":"aliss/forms/digest_selection.py","file_name":"digest_selection.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"58163087","text":"from otree.api import (\n models,\n widgets,\n BaseConstants,\n BaseSubsession,\n BaseGroup,\n BasePlayer,\n Currency as c,\n currency_range,\n)\n\n\nauthor = 'Philipp Chapkovski, '\n\ndoc = \"\"\"\nSorter app that guarantees the proper matching for further trust game.\n\"\"\"\n\n\nclass Constants(BaseConstants):\n name_in_url = 'sorter'\n players_per_group = 
None\n num_rounds = 1\n\n\nclass Subsession(BaseSubsession):\n @property\n def cities(self):\n return [self.session.config.get('city1'), self.session.config.get('city2')]\n\n\n\nclass Group(BaseGroup):\n pass\n\n\nclass Player(BasePlayer):\n city = models.StringField()\n\n\n","sub_path":"newtrustproj-master/sorter/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"395279748","text":"from msilib.schema import File\nimport random\n\n__author__ = 'Jacob'\n\n\ndef is_even(byte):\n \"\"\"\n Checks if byte is binary even\n :param byte: One byte of file loaded to memory\n :return: control bit. if even: 0, else: 1\n \"\"\"\n even_flag = 0\n\n for i in range(0, 8):\n even_flag += (byte >> i) & 1\n\n return even_flag % 2\n\n\ndef get_control_bit(file):\n \"\"\"\n Calculates control bit of file\n :param file: file loaded to memory\n :return: control bit. if even: 0, else: 1\n \"\"\"\n control_bit = 0\n\n for byte in file:\n control_bit += is_even(byte)\n\n return control_bit % 2\n\n\n\n\n\n","sub_path":"lab1/utils/control_bit.py","file_name":"control_bit.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"582373280","text":"from celery import Celery\n\n# cli\n# celery status\n# celery purge\n# celery purge -f\n\n\napp = Celery(broker='amqp://guest@localhost//')\napp = Celery(broker='redis://localhost:6379/0')\n\n\n@app.task()\ndef add(x, y):\n return x + y\n\n\nr = add.delay()\nr = add.apply_async(args=[1, 2], eta=datetime(2014, 6, 12, 0, 0))\nr = add.apply_async(args=[1, 2], countdown=10)\nr = add.apply_async(args=[2, 3], queues='email')\n\n\n\n# inspect\n\nfrom celery.task.control import revoke, inspect, discard_all\n\ni = inspect()\ni.scheduled()\ni.active()\ni.registered()\n\n# revoke task by id\ntask_id = 'foo'\nrevoke(task_id, terminate=True)\nr = add.apply_async(args=[1, 2])\nr.revoke()\n\n\n# run worker from script\nargv = ['worker', '--loglevel=DEBUG']\napp.worker_main(argv)\n\n\n# canvas\n# chain, group, chord\n\n\n# config\nCELERYD_LOG_COLOR = False\n\n# disable prefecthing\nCELERYD_PREFETCH_MULTIPLIER = 1\nCELERYD_CONCURRENCY = 1\nCELERY_ACKS_LATE = True\n\nCELERY_RDB_PORT = 6899\n\n\n# debugging\nrdb.set_trace()\n","sub_path":"python/celery_exp.py","file_name":"celery_exp.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"604757992","text":"import argparse\r\nimport os\r\nimport sys\r\nimport time\r\n\r\nfrom importlib import import_module\r\nfrom pathlib import Path\r\nfrom typing import List, Type\r\n\r\nimport moderngl\r\nfrom moderngl_window.context.base import WindowConfig, BaseWindow\r\nfrom moderngl_window.timers.clock import Timer\r\n\r\nIGNORE_DIRS = [\r\n '__pycache__',\r\n 'base',\r\n]\r\n\r\nOPTIONS_TRUE = ['yes', 'on', 'true', 't', 'y', '1']\r\nOPTIONS_FALSE = ['no', 'off', 'false', 'f', 'n', '0']\r\nOPTIONS_ALL = OPTIONS_TRUE + OPTIONS_FALSE\r\n\r\n\r\nclass ContextRefs:\r\n WINDOW = None\r\n CONTEXT = None\r\n\r\n\r\ndef activate_context(context: moderngl.Context, window: BaseWindow = None):\r\n \"\"\"Set the currently active window\"\"\"\r\n ContextRefs.WINDOW = window\r\n ContextRefs.CONTEXT = context\r\n\r\n\r\ndef window():\r\n \"\"\"Obtain the active window\"\"\"\r\n if ContextRefs.WINDOW:\r\n return ContextRefs.WINDOW\r\n\r\n raise ValueError(\"No 
active window and context. Call activate_context.\")\r\n\r\n\r\ndef ctx():\r\n \"\"\"Obtain the active context\"\"\"\r\n if ContextRefs.CONTEXT:\r\n return ContextRefs.CONTEXT\r\n\r\n raise ValueError(\"No active window and context. Call activate_context.\")\r\n\r\n\r\ndef run_window_config(config_cls: WindowConfig, timer=None, args=None) -> None:\r\n \"\"\"\r\n Run a WindowConfig entering a blocking main loop\r\n\r\n Args:\r\n config_cls: The WindowConfig class to render\r\n timer: A custom timer instance (currently unused; a default Timer is created)\r\n args: Override sys.argv\r\n \"\"\"\r\n values = parse_args(args)\r\n window_cls = get_local_window_cls(values.window)\r\n\r\n # Calculate window size\r\n size = values.size or config_cls.window_size\r\n size = size[0] * values.size_mult, size[1] * values.size_mult\r\n\r\n window = window_cls(\r\n title=config_cls.title,\r\n size=size,\r\n fullscreen=values.fullscreen,\r\n resizable=config_cls.resizable,\r\n gl_version=config_cls.gl_version,\r\n aspect_ratio=config_cls.aspect_ratio,\r\n vsync=values.vsync,\r\n samples=values.samples,\r\n cursor=values.cursor,\r\n )\r\n window.print_context_info()\r\n activate_context(window.ctx, window=window)\r\n window.config = config_cls(ctx=window.ctx, wnd=window)\r\n\r\n timer = Timer()\r\n timer.start()\r\n\r\n while not window.is_closing:\r\n current_time, delta = timer.next_frame()\r\n\r\n window.ctx.screen.use()\r\n window.ctx.screen.clear()\r\n window.render(current_time, delta)\r\n window.swap_buffers()\r\n\r\n _, duration = timer.stop()\r\n window.destroy()\r\n print(\"Duration: {0:.2f}s @ {1:.2f} FPS\".format(duration, window.frames / duration))\r\n\r\n\r\ndef get_window_cls(window: str = None) -> Type[BaseWindow]:\r\n \"\"\"\r\n Attempt to obtain a window class using the full dotted\r\n python path. This can be used to import custom or modified\r\n window classes.\r\n\r\n Args:\r\n window (str): Name of the window\r\n\r\n Returns:\r\n A reference to the requested window class. Raises exception if not found.\r\n \"\"\"\r\n print(\"Attempting to load window class:\", window)\r\n return import_string(window)\r\n\r\n\r\ndef get_local_window_cls(window: str = None) -> Type[BaseWindow]:\r\n \"\"\"\r\n Attempt to obtain a window class in the moderngl_window package\r\n using short window names such as `pyqt5` or `glfw`.\r\n\r\n Args:\r\n window (str): Name of the window\r\n\r\n Returns:\r\n A reference to the requested window class. 
Raises exception if not found.\r\n \"\"\"\r\n window = os.environ.get('MODERNGL_WINDOW') or window\r\n if not window:\r\n window = 'pyglet'\r\n\r\n return get_window_cls('moderngl_window.context.{}.Window'.format(window))\r\n\r\n\r\ndef parse_args(args=None):\r\n \"\"\"Parse arguments from sys.argv\"\"\"\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument(\r\n '-wnd', '--window',\r\n choices=find_window_classes(),\r\n help='Name for the window type to use',\r\n )\r\n parser.add_argument(\r\n '-fs', '--fullscreen',\r\n action=\"store_true\",\r\n help='Open the window in fullscreen mode',\r\n )\r\n parser.add_argument(\r\n '-vs', '--vsync',\r\n type=valid_bool,\r\n default=\"1\",\r\n help=\"Enable or disable vsync\",\r\n )\r\n parser.add_argument(\r\n '-s', '--samples',\r\n type=int,\r\n default=4,\r\n help=\"Specify the desired number of samples to use for multisampling\",\r\n )\r\n parser.add_argument(\r\n '-c', '--cursor',\r\n type=valid_bool,\r\n default=\"true\",\r\n help=\"Enable or disable displaying the mouse cursor\",\r\n )\r\n parser.add_argument(\r\n '--size',\r\n type=valid_window_size,\r\n help=\"Window size\",\r\n )\r\n parser.add_argument(\r\n '--size_mult',\r\n type=valid_window_size_multiplier,\r\n default=1.0,\r\n help=\"Multiplier for the window size making it easy to scale the window\",\r\n )\r\n\r\n return parser.parse_args(args or sys.argv[1:])\r\n\r\n\r\ndef find_window_classes() -> List[str]:\r\n \"\"\"\r\n Find available window packages\r\n\r\n Returns:\r\n A list of available window packages\r\n \"\"\"\r\n return [\r\n path.parts[-1] for path in Path(__file__).parent.joinpath('context').iterdir()\r\n if path.is_dir() and path.parts[-1] not in IGNORE_DIRS\r\n ]\r\n\r\n\r\ndef import_string(dotted_path):\r\n \"\"\"\r\n Import a dotted module path and return the attribute/class designated by the\r\n last name in the path. Raise ImportError if the import failed.\r\n\r\n Args:\r\n dotted_path: The path to attempt importing\r\n\r\n Returns:\r\n Imported class/attribute\r\n \"\"\"\r\n try:\r\n module_path, class_name = dotted_path.rsplit('.', 1)\r\n except ValueError as err:\r\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\r\n\r\n module = import_module(module_path)\r\n\r\n try:\r\n return getattr(module, class_name)\r\n except AttributeError as err:\r\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\r\n module_path, class_name)) from err\r\n\r\n\r\ndef valid_bool(value):\r\n \"\"\"Validator for bool values\"\"\"\r\n value = value.lower()\r\n\r\n if value in OPTIONS_TRUE:\r\n return True\r\n\r\n if value in OPTIONS_FALSE:\r\n return False\r\n\r\n raise argparse.ArgumentTypeError('Boolean value expected. Options: {}'.format(OPTIONS_ALL))\r\n\r\n\r\ndef valid_window_size(value):\r\n \"\"\"\r\n Validator for window size parameter.\r\n\r\n Valid format is \"[int]x[int]\". For example \"1920x1080\".\r\n \"\"\"\r\n try:\r\n width, height = value.split('x')\r\n return int(width), int(height)\r\n except ValueError:\r\n pass\r\n\r\n raise argparse.ArgumentTypeError(\r\n \"Valid size format: [int]x[int]. 
Example '1920x1080'\",\r\n )\r\n\r\n\r\ndef valid_window_size_multiplier(value):\r\n \"\"\"\r\n Validates window size multiplier\r\n\r\n Must be an integer or float creater than 0\r\n \"\"\"\r\n try:\r\n val = float(value)\r\n if val > 0:\r\n return val\r\n except ValueError:\r\n pass\r\n\r\n raise argparse.ArgumentTypeError(\r\n \"Must be a positive int or float\",\r\n )\r\n","sub_path":"moderngl_window/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"9776235","text":"import uuid\nimport zmq\nimport time\nimport socket\n\nfrom .ast_def.expressions import *\nfrom .passive_object import ExpPassiveObject\nfrom . import global_conf\n\n\nclass ActorConnector:\n actor_id: str\n\n messages_socket: zmq.Socket\n return_socket: zmq.Socket\n write_socket: zmq.Socket\n\n def __init__(self, ):\n self.actor_id = str(uuid.uuid4())\n\n context = zmq.Context()\n self.messages_socket = context.socket(zmq.SUB)\n self.messages_socket.connect(f'tcp://127.0.0.1:{global_conf.env_read_port}')\n self.messages_socket.subscribe(f'message:{self.actor_id}')\n # messages_socket.subscribe('')\n\n self.return_socket = context.socket(zmq.SUB)\n self.return_socket.connect(f'tcp://127.0.0.1:{global_conf.env_read_port}')\n self.return_socket.subscribe(f'return:{self.actor_id}')\n\n self.write_socket = context.socket(zmq.PUB)\n self.write_socket.connect(f'tcp://127.0.0.1:{global_conf.env_write_port}')\n\n time.sleep(0.2) # ensure connection established\n self.write_socket.send_multipart([\n 'create:{}'.format(self.actor_id).encode('ascii'),\n b'ACK'\n ])\n\n def receive_message(self):\n topic, data = self.messages_socket.recv_multipart()\n data = eval(data)\n return_data = {\n 'return': data.get('return'),\n 'return_env': data.get('return_env')\n }\n return data['name'], data['args'], return_data\n\n def receive_return_value(self):\n topic, result = self.return_socket.recv_multipart()\n return eval(result.decode('ascii'))\n\n def return_result(self, return_data, result):\n self.write_socket.send_multipart([\n 'return:{return}:{return_env}'.format(**return_data).encode('ascii'),\n str(result).encode('ascii')\n ])\n\n def send_message(self, actor_id, env_name, name, args, return_to: str = None):\n self.write_socket.send_multipart([\n 'message:{}:{}'.format(actor_id, env_name).encode('ascii'),\n str({\n 'name': name,\n 'args': args,\n 'return': return_to,\n 'return_env': global_conf.env_name\n }).encode('ascii')\n ])\n\n\ndef send_initial_message(actor_id, name, args):\n context = zmq.Context()\n\n write_socket = context.socket(zmq.PUB)\n write_socket.connect(f'tcp://127.0.0.1:{global_conf.env_write_port}')\n time.sleep(0.2)\n write_socket.send_multipart([\n 'main:{}'.format(actor_id).encode('ascii'),\n b'ACK'\n ])\n write_socket.send_multipart([\n 'message:{}:{}'.format(actor_id, global_conf.env_name).encode('ascii'),\n str({'name': name, 'args': args, 'return': None}).encode('ascii')\n ])\n\n\ndef setup_env_connection(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(('127.0.0.1', int(port)))\n s.send(b'init')\n resp = s.recv(1024).decode('ascii')\n\n env_name, read, write = resp.split(':')\n\n global_conf.env_name = env_name\n global_conf.env_read_port = int(read)\n global_conf.env_write_port = int(write)\n print('Connected to env', env_name)\n\n s.send(b'ACK')\n mains = s.recv(1024).decode('ascii')\n s.close()\n\n return 
eval(mains)\n","sub_path":"evaluation/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"339684728","text":"#! python3\n#simple old school rpg, improved from earlier versions\nimport shelve, os, random\n\nkilled = 0\nmonster = 'none'\ndamaged = 0\n\nprint('Enter your name:')\nname = str(input())\nprint(' ')\nprint('Hello and welcome to the game ' + name + '!')\nprint('This is a simple old school RPG where you input simple commands to play')\nprint('Type help at any point in time to get help')\nprint(' ')\nprint('Continue?')\ninput()\n\nwhile True: #loading system\n try: #loads savefile\n shelfFile = shelve.open(r'.\\RPG Game Data\\\\' + name + '\\\\' + name) #checks if savefolder exists\n playerhealth = shelfFile['playerhealth'] #checks if save exists\n playerinventory = shelfFile['playerinventory']\n playerstamina = shelfFile['playerstamina']\n playerweapons = shelfFile['playerweapons']\n print(' ')\n break #loading done\n except FileNotFoundError: #no save folder? no problem\n os.makedirs(r'.\\RPG Game Data\\\\' + name)\n except KeyError: #creates new character\n startplayerinventory = {'Gold': { 'Amount': 10}}\n startplayerweapons = {'Rusty Sword': {'Damage': 10, 'Stamina': 30}}\n startplayerhealth = 100\n startplayerstamina = 100\n shelfFile['playerhealth'] = startplayerhealth #saves the info so the loop can continue\n shelfFile['playerinventory'] = startplayerinventory\n shelfFile['playerweapons'] = startplayerweapons\n shelfFile['playerstamina'] = startplayerstamina\n\ndef showinventory(playerinv, playerwep): #show inventory function\n print('Your inventory:')\n for k, v in playerinv.items(): #picks item from playerinventory\n print(str(k) + ': ' + str(v.get('Amount')))\n print(' ')\n print('Your Weapons:')\n for k, v in playerwep.items():\n print(k + ': Damage: ' + str(v.get('Damage')) + ' Stamina: ' + str(v.get('Stamina')))\n\ndef playerattack(playerwep, response, stamina): #roll attack damage from the chosen weapon\n try:\n base = playerwep[response]['Damage']\n rattack = random.randint(base, base + 20)\n global damaged\n damaged = rattack\n except Exception:\n print('Invalid action, try again')\n\ndef encounters():\n global number\n number = random.randint(1, 4)\n if number == 1:\n print('You encountered a dragon!')\n elif number == 2:\n print('You encountered a troll!')\n elif number == 3:\n print('You encountered a wyvern!')\n elif number == 4:\n print('You encountered a bandit!')\n else:\n print('Error!')\n\ndef monsters():\n print('type stuff here in future')\n\nwhile True:\n encounters()\n action = str(input())\n print(' ')\n if action == 'inventory':\n showinventory(playerinventory, playerweapons)\n elif action in playerweapons: #type a weapon name to attack with it\n playerattack(playerweapons, action, playerstamina)\n else:\n print('Invalid action, try again')\n print(' ')\n","sub_path":"advanced stuff/RPG GAME.py","file_name":"RPG GAME.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"582648951","text":"import os\nimport re\nimport collections\nfrom collections import Counter\n\nquantity_words_to_show = 10\n\ndef load_data(filepath):\n if not os.path.exists(filepath):\n return None\n with open(filepath, 'r',encoding = 'utf-8') as file_handler:\n data = file_handler.read()\n return (data)\n\ndef get_most_frequent_words():\n action = load_data(r'filepath')\n words = re.findall(r'\\w+', action)\n 
resulting_count = collections.Counter(words)\n word_counts = Counter(resulting_count)\n top_ten = word_counts.most_common(quantity_words_to_show)\n return (top_ten)\n\n\n\nif __name__ == '__main__':\n result = get_most_frequent_words()\n print (result)","sub_path":"lang_frequency.py","file_name":"lang_frequency.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"478621262","text":"#==================================================================================\n# \tProvides a simple interface for reading and controlling the\n# GPIO pins on the Raspberry Pi model B, B+ and Raspberry Pi 2 from a Web Browser.\n# \n# \t\t\t\tAuthor: Matt Thomas\n# \t\t\t\t\t2015\n#==================================================================================\nimport sys\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BOARD)\n\nhelp = sys.argv\n\npin = [3, 5, 7, 8, 10, 11, 12, 13, 15, 16, 18, 19, 21, 22, 23, 24, 26, 29, 31, 32, 33, 35, 36, 37, 38, 40 ]\n\npinIn = help[2]\nmode = help[1]\n\nif int(mode) == 1:\n\tGPIO.setup(pin[int(pinIn)] , GPIO.OUT)\n\tGPIO.output(pin[int(pinIn)] , True)\n\t\n\nif int(mode) == 0:\n\tGPIO.setup(pin[int(pinIn)] , GPIO.OUT)\n\tGPIO.output(pin[int(pinIn)] , False)\n","sub_path":"pinwrite.py","file_name":"pinwrite.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"87959879","text":"#!/usr/bin/python\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\ndef hoLeeExample3(inds,t_max=1.0,tau_max=2.0,r0=0.05,sig=0.01,verbose=False):\n return hoLeeExample([[foo[0]]*3 for foo in inds],t_max=t_max,tau_max=tau_max,r0=r0,sig=sig,verbose=verbose)\n\ndef hoLeeExample2(inds,t_max=1.0,tau_max=2.0,r0=0.05,sig=0.01,verbose=False):\n return hoLeeExample([[foo[0],foo[1],foo[1]] for foo in inds],t_max=t_max,tau_max=tau_max,r0=r0,sig=sig,verbose=verbose)\n\ndef hoLeeExample(inds,t_max=1.0,tau_max=2.0,r0=0.05,sig=0.01,verbose=False):\n \n '''\n Compute the Ho Lee Example in Beck-Tempone-Szepessy-Zouraris\n '''\n \n thi = lambda tau: 0.1*(1-np.exp(-1*tau))\n f0 = lambda tau: r0-sig*sig*0.5*tau*tau+thi(tau)\n\n if verbose:\n print('Evaluating the Ho Lee example.')\n print('r0: %f, vol %f , t_max %f , tau_max %f'%(r0,sig,t_max,tau_max))\n print('Evaluating with the following indices:')\n for ind in inds:\n print(ind)\n\n # largest values of the discretisation numbers\n N_t = max([foo[0] for foo in inds])\n N_tau_1 = max([foo[1] for foo in inds])\n N_tau_2 = max([foo[2] for foo in inds])\n\n N_t = 2**(N_t)+1\n N_tau_1 = 2**(N_tau_1)+1\n N_tau_2 = 2**(N_tau_2)+1\n\n if verbose:\n print('Meshes constructed.')\n print('The number of mesh points in time: %d'%(N_t))\n print('Mesh points in maturity: %d before t_max, %d after'%(N_tau_2,N_tau_1))\n\n times = np.linspace(0,t_max,N_t)\n taus_1 = np.linspace(0,t_max,N_tau_2)\n taus_2 = np.linspace(t_max,tau_max,N_tau_1)\n\n taus = np.concatenate((taus_1[:-1],taus_2))\n\n # initial values\n\n dt = times[1]-times[0]\n Ws = np.concatenate((np.zeros(1),np.sqrt(dt)*np.cumsum(sp.randn(N_t-1))))\n if verbose:\n plt.figure()\n plt.plot(times,Ws)\n plt.xlabel('$t$')\n plt.ylabel('$W_t$')\n plt.grid(1)\n\n rv = []\n \n for ind in inds:\n if verbose:\n print('Evaluating the following index:')\n print(ind)\n t_jump = 2**(max([foo[0] for foo in inds])-ind[0])\n tau_jump_1 = 2**(max([foo[1] for foo in inds])-ind[1])\n tau_jump_2 = 2**(max([foo[2] for foo in 
inds])-ind[2])\n if verbose:\n print('Jumps in each of the categories: %d , %d , %d'%(t_jump,tau_jump_1,tau_jump_2))\n tau_eff = np.concatenate((taus_1[0:-1:tau_jump_2],taus_2[0::tau_jump_1]))\n t_eff = times[::t_jump]\n f_eff = np.zeros((len(t_eff),len(tau_eff)+2))\n W_eff = Ws[0::t_jump]\n dt_eff = t_eff[1]-t_eff[0]\n if verbose:\n plt.figure()\n plt.plot(tau_eff,f_eff[0,:-2]+f0(tau_eff),'r-')\n # Time stepping\n lstar = 0\n for j in range(1,len(f_eff)):\n if verbose:\n print('Time step No %d, t=%.4f. tau_n=%.4f'%(j,t_eff[j],tau_eff[lstar]))\n #print('Time step No %d , t=%f'%(j,t_eff[j]))\n f_eff[j,lstar:] = 1*f_eff[j-1,lstar:]\n f_eff[j,lstar:-2] += sig*sig*(tau_eff[lstar:]-t_eff[j-1])*dt_eff\n f_eff[j,lstar:-2] += sig*(W_eff[j]-W_eff[j-1])\n if verbose:\n plt.plot(tau_eff[lstar:],f_eff[j,lstar:-2]+f0(tau_eff[lstar:]),'b-')\n f_eff[j,-2] += f_eff[j-1,lstar]\n while tau_eff[lstar+1]<= t_eff[j]:\n lstar += 1\n f_eff[j,-2] = (f_eff[j-1,lstar]+f0(times[j-1]))*dt_eff\n # the last component unchanged\n if verbose:\n plt.plot(tau_eff[lstar:],f_eff[-1,lstar:-2]+f0(tau_eff[lstar:]),'r--')\n plt.plot(tau_eff[lstar:],r0-0.5*sig*sig*(tau_eff[lstar:]-t_max)**2+thi(tau_eff[lstar:]),'k-.')\n # plot the short rate\n lstar = 0\n tPlot = 1*t_eff\n fttPlot = 0*t_eff\n for j in range(0,len(f_eff)):\n fttPlot[j] = f_eff[j,lstar]\n while tau_eff[lstar+1]<= t_eff[j]:\n lstar += 1\n plt.plot(tPlot,fttPlot+f0(tPlot),'r-')\n plt.xlabel('$\\\\tau$')\n plt.ylabel('$f(t,\\\\tau)$')\n plt.grid(1)\n\n rv.append(1.0-f_eff[-1,-2])\n if verbose:\n print('The discount term equals %f'%(rv[-1]))\n tv = 0.0\n lstar = 0\n while tau_eff[lstar+1]<= t_max:\n lstar += 1\n if verbose:\n print('The underlying term equals %f'%(np.sum(f_eff[-1,lstar:-3])*(tau_eff[-1]-tau_eff[-2])))\n #print('dtau term %f'%((tau_eff[-1]-tau_eff[-2])))\n #print('average forward curve %f'%(np.mean(f_eff[-1,lstar:-3])))\n rv[-1] *= np.sum(f_eff[-1,lstar:-3])*(tau_eff[-1]-tau_eff[-2])\n if verbose:\n print('The quantity of interest is %f'%(rv[-1]))\n \n return rv\n \n","sub_path":"tests/HJM/HJM.py","file_name":"HJM.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"285136289","text":"#!/usr/bin/env python\n# coding: utf8\n#\n# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).\n#\n# This file is part of PANDORA\n#\n# https://github.com/CNES/Pandora_pandora\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nThis module contains functions allowing to save the results and the configuration of Pandora pipeline.\n\"\"\"\n\nimport json\nimport errno\nimport os\nimport rasterio\nimport xarray as xr\nfrom typing import Dict\n\nfrom .output_tree_design import get_out_dir, get_out_file_path\n\n\ndef write_data_array(data_array: xr.DataArray, filename: str,\n dtype: rasterio.dtypes = rasterio.dtypes.float32) -> None:\n \"\"\"\n Write a xarray.DataArray in a tiff file\n\n :param data_array: data\n :type data_array: 2D xarray.DataArray (row, col) or 
3D xarray.DataArray (row, col, indicator)\n :param filename: output filename\n :type filename: string\n :param dtype: band types\n :type dtype: GDALDataType\n \"\"\"\n if len(data_array.shape) == 2:\n row, col = data_array.shape\n with rasterio.open(filename, mode='w+', driver='GTiff', width=col, height=row, count=1,\n dtype=dtype) as source_ds:\n source_ds.write(data_array.data, 1)\n\n else:\n row, col, depth = data_array.shape\n with rasterio.open(filename, mode='w+', driver='GTiff', width=col, height=row, count=depth, dtype=dtype) as source_ds:\n for d in range(1, depth + 1):\n source_ds.write(data_array.data[:, :, d-1], d)\n\n\ndef mkdir_p(path: str) -> None:\n \"\"\"\n Create a directory without complaining if it already exists.\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc: # requires Python > 2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef save_results(ref: xr.Dataset, sec: xr.Dataset, output: str) -> None:\n \"\"\"\n Save results in the output directory\n\n :param ref: reference dataset, which contains the variables :\n - disparity_map : the disparity map in the geometry of the reference image 2D DataArray (row, col)\n - confidence_measure : the confidence measure in the geometry of the reference image 3D DataArray (row, col, indicator)\n - validity_mask : the validity mask in the geometry of the reference image 2D DataArray (row, col)\n :type ref: xr.Dataset\n :param sec: secondary dataset. If there is no validation step, the secondary Dataset will be empty.\n If a validation step is configured, the dataset will contain the variables :\n - disparity_map : the disparity map in the geometry of the secondary image 2D DataArray (row, col)\n - confidence_measure : the confidence in the geometry of the secondary image 3D DataArray (row, col, indicator)\n - validity_mask : the validity mask in the geometry of the reference image 2D DataArray (row, col)\n :type sec: xr.Dataset\n :param output: output directory\n :type output: string\n \"\"\"\n # Create the output dir\n mkdir_p(output)\n\n # Save the reference results\n write_data_array(ref['disparity_map'], os.path.join(output, get_out_file_path('ref_disparity.tif')))\n write_data_array(ref['confidence_measure'], os.path.join(output, get_out_file_path('ref_confidence_measure.tif')))\n write_data_array(ref['validity_mask'], os.path.join(output, get_out_file_path('ref_validity_mask.tif')),\n dtype=rasterio.dtypes.uint16)\n\n # If a validation step is configured, save the secondary results\n if len(sec.sizes) != 0:\n write_data_array(sec['disparity_map'], os.path.join(output, get_out_file_path('sec_disparity.tif')))\n write_data_array(sec['confidence_measure'], os.path.join(output, get_out_file_path('sec_confidence_measure.tif')))\n write_data_array(sec['validity_mask'], os.path.join(output, get_out_file_path('sec_validity_mask.tif')),\n dtype=rasterio.dtypes.uint16)\n\n\ndef save_config(output: str, user_cfg: Dict) -> None:\n \"\"\"\n Save the user configuration in json file\n\n :param output: Path to output directory\n :type output: string\n :param user_cfg: user configuration\n :type user_cfg: dict\n \"\"\"\n \n # Create the output dir\n mkdir_p(os.path.join(output, get_out_dir('config.json')))\n\n # Save user configuration in json file\n with open(os.path.join(output, get_out_file_path('config.json')), 'w') as f:\n json.dump(user_cfg, f, 
indent=2)\n","sub_path":"pandora/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"336620848","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on 2018.03.09\nFinished on 2018.04.13\n@author: Wang Yuntao\n\"\"\"\n\nimport re\nimport os\nimport time\nimport json\nimport utils\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\"\"\"\n function:\n __init__(self, _user_name=None, _password=None, _browser_type=\"Chrome\", \n is_headless=False) __init__\n sign_in(self) Facebook登录\n make_post(self) 发布状态\n page_refresh(self, _refresh_times=0) 页面下拉刷新\n get_myself_info(self) 获取当前登录账��的信息 user_name, user_id, homepage_url\n enter_homepage_self(self) 进入当前账户的个人主页 (方便对用户好友和照片的获取)\n get_user_id(self, _user_homepage_url) 获取用户id\n get_friends_number(self) 获取当前账户的好友个数\n get_friends_list(self, _friends_number=None) 获取当前账户的好友列表 (列表存储各好友的user_name, user_id, homepage_url)\n search_users(self, _keyword, user_number) 获取当前搜索条件下的用户列表 (列表存储各用户的user_name, homepage_url, location, user_id)\n \n get_photos_list(self) 获取照片的href,方便对原图的链接,发表时间等进行获取\n get_photo_info(self, _photo_href) 获取照片的链接,发布时间,发布位置,尺寸与对应的文字说明\n get_photos_info_list(self, _photos_href_list) 批量获取照片的链接,发布时间,发布位置,尺寸与对应的文字说明\n download_photos_one(self, _homepage_url) 下载单个用户的图片\n download_photos_batch(self, _homepage_url_list) 批量下载多个用户的图片\n params_modify(self, post_class_name, \n bottom_xpath_search, bottom_xpath_other, \n main_container_class_name, \n myself_id_class_name) 用于对可变参数进行修改\n \n Note:\n 实际使用中还需要根据Facebook当前的页面架构进行相应调整\n\"\"\"\n\n\nclass Facebook:\n def __init__(self, _email=None, _password=None, _browser_type=\"Chrome\", _is_headless=False, _speed_mode=\"Normal\"):\n \"\"\"\n 构造函数\n :param _email: Facebook登录所需邮箱\n :param _password: Facebook登录对应的密码\n :param _browser_type: 浏览器类型 (Chrome | Firefox)\n :param _is_headless: 是否适用无头浏览器\n :param _speed_mode: 运行速度模式选择 (Extreme | Fast | Normal | Slow)\n Return:\n browser_state:\n 0 - init fail\n 1 - init success\n \"\"\"\n # the variables which are fixed\n self.url = \"https://www.facebook.com/\" # facebook页面url\n self.email = _email # 帐户邮箱\n self.password = _password # 账户密码\n self.soup_type = \"html.parser\" # beautifulsoup解析类型\n\n # some identifier\n self.browser_state = None # 浏览器选择状态\n self.login_state = None # 登录状态\n\n # the variable about the current login account\n self.user_name = None # 当前登录账号的用户昵称\n self.user_id = None # 当前登录账号的用户ID\n self.homepage_url = None # 当前登录账号的主页url\n self.friends_number = 0 # 当前登录账号的好友数量\n\n # some parameters of webdriver\n self.cookie = None # 当前登录账号的cookie\n self.session_id = None # 会话id,方便在当前打开窗口继续运行\n self.executor_url = None # 会话的命令执行器连接\n self.cookies_path = \"json_(\" + _email + \").json\" # 用于保存用户cookies的文件\n\n # the initialization of list\n self.user_info_friends = list() # 好友信息列表 (user_name, user_id, homepage_url)\n self.user_info_search = list() # 通过搜索得到的用户信息列表 (user_name, homepage_url)\n\n # the variables which are static\n self.clearfix_flag = \"clearfix\" # 网页消除浮动标识\n self.user_cover_class_name = \"cover\" # 用户封面对应的class name\n self.bottom_class_name = \"uiHeaderTitle\" # 用于确定图片、视频下载时有无下拉到最底的class name\n self.bottom_xpath_search = \\\n \"//*[@id=\\\"browse_end_of_results_footer\\\"]/div/div\" # 用户搜索时对应的bottom标识\n 
self.bottom_xpath_other = \\\n \"//*[@id=\\\"timeline-medley\\\"]/div/div[2]/div[1]/div/div\" # 照片好友信息遍历时的bottom标识\n self.full_screen_id = \"fbPhotoSnowliftFullScreenSwitch\" # 全屏操作对应的id\n self.main_container_class_name = \"homeSideNav\" # 用户获取当前登录账户信息的class name\n self.myself_id_class_name = \"data-nav-item-id\" # 用户id对应的字段名\n self.friends_list_class_name = \"uiProfileBlockContent\"\n self.friends_number_id_name = \"pagelet_timeline_medley_friends\" # 用于获取好友数量的id name\n self.homepage_url_postfix_1 = \"?fref=pb&hc_location=friends_tab\" # 一类URL的后缀\n self.homepage_url_postfix_2 = \"&fref=pb&hc_location=friends_tab\" # 二类URL的后缀\n self.browse_results_container = \"//*[@id=\\\"BrowseResultsContainer\\\"]/div[1]\"\n\n # the variables which may be variant regularly\n self.post_class_name = \"_3jk\" # 状态发布所需class name\n\n # 用户搜索所需class name\n self.user_search_class_name = None\n self.user_name_class_name = None\n\n # the selection of browser\n if _browser_type == \"Chrome\":\n try:\n options = webdriver.ChromeOptions()\n if _is_headless is True:\n options.set_headless()\n options.add_argument(\"--disable - gpu\")\n else:\n self.driver = webdriver.Chrome(options=options)\n self.browser_state = 1\n except AttributeError:\n self.browser_state = 0\n\n if _browser_type == \"Firefox\":\n try:\n options = webdriver.FirefoxOptions()\n if _is_headless is True:\n options.set_headless()\n options.add_argument(\"--disable - gpu\")\n else:\n self.driver = webdriver.Firefox(options=options)\n self.browser_state = 1\n except AttributeError:\n self.browser_state = 0\n\n # the run speed mode selection\n self.timeout = utils.get_timeout(_speed_mode)\n\n def params_modify(self, cookies_path, post_class_name, bottom_xpath_search, bottom_xpath_other, main_container_class_name,\n myself_id_class_name):\n self.cookies_path = cookies_path\n self.post_class_name = post_class_name\n self.bottom_xpath_search = bottom_xpath_search\n self.bottom_xpath_other = bottom_xpath_other\n self.main_container_class_name = main_container_class_name\n self.myself_id_class_name = myself_id_class_name\n\n def login_with_account(self):\n \"\"\"\n facebook login with username and password\n :return: a status code —— True: Success, False: False\n Note:\n 如果facebook账号登录成功,则当前页面的url为:https://www.facebook.com\n 如果facebook账号登录失败,则当前页面的url为:https://www.facebook.com/login.php?login_attempt=1&lwv=100\n \"\"\"\n self.driver.get(self.url)\n try:\n # username\n email_element = WebDriverWait(self.driver, timeout=5).until(\n EC.presence_of_element_located((By.ID, \"email\")))\n email_element.clear()\n email_element.send_keys(self.user_name)\n time.sleep(1)\n\n # password\n password_element = WebDriverWait(self.driver, timeout=5).until(\n EC.presence_of_element_located((By.ID, \"pass\")))\n password_element.clear()\n password_element.send_keys(self.password)\n time.sleep(1)\n\n # click\n login_element = WebDriverWait(self.driver, timeout=5).until(\n EC.presence_of_element_located((By.ID, \"loginbutton\")))\n login_element.click()\n except:\n pass\n\n def login_with_cookies(self):\n \"\"\"\n facebook login with cookies\n :return: a status code —— True: Success, False: False\n Note:\n 如果facebook账号登录成功,则当前页面的url为:https://www.facebook.com\n 如果facebook账号登录失败,则当前页面的url为:https://www.facebook.com/login.php?login_attempt=1&lwv=100\n \"\"\"\n if os.path.exists(self.cookies_path):\n with open(self.cookies_path, 'r', encoding='utf-8') as file:\n list_cookies = json.loads(file.read())\n if len(list_cookies) != 0:\n self.driver.get(self.url)\n for cookie in 
list_cookies:\n try:\n self.driver.add_cookie({\n \"domain\": cookie[\"domain\"],\n \"name\": cookie[\"name\"],\n \"value\": cookie[\"value\"],\n \"path\": cookie[\"path\"],\n \"expiry\": cookie[\"expiry\"]\n })\n except KeyError:\n pass\n\n self.driver.get(self.url)\n\n def sign_in(self):\n \"\"\"\n facebook login via webdriver, cookies login first, if no cookies, login with account and save the cookies\n :return: a status code —— True: Success, False: False\n Note:\n 如果facebook账号登录成功,则当前页面的url为:https://www.facebook.com\n 如果facebook账号登录失败,则当前页面的url为:https://www.facebook.com/login.php?login_attempt=1&lwv=100\n \"\"\"\n try:\n self.login_with_cookies()\n except:\n self.login_with_account()\n\n # status judgement\n current_page_url = self.driver.current_url\n if current_page_url != self.url:\n self.login_state = 0\n else:\n self.login_state = 1\n self.save_cookie()\n\n def save_cookie(self):\n # 获取cookie并通过json模块将dict转化成str\n dict_cookies = self.driver.get_cookies()\n json_cookies = json.dumps(dict_cookies)\n # 登录完成后,将cookie保存到本地文件\n if os.path.exists(self.cookies_path):\n pass\n else:\n with open(self.cookies_path, \"w\") as file:\n file.write(json_cookies)\n\n def make_post(self):\n current_url = self.driver.current_url\n if current_url != self.url:\n self.enter_homepage_self()\n else:\n pass\n post_element = self.driver.find_element_by_class_name(self.post_class_name)\n post_element.click()\n\n def page_refresh_to_bottom(self, item, timeout=3, poll_frequency=0.5):\n \"\"\"\n 页面刷新\n :param item: 下拉页类型,分为用户搜索和照片搜索两类\n :param timeout: 模拟下拉的时间延迟\n :param poll_frequency: 模拟下拉的时间频率\n :return: NULL\n \"\"\"\n if item == \"users\":\n xpath = self.bottom_xpath_search\n else:\n xpath = self.bottom_xpath_other\n\n while True:\n try:\n WebDriverWait(self.driver, timeout=timeout, poll_frequency=poll_frequency).until(\n EC.presence_of_element_located((By.XPATH, xpath)))\n break\n except:\n self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n\n def page_refresh(self, _refresh_times=0):\n \"\"\"\n 页面刷新\n :param _refresh_times: 刷新次数\n :return: NULL\n \"\"\"\n for i in range(_refresh_times):\n self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n try:\n bottom_element = WebDriverWait(self.driver, timeout=3).until(\n EC.presence_of_element_located((By.XPATH, self.bottom_xpath_search)))\n except:\n try:\n bottom_element = WebDriverWait(self.driver, timeout=3).until(\n EC.presence_of_element_located((By.XPATH, self.bottom_xpath_other)))\n except:\n bottom_element = None\n\n if bottom_element is not None:\n break\n\n def get_myself_info(self):\n \"\"\"\n 获取当前登录账户的信息\n :return:\n user_name: 用户名\n user_id: 用户id\n homepage_url: 用户主页\n \"\"\"\n self.get(self.url)\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n\n main_container = soup.find(class_=self.main_container_class_name)\n id_class = main_container.li\n user_id = id_class.get(self.myself_id_class_name)\n user_info_class = main_container.find_all(\"a\")\n user_name = user_info_class[1].get(\"title\")\n homepage_url = user_info_class[1].get(\"href\")\n homepage_url = homepage_url.split(\"?\")[0]\n\n self.user_name, self.user_id, self.homepage_url = user_name, user_id, homepage_url\n\n def enter_homepage_self(self):\n \"\"\"\n 进入个人主页,facebook登录后页面仍停留在https://www.facebook.com,需要进一步跳转到个人主页,获取到主页url,\n 方便对好友列表,照片的获取\n :return:\n \"\"\"\n if self.user_id is None:\n self.get_myself_info()\n\n self.get(self.homepage_url)\n\n def get_user_id(self, user_homepage_url):\n 
\"\"\"\n 根据用户的主页url获取其user id\n :param user_homepage_url: 用户的主页url\n :return: user id\n \"\"\"\n if utils.url_type_judge(user_homepage_url) == 1:\n self.driver.get(user_homepage_url)\n page = self.driver.page_source\n soup = BeautifulSoup(page, self.soup_type)\n cover = soup.find(class_=self.user_cover_class_name)\n user_id = cover.a.get(\"data-referrerid\")\n else:\n user_id = user_homepage_url.split(\"id=\")[-1]\n\n return user_id\n\n def get_friends_number(self):\n \"\"\"\n 获取当前登录账户的好友数量\n :return:\n self.friends_number: 当前登录账户的好友数量\n \"\"\"\n friends_page_url = utils.get_jump_url(self.homepage_url, \"friends\")\n self.get(friends_page_url)\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n\n friends_table = self.driver.find_element_by_id(self.friends_number_id_name)\n friends_table_class_name = friends_table.get_attribute(\"class\")\n\n block = soup.find(class_=friends_table_class_name)\n content = block.find_all(\"div\")\n content_text = content[5].a.text\n pattern = re.compile(r\"\\d+\\.?\\d*\")\n\n self.friends_number = int(pattern.findall(content_text)[0])\n\n def get_friends_list(self, _friends_number=None):\n \"\"\"\n 获取当前登录账户的好友列表\n :param _friends_number: 待检索的好友数量\n :return:\n self.user_info_friends: 好友用户信息 [user_name, user_id, homepage_url]\n \"\"\"\n if len(self.user_info_friends) == 0:\n self.get_friends_number()\n if _friends_number is None or _friends_number > self.friends_number:\n self.page_refresh_to_bottom(\"friends\")\n else:\n refresh_times = _friends_number // 20\n self.page_refresh(refresh_times)\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n\n # 获取好友url列表\n contents = soup.find_all(class_=self.friends_list_class_name)\n for content in contents:\n homepage_url = content.a.get(\"href\")\n if utils.url_type_judge(homepage_url) == 1:\n homepage_url = homepage_url.replace(self.homepage_url_postfix_1, \"\")\n if utils.url_type_judge(homepage_url) == 2:\n homepage_url = homepage_url.replace(self.homepage_url_postfix_2, \"\")\n user_name = content.a.text\n pattern = re.compile(r\"id=\\d+\")\n user_id = pattern.findall(content.a.get(\"data-hovercard\"))[0].split(\"id=\")[-1]\n\n self.user_info_friends.append([user_name, user_id, homepage_url])\n else:\n pass\n\n def get_user_info(self, item):\n data_be_str = item.div.get(\"data-bt\")\n user_id = utils.str2dict(data_be_str)[\"id\"]\n\n # 获取user homepage url\n user_info = item.find(class_=self.clearfix_flag)\n user_homepage_url = user_info.a.get(\"href\")\n\n user_name_block = user_info.div.find(class_=self.clearfix_flag).find_all(\"div\")\n # user_name_class_name = user_name_block[-1].a.get(\"class\")[0]\n user_name = user_name_block[-1].a.text\n\n about_items = user_info.find_all(\"div\")\n about_class = about_items[11].find_all(\"div\")\n\n try:\n about = about_class[5].text\n except:\n about = None\n\n return [user_name, user_id, user_homepage_url, about]\n\n def get_class_name_for_search(self):\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n\n element = self.driver.find_element_by_xpath(self.browse_results_container)\n user_search_class_name = element.get_attribute(\"class\")\n item = soup.find(class_=user_search_class_name)\n user_info = item.find(class_=self.clearfix_flag)\n user_name_block = user_info.div.find(class_=self.clearfix_flag).find_all(\"div\")\n user_name_class_name = user_name_block[-1].a.get(\"class\")[0]\n\n self.user_search_class_name = user_search_class_name\n 
self.user_name_class_name = user_name_class_name\n\n def search_users(self, _keyword=\"wahaha\", user_number=None):\n \"\"\"\n 根据关键字进行用户搜索\n :param _keyword: 待检索关键字\n :param user_number: 需要检索的用户数量\n :return:\n self.user_info_search: 用户信息列表 [user_name, user_id, location, homepage_url]\n \"\"\"\n user_info_search = list()\n search_url = \"https://www.facebook.com/search/str/\" + _keyword + \"/keywords_users\"\n self.get(search_url)\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n empty_flag = soup.find(id=\"empty_result_error\")\n if empty_flag is None:\n # 页面刷新\n if user_number is None:\n self.page_refresh_to_bottom(\"users\")\n else:\n refresh_times = user_number // 5\n self.page_refresh(refresh_times)\n\n # 页面解析\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n\n if self.user_search_class_name is None:\n self.get_class_name_for_search()\n\n items = soup.find_all(class_=self.user_search_class_name)\n\n # 列表填充\n if user_number is None:\n for item in items:\n user_info_search.append(self.get_user_info(item))\n else:\n index = 0\n while index < user_number:\n user_info_search.append(self.get_user_info(items[index]))\n index += 1\n else:\n pass\n\n return user_info_search\n\n def get_photos_href_list(self, _homepage_url):\n \"\"\"\n 获取照片\n :param _homepage_url: 待访问的用户主页链接\n :return:\n photos_href_list: 图像链接列表\n \"\"\"\n photos_url = utils.get_jump_url(_homepage_url, \"photos\")\n self.get(photos_url)\n page = self.driver.page_source\n soup = BeautifulSoup(page, self.soup_type)\n try:\n bottom_element = self.driver.find_element_by_xpath(self.bottom_xpath_other)\n except:\n bottom_element = None\n\n photos_href_list = list()\n while bottom_element is None:\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n page = self.driver.page_source\n soup = BeautifulSoup(page, self.soup_type)\n try:\n bottom_element = self.driver.find_element_by_xpath(self.bottom_xpath_other)\n except:\n bottom_element = None\n\n if bottom_element is not None:\n break\n\n for data in soup.find_all(class_=\"uiMediaThumb\"):\n photos_href_list.append(data.get(\"href\"))\n\n return photos_href_list\n\n def get_photo_info(self, _photo_href):\n \"\"\"\n 根据图像的链接对其信息进行获取\n :param _photo_href: 图像链接\n :return:\n link: 原始图像对应的链接\n date: 图像发布对应的时间\n location: 图像发布对应的位置\n text: 图像发布对应的文本内容\n width: 图像的实际宽度\n height: 图像的实际高度\n \"\"\"\n self.get(_photo_href)\n page = self.driver.page_source\n soup = BeautifulSoup(page, self.soup_type)\n\n date = self.get_photo_publish_date(soup)\n location = self.get_photo_publish_location(soup)\n text = self.get_photo_publish_text(soup)\n\n full_screen_element = WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located((By.ID, self.full_screen_id)))\n full_screen_element.click()\n page = self.driver.page_source\n soup = BeautifulSoup(page, self.soup_type)\n\n link = self.get_photo_link(soup)\n width, height = self.get_photo_size(soup)\n\n return link, date, location, text, width, height\n\n def get_photos_info_list(self, _photos_href_list):\n photos_info_list = list()\n for photo_href in _photos_href_list:\n link, date, location, text, width, height = self.get_photo_info(photo_href)\n photos_info_list.append([link, date, location, text, width, height])\n\n return photos_info_list\n\n def download_photos_one(self, homepage_url, folder_name=\"./\",\n start_date=None, end_date=None, keyword=\"\",\n width_left=0, width_right=5000, height_left=0, height_right=5000):\n 
\"\"\"\n 单个用户的照片下载\n :param homepage_url: 用户主页\n :param folder_name: 待保存文件夹路径\n\n 以下为筛选条件\n :param start_date: 待下载图片的起始日期 (default: None)\n :param end_date: 待下载图片的终止日期 (default: None)\n :param keyword: 待下载图片对应的文字中包含的关键字 (default: \"\")\n :param width_left: 图片宽度下界 (default: 0)\n :param width_right: 图片宽度上界 (default: 5000)\n :param height_left: 图片高度下界 (default: 0)\n :param height_right: 图片高度上界 (default: 5000)\n :return: NULL\n Note:\n photo info:\n link, date, location, text, width, height\n \"\"\"\n utils.folder_make(folder_name)\n photos_href_list = self.get_photos_href_list(homepage_url)\n photos_info_list = self.get_photos_info_list(photos_href_list)\n\n if start_date is None and end_date is None:\n for photo_info in photos_info_list:\n utils.download_photos(photo_info[0], folder_name)\n else:\n start_date_unix = utils.get_unix_stamp(start_date)\n end_date_unix = utils.get_unix_stamp(end_date)\n for photo_info in photos_info_list:\n unix_time = photo_info[1]\n if start_date_unix < unix_time < end_date_unix \\\n and keyword in photo_info[3]:\n if width_left < photo_info[4] < width_right and height_left < photo_info[5] < height_right:\n utils.download_photos(photo_info[0], folder_name)\n else:\n pass\n else:\n pass\n\n def download_photos_batch(self, user_info_list,\n start_date=None, end_date=None, keyword=\"\",\n width_left=0, width_right=5000, height_left=0, height_right=5000):\n \"\"\"\n 多个用户照片下载\n :param user_info_list: 用户信息列表\n user_name, user_id, user_homepage_url, about\n 以下为筛选条件\n :param start_date: 待下载图片的起始日期 (default: None)\n :param end_date: 待下载图片的终止日期 (default: None)\n :param keyword: 待下载图片对应的文字中包含的关键字 (default: \"\")\n :param width_left: 图片宽度下界 (default: 0)\n :param width_right: 图片宽度上界 (default: 5000)\n :param height_left: 图片高度下界 (default: 0)\n :param height_right: 图片高度上界 (default: 5000)\n :return: NULL\n \"\"\"\n for user_info in user_info_list:\n folder_name = user_info[1]\n homepage_url = user_info[2]\n self.download_photos_one(homepage_url, folder_name=folder_name,\n start_date=start_date, end_date=end_date, keyword=keyword,\n width_left=width_left, width_right=width_right,\n height_left=height_left, height_right=height_right)\n print(\"Download completed.\")\n\n def get(self, url):\n \"\"\"\n 页面跳转,为避免多余跳转,先对当前页面的url进行判断,若url相同则不再跳转\n :param url: 待跳转的url\n :return: NULL\n \"\"\"\n current_url = self.driver.current_url\n if url == current_url:\n pass\n else:\n self.driver.get(url)\n\n @staticmethod\n def get_photo_link(soup):\n spotlight = soup.find(class_=\"spotlight\")\n _link = spotlight.get(\"src\") # 图片链接\n\n return _link\n\n @staticmethod\n def get_photo_size(soup):\n spotlight = soup.find(class_=\"spotlight\")\n style = spotlight.get(\"style\") # 图片尺寸字符串\n _width, _height = utils.get_size(style) # 获取图像的宽和高\n\n return _width, _height\n\n @staticmethod\n def get_photo_publish_date(soup):\n publish_time = soup.find(\"span\", {\"id\": \"fbPhotoSnowliftTimestamp\"})\n if publish_time is None:\n _date = None\n else:\n _date = publish_time.a.abbr.get(\"data-utime\") # 图片发表的时间 (Unix时间戳)\n\n return _date\n\n @staticmethod\n def get_photo_publish_location(soup):\n location_object = soup.find(class_=\"fbPhotosImplicitLocLink\") # 图片发表的位置信息\n if location_object is None:\n _location = None\n else:\n _location = location_object.text\n\n return _location\n\n @staticmethod\n def get_photo_publish_text(soup):\n text_object = soup.find(\"span\", {\"class\": \"hasCaption\"}) # 图片发表时对应的文字说明\n if text_object is None:\n _text = []\n else:\n _text = text_object.text\n\n return 
_text\n\n\nif __name__ == \"__main__\":\n email, password = utils.get_account(\"account.csv\", 0)\n fb = Facebook(email, password, \"Chrome\", False)\n if fb.browser_state == 1:\n fb.sign_in()\n fb.enter_homepage_self()\n fb.make_post()\n cookies = fb.cookies\n\n else:\n print(\"Initialization failed.\")\n","sub_path":"facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":29527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} {"seq_id":"433427911","text":"\"\"\"\nAdapted from: https://github.com/udacity/deep-reinforcement-learning/blob/master/dqn/solution/model.py\n\nThe code was modified to add one more hidden layer as suggested by the paper:\nBudget Constrained Bidding by Model-free Reinforcement Learning in Display Advertising\n(https://arxiv.org/pdf/1802.08365.pdf)\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nimport random\n\n\nclass Network(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, fc1_units=100, \n fc2_units=100, fc3_units=100):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n fc1_units (int): Number of nodes in first hidden layer\n fc2_units (int): Number of nodes in second hidden layer\n fc3_units (int): Number of nodes in third hidden layer\n \"\"\"\n super(Network, self).__init__()\n set_seed()\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, fc3_units)\n self.fc4 = nn.Linear(fc3_units, action_size)\n\n def forward(self, state):\n \"\"\"Build a network that maps state -> action values.\"\"\"\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n return self.fc4(x)\n\ndef set_seed():\n os.environ['PYTHONHASHSEED'] = str(0)\n random.seed(0)\n np.random.seed(0)\n torch.manual_seed(0)\n torch.cuda.manual_seed_all(0)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n","sub_path":"src/rtb_agent/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} {"seq_id":"301988961","text":"# -*- coding: utf-8 -*-\nimport pywinauto\nfrom abc import ABCMeta, abstractmethod\n\nclass VoiroAuto_Meta(metaclass=ABCMeta):\n @abstractmethod\n def talk(self, text):\n raise NotImplementedError()\n\nclass Voiro_Pywinauto(VoiroAuto_Meta):\n def search_child_byclassname(self, class_name, uiaElementInfo, target_all = False):\n target = []\n # search all child elements\n for childElement in uiaElementInfo.children():\n # check for a class_name match\n if childElement.class_name == class_name:\n if not target_all:\n return childElement\n else:\n target.append(childElement)\n if not target_all:\n # return False if nothing was found\n return False\n else:\n return target\n\n\n def search_child_byname(self, name, uiaElementInfo):\n # search all child elements\n for childElement in uiaElementInfo.children():\n # check for a name match\n if childElement.name == name:\n return childElement\n # return False if nothing was found\n return False\n\n def talk(self, speakPhrase):\n # desktop element\n parentUIAElement = pywinauto.uia_element_info.UIAElementInfo()\n # look for the VOICEROID2 window\n voiceroid2 = self.search_child_byname(\"VOICEROID2\",parentUIAElement)\n # in case the title has a trailing *\n if voiceroid2 is False:\n voiceroid2 = self.search_child_byname(\"VOICEROID2*\",parentUIAElement)\n\n # get the ElementInfo of the text element\n TextEditViewEle = 
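A quick shape check for the DQN `Network` defined in model.py above (illustrative only; the state and action sizes here are arbitrary placeholders, not values from the paper):

import torch
from model import Network  # the record above ships as src/rtb_agent/model.py

net = Network(state_size=7, action_size=11)
q_values = net(torch.zeros(1, 7))  # forward pass on one dummy state
assert q_values.shape == (1, 11)   # one Q-value per discrete action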
self.search_child_byclassname(\"TextEditView\",voiceroid2)\n textBoxEle = self.search_child_byclassname(\"TextBox\",TextEditViewEle)\n\n # コントロール取得\n textBoxEditControl = pywinauto.controls.uia_controls.EditWrapper(textBoxEle)\n\n # テキスト登録\n textBoxEditControl.set_edit_text(speakPhrase)\n\n\n # ボタン取得\n buttonsEle = self.search_child_byclassname(\"Button\",TextEditViewEle,target_all = True)\n # 再生ボタンを探す\n playButtonEle = \"\"\n for buttonEle in buttonsEle:\n # テキストブロックを捜索\n textBlockEle = self.search_child_byclassname(\"TextBlock\",buttonEle)\n if textBlockEle.name == \"再生\":\n playButtonEle = buttonEle\n break\n\n # ボタンコントロール取得\n playButtonControl = pywinauto.controls.uia_controls.ButtonWrapper(playButtonEle)\n\n # 再生ボタン押下\n playButtonControl.click()\n\n\n\n #----- In development functions ------------------\n def look_children(self, obj):\n for child in obj.children():\n print(child.class_name)\n #print(child.handle(),end=\" \")\n #print(child.automation_id())\n\n\n def get_voiro2(self):\n # デスクトップのエレメント\n parentUIAElement = pywinauto.uia_element_info.UIAElementInfo()\n # voiceroidを捜索する\n voiceroid2 = self.search_child_byname(\"VOICEROID2\",parentUIAElement)\n # *がついている場合\n if voiceroid2 == False:\n voiceroid2 = self.search_child_byname(\"VOICEROID2*\",parentUIAElement)\n return voiceroid2\n\n def get_tuning_tab(self, tabName, voiro2=None):\n if voiro2==None:\n voiro2 = self.get_voiro2()\n tuning_tab = self.search_child_byclassname(\"TabControl\", voiro2, target_all=True)[1]\n target = self.search_child_byname(tabName, tuning_tab)\n return target\n\nif __name__==\"__main__\":\n pass\n","sub_path":"voiro_auto.py","file_name":"voiro_auto.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"217473609","text":"# -*- coding:utf-8 -*-\r\n\r\n__author__ = 'ken'\r\n\r\nimport time\r\n\r\ncurrent_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) # 获取当前的时间戳\r\n\r\n\"\"\"定义测试需要用到的数据\"\"\"\r\nparm = {\r\n 'platformName': 'iOS',\r\n 'paltformVersion': '11.2.2',\r\n 'deviceName': 'iPhone 8',\r\n 'UDID': '69db97834cc4c58922171fe542285219eae4f3a9',\r\n 'packageName': 'com.oceanwing.BatteryCam', # 定义apk包名\r\n 'noReset': true,\r\n 'appActivity': 'com.oceanwing.battery.cam.main.SplashActivity', # 定义此启动的activity\r\n 'apk_path': 'D:\\python_project\\Eufy\\data\\eufy_v1.0.5.apk', # 定义apk存放路径\r\n 'screenshot_path': \"D:\\\\python_project\\\\Eufy\\\\screenshot\\\\\", # 定义截图路径\r\n # 'login_user': '894772205@qq.com', # 定义登录时用到的账号\r\n # 'login_pwd': '123456789', # 定义登录时用到的密码\r\n 'login_user': 'yuanhui.li@oceanwing.com',\r\n 'login_pwd': '123456789',\r\n 'sign_email': 'ken@{}.com'.format(current_time), # 定义注册时用的邮箱\r\n 'sign_pwd': '123456789', # 定义注册时用的密码\r\n 'grant_email': '894772205@qq.com', # 定义邀请用户邮箱\r\n 'grant_pwd': '123456789', # 定义邀请用户登录密码\r\n 'report_path': r'D:\\python_project\\Eufy\\report', # 定义个报告存放的路径,支持相对路径\r\n}\r\n\r\n\"\"\"\r\n# htc M8w\r\n'paltformVersion': '4.4.2',\r\n'deviceName': 'HC45XWM00171',\r\n\"\"\"\r\n\r\n\"\"\"\r\n# ZTE AXON 7 mini\r\n'paltformVersion': '6.0.1',\r\n'deviceName': '968d6b8',\r\n\"\"\"\r\n\r\n\"\"\"\r\n# 三星S7\r\n'paltformVersion': '7.0',\r\n'deviceName': 'bce6916b',\r\n\"\"\"\r\n","sub_path":"data/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"226019645","text":"import cv2\nimport numpy as np\nimport random\nimport math\nimport imgFunctions as 
img\nimport time\nimport map as mp\nimport collision as col\nimport driver as driver\nimport bot as bt\nimport concurrent.futures\n\ncounter = 1\n\n# lists and dicts required to construct bot objects and graph\nbots = list()\ngraph = {}\nexcp_Set = set()\n\nshortestPathtoNodes = {} # dict to hold shortest path to each node from each node\nPathCost = {} # dict to hold path cost from node to node\n\n#Path_list = []\n\n# parameters required for the GUI\nBOT_COUNT = 5\nRADI = 50\nGRID_SIZE = 24\ndS = 0.1\nWINDOW_SIZE = 700 # square window, height = width\nCELL_SIZE = WINDOW_SIZE/GRID_SIZE \n\nbackg_H = 0\nbackg_W = 0\nbot_H = 0\nbot_W = 0\n\n# boolean flag to signify destination is reached\ndest_reached = False\n\npaused = False\nset_dest = False\n\n\"\"\" draw bot images in the overlay canvas\n return : overlay(4 dims with the alpha layer)\"\"\"\n\n# function to draw bots on the canvas\ndef draw_bots(bots):\n # create an overlay layer to draw all the robots with the alpha\n overlay = np.zeros((backg_H, backg_W, 4), dtype=\"uint8\")\n for bot in bots:\n x = bot.curr_x*CELL_SIZE\n y = bot.curr_y*CELL_SIZE\n\n x = (0 if (x < 0) else ((backg_W - CELL_SIZE)\n if x > (backg_W - CELL_SIZE) else x))\n y = (0 if (y < 0) else ((backg_H - CELL_SIZE)\n if y > (backg_H - CELL_SIZE) else y))\n\n #print(bot.curr_x, bot.curr_y, x, y)\n angle = bot.angle\n x_start = int(x)\n y_start = int(y)\n\n # set the state of the bot according to the neighbour bots' distance\n bot.getState(bots)\n\n # add the additional status color bar to the basic bot png\n if bot.state == 0:\n addon = bot.bot_imgs['blue']\n else:\n addon = bot.bot_imgs['red']\n\n # ---------------------Draw destination lines and rectangles -----------------------\n\n color = (125, 0, 100)\n\n if bot.dest_x != -1:\n cv2.line(overlay, (int(bot.x), int(bot.y)),\n (bot.dest_x, bot.dest_y), (0, 200, 200, 255), 2)\n\n\n cv2.rectangle(overlay, (bot.dest_x-int(CELL_SIZE), bot.dest_y-int(CELL_SIZE)),\n (bot.dest_x+int(CELL_SIZE), bot.dest_y+int(CELL_SIZE)), color, 2)\n\n bot_img = cv2.add(bot.bot_imgs['bot'], addon)\n bot_img = img.rotate_image(bot_img, angle)\n roi = overlay[y_start:y_start+bot_W, x_start:x_start+bot_W] # region of interest\n overlay[y_start:y_start+bot_W, x_start:x_start+bot_W] = roi + bot_img\n\n return overlay\n\n\nif __name__ == \"__main__\":\n\n \n # load background image according to the grid size\n backg_H, backg_W, background = img.loadBackground(GRID_SIZE, WINDOW_SIZE)\n bot_H, bot_W, bot_pngs = img.loadBotImgs(\n GRID_SIZE, WINDOW_SIZE) # load all pngs of the bot to a dict\n bot_png = bot_pngs['bot'] # get the bot image\n\n #print(backg_H, backg_W)\n #print(bot_H, bot_W)\n\n cv2.namedWindow(\"image\")\n# cv2.setMouseCallback(\"image\", mosueEvent)\n\n # hard coded order schedule\n inputSchedule = {'0':[(1,6)], '1':[(22,6)], '2':[(11,15)], '3':[(12,6)], '4':[(22,6), (11,15)]\n \n }\n\n\n ''' initializes the graph and the grid '''\n \n # adding shelves\n mp.add_shelves(excp_Set,(3,3), (9,9)) # Rack A\n mp.add_shelves(excp_Set,(3,12), (10,18)) # Rack C\n mp.add_shelves(excp_Set,(13,3), (21,9)) # Rack B\n mp.add_shelves(excp_Set,(13,12), (21,18)) # Rack D\n\n # mp.add_shelves(excp_Set,(8,0), (21,1)) # run-off area near delivery station\n\n # mp.add_shelves(excp_Set,(2,3), (3,10)) # run-off area near rack A\n # mp.add_shelves(excp_Set,(2,12), (3,19)) # run-off area near rack C\n # mp.add_shelves(excp_Set,(13,3), (14,10)) # run-off area near rack B\n # mp.add_shelves(excp_Set,(13,12), (14,19)) # run-off area near rack D\n\n # 
mp.add_nodes(excp_Set, (2,6))\n # mp.add_nodes(excp_Set, (2,7))\n # mp.add_nodes(excp_Set, (2,15))\n # mp.add_nodes(excp_Set, (2,16))\n # mp.add_nodes(excp_Set, (13,6))\n # mp.add_nodes(excp_Set, (13,7))\n # mp.add_nodes(excp_Set, (13,15))\n # mp.add_nodes(excp_Set, (13,16))\n\n \n\n # creates the graph\n mp.generate_graph(graph, excp_Set, 31)\n \n \n ''' Left side roads along columns '''\n # for (1,3) --> (1,9)\n for i in range(3,10):\n # path towards increasing Y \n # remove backward link from (1,10) to (1,9)\n del graph[(1,i)][(1,i-1)]\n del graph[(1,i)][(0,i)]\n del graph[(1,i)][(2,i)]\n\n # for (0,2) --> (0,9)\n for i in range(2,10):\n # path towards decreasing Y\n # remove forward link from (0,1) to (0,2) \n del graph[(0,i)][(0,i+1)]\n del graph[(0,i)][(1,i)]\n\n\n # for (1,12) --> (1,18)\n for i in range(12,19):\n # path towards increasing Y\n # remove backward link from (1,19) to (1,18)\n del graph[(1,i)][(1,i-1)]\n del graph[(1,i)][(0,i)]\n del graph[(1,i)][(2,i)]\n\n # for (0,12) --> (0,19)\n for i in range(12,20):\n # path towards decreasing Y\n # remove forward link from (0,11) to (0,12)\n del graph[(0,i)][(0,i+1)]\n del graph[(0,i)][(1,i)]\n \n ''' Bottom side roads along rows '''\n # for (1,20) --> (10,20)\n for i in range(1,11):\n # path towards decreasing X\n # remove forward link from (0,20) to (1,20) \n del graph[(i,20)][(i+1,20)]\n del graph[(i,20)][(i,19)]\n del graph[(i,20)][(i,21)]\n \n # for (13,20) --> (22,20)\n for i in range(13,23):\n # path towards decreasing X\n # remove forward link from (12,20) to (13,20)\n del graph[(i,20)][(i+1,20)]\n del graph[(i,20)][(i,19)]\n del graph[(i,20)][(i,21)]\n\n # for (2,19) --> (10,19)\n for i in range(2,11):\n # path towards increasing X\n # remove backward link from (11,19) to (10,19) \n del graph[(i,19)][(i-1,19)]\n del graph[(i,19)][(i,20)]\n #del graph[(1,20)][(1,21)]\n \n # for (13,19) --> (22,19)\n for i in range(13,23):\n # path towards increasing X\n # remove backward link (23,19) to (22,19)\n del graph[(i,19)][(i-1,19)]\n del graph[(i,19)][(i,20)]\n #del graph[(1,20)][(1,21)]\n \n ''' Right side roads along columns '''\n\n # for (22,3) --> (22,9)\n for i in range(3,10):\n # path towards decreasing Y\n # remove forward link from (22,2) to (22,3)\n del graph[(22,i)][(22,i+1)]\n del graph[(22,i)][(23,i)]\n\n # for (22,12) --> (22,18)\n for i in range(12,19):\n # path towards decreasing Y\n # remove forward link from (22,11) to (22,12)\n del graph[(22,i)][(22,i+1)]\n del graph[(22,i)][(23,i)]\n\n\n # for (23,2) --> (23,9)\n for i in range(2,10):\n # path towards increasing Y\n # remove backward link from (23,10) to (23,9)\n del graph[(23,i)][(23,i-1)]\n del graph[(23,i)][(22,i)]\n del graph[(23,i)][(24,i)]\n \n \n\n # for (23,12) --> (23,19)\n for i in range(12,20):\n # path towards increasing Y\n # remove backward link from (23,20) to (23,19) \n del graph[(23,i)][(23,i-1)]\n del graph[(23,i)][(22,i)]\n del graph[(23,i)][(24,i)]\n\n ''' Top side roads along row '''\n\n # for (4,1) --> (10,1)\n for i in range(4,11):\n # path towards increasing X\n # remove backward link from (11,1) to (10,1)\n del graph[(i,1)][(i-1,1)]\n del graph[(i,1)][(i,2)]\n if i>=2:\n del graph[(i,1)][(i,0)]\n \n # for (13,1) --> (22,1)\n for i in range(13,23):\n # path towards increasing X\n # remove backward link from (23,1) to (22,1) \n del graph[(i,1)][(i-1,1)]\n del graph[(i,1)][(i,0)]\n del graph[(i,1)][(i,2)]\n\n # for (4,2) --> (10,2)\n for i in range(4,11):\n # path towards decreasing X\n # remove forward link from (3,2) to (4,2)\n del 
graph[(i,2)][(i+1,2)]\n del graph[(i,2)][(i,1)]\n del graph[(i,2)][(i,3)]\n \n # for (13,2) --> (21,2)\n for i in range(13,22):\n # path towards decreasing X\n # remove forward link from (12,2) to (13,2)\n del graph[(i,2)][(i+1,2)]\n del graph[(i,2)][(i,1)]\n del graph[(i,2)][(i,3)]\n\n ''' Center channel roads vertical '''\n\n # for (11,3) --> (11,9)\n for i in range(3,10):\n # path towards increasing Y\n # remove backward link from (11,10) to (11,9)\n del graph[(11,i)][(11,i-1)]\n del graph[(11,i)][(12,i)]\n del graph[(11,i)][(10,i)]\n\n # for (11,12) --> (11,19)\n for i in range(12,20):\n # path towards increasing Y\n del graph[(11,i)][(11,i-1)]\n del graph[(11,i)][(12,i)]\n del graph[(11,i)][(10,i)]\n\n # for (12,3) --> (12,9)\n for i in range(3,10):\n # path towards decreasing Y\n # remove forward link from (12,2) to (12,3)\n del graph[(12,i)][(12,i+1)]\n del graph[(12,i)][(11,i)]\n del graph[(12,i)][(13,i)]\n\n # for (12,12) --> (12,18)\n for i in range(12,19):\n # path towards decreasing Y\n # remove forward link from (12,11) to (12,12)\n del graph[(12,i)][(12,i+1)]\n del graph[(12,i)][(11,i)]\n del graph[(12,i)][(13,i)]\n \n ''' center channel roads horizontal '''\n # for (2,10) to (10,10)\n for i in range(2,11):\n # path towards decreasing Y\n # remove forward link from (1,10) to (2,10)\n del graph[(i,10)][(i+1,10)]\n del graph[(i,10)][(i,11)]\n del graph[(i,10)][(i,9)]\n\n # for (13,10) to (21,10)\n for i in range(13,22):\n # path towards decreasing Y\n # remove forward link from (12,10) to (13,10)\n del graph[(i,10)][(i+1,10)]\n del graph[(i,10)][(i,11)]\n del graph[(i,10)][(i,9)]\n\n # for (2,11) to (10,11)\n for i in range(2,11):\n # path towards increasing Y\n # remove forward link from (11,11) to (10,11)\n del graph[(i,11)][(i-1,11)]\n del graph[(i,11)][(i,12)]\n del graph[(i,11)][(i,10)]\n\n # for (13,11) to (21,11)\n for i in range(13,22):\n # path towards increasing Y\n # remove forward link from (22,11) to (21,11)\n del graph[(i,11)][(i-1,11)]\n del graph[(i,11)][(i,12)]\n del graph[(i,11)][(i,10)]\n\n\n\n ''' Junction points '''\n del graph[(1,2)][(0,2)]\n\n del graph[(1,19)][(0,19)]\n\n del graph[(22,2)][(23,2)]\n\n del graph[(22,19)][(23,19)]\n\n ''' removing additional points '''\n del graph[(1,10)][(1,9)]\n del graph[(0,1)][(0,2)]\n del graph[(1,19)][(1,18)]\n del graph[(0,11)][(0,12)]\n del graph[(0,20)][(1,20)]\n del graph[(12,20)][(13,20)]\n #del graph[(11,19)][(10,19)]\n #del graph[(23,19)][(22,19)]\n del graph[(22,2)][(22,3)]\n del graph[(22,11)][(22,12)]\n del graph[(23,10)][(23,9)]\n del graph[(23,20)][(23,19)]\n del graph[(11,1)][(10,1)]\n del graph[(23,1)][(22,1)]\n del graph[(3,2)][(4,2)]\n del graph[(12,2)][(13,2)]\n del graph[(11,10)][(11,9)]\n #del graph[(11,19)][(11,18)]\n del graph[(12,2)][(12,3)]\n del graph[(12,11)][(12,12)]\n del graph[(1,10)][(2,10)]\n del graph[(12,10)][(13,10)]\n del graph[(11,11)][(10,11)]\n del graph[(22,11)][(21,11)]\n\n # del graph[(2,0)][(2,1)]\n # del graph[(2,1)][(2,0)]\n\n # del graph[(3,0)][(3,1)]\n # del graph[(3,1)][(3,0)]\n\n # del graph[(4,0)][(4,1)]\n # del graph[(4,1)][(4,0)]\n\n # del graph[(5,0)][(5,1)]\n # del graph[(5,1)][(5,0)]\n\n # del graph[(4,0)][(4,1)]\n # del graph[(4,1)][(4,0)]\n\n # destination array\n #racks = [[(0,0),(4,10)], [(1,0),(13,19)], [(2,0), (20,20)]]\n #Path_list = [[]for _ in range(BOT_COUNT)]\n \n while True:\n\n \n # creates bots images at the initial run\n if len(bots) == 0:\n for i in range(BOT_COUNT):\n imgs = bot_pngs.copy()\n bot = bt.Bot(i)\n \n #bot.setPos(i, 0)\n # each bot is 
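The long run of `del graph[...][...]` statements above presumes the adjacency structure built by `mp.generate_graph`: a dict mapping each grid cell `(x, y)` to a dict of reachable neighbour cells (this shape is inferred from the call sites; map.py itself is not part of this record). Each bare `del` raises `KeyError` if the edge is already gone, so a tolerant helper is a safer pattern:

# Assumed shape: graph[(1, 3)] == {(0, 3): 1, (2, 3): 1, (1, 2): 1, (1, 4): 1}
def remove_edge(graph, src, dst):
    # Drop the directed edge src -> dst if present; no KeyError otherwise.
    graph.get(src, {}).pop(dst, None)

# The one-way column (1,3)..(1,9) above would then read:
# for i in range(3, 10):
#     remove_edge(graph, (1, i), (1, i - 1))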
positioned initially in a place where (x,y) coordinates are even\n # the mapping function used is as follows;\n # (x,y) ---> (2x,2y)\n bot.init_x = i\n bot.init_y = 0\n bot.curr_x = bot.init_x\n bot.curr_y = bot.init_y\n bot.angle = 180\n bot.waitFlag = False\n bot.setImgs(imgs)\n bots.append(bot)\n \n else: \n #col.collision(bots)\n #update(bots)\n\n for bot in bots:\n \n # check if the OrderComplete flag is up and inputSchedule dict is not empty\n if bot.OrderComplete and bool(inputSchedule) :\n # calculates the path\n driver.setPath(graph, shortestPathtoNodes, PathCost, bot, inputSchedule)\n\n # calls start function activate bots\n driver.start(bots) \n\n \n ''' ------------Draw bots ------------------------------ '''\n # get a overlay that contains the vector with aplha which has the current orientation of bots\n overlay = draw_bots(bots)\n # mask the background with the overlay\n masked_backg = cv2.bitwise_and(background, background, mask=cv2.bitwise_not(overlay[:, :, 3]))\n # add the overlay and the background\n finalImg = cv2.add(overlay[:, :, :3], masked_backg)\n\n # ------------Draw rect on selected cell --------------\n # x_cell, y_cell, CELL_SIZE = getCell(mouse_pos[0], mouse_pos[1])\n \n # color = (125, 0, 100) if mouse_state == cv2.EVENT_LBUTTONDOWN else (125, 255, 0)\n # cv2.rectangle(finalImg, (x_cell, y_cell),\n # (x_cell+CELL_SIZE, y_cell+CELL_SIZE), color, 2)\n\n #input(\"Press Enter to continue....\") \n cv2.imshow('image', finalImg)\n\n key = cv2.waitKey(5)\n\n if key == 27:\n break\n elif key == 32:\n paused = not paused","sub_path":"Software/Simulator/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":14184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"455266379","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n########################################################################################################################\n# Copyright © 2019-2020 Pi-Yueh Chuang, Lorena A. Barba, and G2 Integrated Solutions, LLC.\n# All Rights Reserved.\n#\n# Contributors: Pi-Yueh Chuang \n# J. Tracy Thorleifson \n#\n# Licensed under the BSD-3-Clause License (the \"License\").\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: https://opensource.org/licenses/BSD-3-Clause\n#\n# BSD-3-Clause License:\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided\n# that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the\n# following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or\n# promote products derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n########################################################################################################################\n\n\"\"\"\nWrite to NetCDF4 file with CF convention\n\"\"\"\n\nimport os\nimport sys\nimport numpy\nimport netCDF4\nimport datetime\nimport argparse\n\n\ndef get_state_interpolator(state, field=0):\n \"\"\"\n Get a Scipy interpolation object for a field on a AMR grid.\n \"\"\"\n import scipy.interpolate\n\n # the underlying patch in this state object\n p = state.patch\n\n # x, y arrays and also dx, dy for checking\n x, dx = numpy.linspace(p.lower_global[0]+p.delta[0]/2.,\n p.upper_global[0]-p.delta[0]/2.,\n p.num_cells_global[0], retstep=True)\n y, dy = numpy.linspace(p.lower_global[1]+p.delta[1]/2.,\n p.upper_global[1]-p.delta[1]/2.,\n p.num_cells_global[1], retstep=True)\n assert numpy.abs(dx-p.delta[0]) < 1e-6, \"{} {}\".format(dx, p.delta[0])\n assert numpy.abs(dy-p.delta[1]) < 1e-6, \"{} {}\".format(dy, p.delta[1])\n\n # get the interpolation object\n kx = ky = 3\n\n if x.size <= 3:\n kx = x.size - 1\n\n if y.size <= 3:\n ky = y.size - 1\n\n interp = scipy.interpolate.RectBivariateSpline(\n x, y, state.q[field, :, :],\n [p.lower_global[0], p.upper_global[0], p.lower_global[1], p.upper_global[1]],\n kx=kx, ky=ky)\n\n return interp\n\ndef interpolate(solution, x_target, y_target,\n field=0, shift=[0., 0.], level=1,\n clip=True, clip_less=1e-7, nodatavalue=-9999.):\n \"\"\"\n Do the interpolation.\n \"\"\"\n\n # allocate space for interpolated results\n values = numpy.zeros((y_target.size, x_target.size), dtype=numpy.float64)\n\n # loop through all AMR grids\n for state in solution.states:\n\n p = state.patch\n\n # only do subsequent jobs if this is at the target level\n if p.level != level:\n continue\n\n # get the indices of the target coordinates that are inside this patch\n xid = numpy.where((x_target>=p.lower_global[0])&(x_target<=p.upper_global[0]))[0]\n yid = numpy.where((y_target>=p.lower_global[1])&(y_target<=p.upper_global[1]))[0]\n\n # get interpolation object\n interpolator = get_state_interpolator(state, field)\n\n # if any target coordinate located in thie patch, do interpolation\n if xid.size and yid.size:\n values[yid[:, None], xid[None, :]] = \\\n interpolator(x_target[xid]-shift[0], y_target[yid]-shift[1]).T\n\n # apply nodatavalue to a threshold\n if clip:\n values[values:0.02\" \"distance:>=:50000\r\n\r\nOutput options: Required.\r\n- display: prints to stdout\r\n- csv_file: exports data to a csv\r\n\r\nFilters options: Optional. Input as: option:operation:value e.g. diameter:>=:0.042\r\n- is_hazardous:[=]:bool\r\n- diameter:[>=|=|<=]:float\r\n- distance:[>=|=|<=]:float\r\n\r\nReturn objects options: Optional, defaults to NEO if not specified.\r\n- NEO\r\n- Path\r\n\r\nFilename: Optional, used for specifying a filename for a csv to load data from. 
By default project looks for a csv in: data/neo_data.csv.\r\n\"\"\"\r\n\r\nimport argparse\r\nimport pathlib\r\nimport sys\r\nfrom datetime import datetime\r\n\r\nfrom exceptions import UnsupportedFeature\r\nfrom database import NEODatabase\r\nfrom search import Query, NEOSearcher\r\nfrom writer import OutputFormat, NEOWriter\r\n\r\nPROJECT_ROOT = pathlib.Path(__file__).parent.absolute()\r\n\r\n\r\ndef verify_date(datetime_str):\r\n \"\"\"\r\n Function that verifies datetime strings in YYYY-MM-DD format are valid dates.\r\n\r\n :param datetime_str: String representing datetime in %Y-%m-%d format\r\n :return: str: String representing datetime in %Y-%m-%d format\r\n \"\"\"\r\n try:\r\n date_time_obj = datetime.strptime(datetime_str, \"%Y-%m-%d\")\r\n return datetime_str\r\n except ValueError:\r\n error_message = f'Not a valid date: \"{datetime_str}\"'\r\n raise argparse.ArgumentTypeError(error_message)\r\n\r\n\r\ndef verify_output_choice(choice):\r\n \"\"\"\r\n Function that verifies output choice is a supported OutputFormat.\r\n\r\n :param choice: String representing an OutputFormat\r\n :return: str: String representing an OutputFormat\r\n \"\"\"\r\n options = OutputFormat.list()\r\n\r\n if choice not in options:\r\n error_message = f'Not a valid output option: \"{choice}\"'\r\n raise argparse.ArgumentTypeError(error_message)\r\n\r\n return options[options.index(choice)]\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Near Earth Objects (NEOs) Database')\r\n parser.add_argument('output', choices=OutputFormat.list(), type=verify_output_choice,\r\n help='Select option for how to output the search results.')\r\n parser.add_argument('-r', '--return_object', choices=['NEO', 'Path'],\r\n default='NEO', type=str,\r\n help='Select entity data to return.')\r\n parser.add_argument('-d', '--date', type=verify_date, help='YYYY-MM-DD format to find NEOs on the given date')\r\n parser.add_argument('-s', '--start_date', type=verify_date,\r\n help='YYYY-MM-DD format to find NEOs on the provided start date')\r\n parser.add_argument('-e', '--end_date', type=verify_date,\r\n help='YYYY-MM-DD format to find NEOs up to the end date')\r\n parser.add_argument('-n', '--number', type=int, help='Int representing max number of NEOs to return')\r\n parser.add_argument('-f', '--filename', type=str, help='Name of input csv data file')\r\n parser.add_argument('--filter', nargs='+', help='Select filter options with filter value: '\r\n 'is_hazardous:[=]:bool, '\r\n 'diameter:[>=|=|<=]:float, '\r\n 'distance:[>=|=|<=]:float.'\r\n 'Input as: [option:operation:value] '\r\n 'e.g. 
diameter:>=:0.042')\r\n\r\n args = parser.parse_args()\r\n var_args = vars(args)\r\n\r\n # Load Data\r\n if args.filename:\r\n filename = args.filename\r\n else:\r\n filename = f'{PROJECT_ROOT}/data/neo_data.csv'\r\n\r\n db = NEODatabase(filename=filename)\r\n\r\n try:\r\n db.load_data()\r\n except FileNotFoundError as e:\r\n print(f'File {var_args.get(\"filename\")} not found, please try another file name.')\r\n sys.exit()\r\n except Exception as e:\r\n print(e)\r\n sys.exit()\r\n\r\n # Build Query\r\n query_selectors = Query(**var_args).build_query()\r\n\r\n # Get Results\r\n try:\r\n results = NEOSearcher(db).get_objects(query_selectors)\r\n except UnsupportedFeature as e:\r\n print('Unsupported Feature; Write unsuccessful')\r\n sys.exit()\r\n\r\n # Output Results\r\n try:\r\n result = NEOWriter().write(\r\n data=results,\r\n format=args.output,\r\n )\r\n except Exception as e:\r\n print(e)\r\n print('Write unsuccessful')\r\n sys.exit()\r\n\r\n if result:\r\n print('Write successful.')\r\n else:\r\n print('Write unsuccessful.')\r\n\r\n","sub_path":"starter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} {"seq_id":"436303833","text":"import sys\nimport socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_address = ('127.0.0.1', 31002)\nsock.connect(server_address)\n\ntry:\n msg = open('/home/bella/PROGJAR_05111740000117/Tugas1/test.txt','rb')\n\n message = msg.read(1024)\n # the file was opened in binary mode, so message is already bytes\n sock.sendall(message)\n\n amount_received = 0\n amount_expected = len(message)\n while amount_received < amount_expected:\n data = sock.recv(1024).decode()\n amount_received += len(data)\n print('received data')\n\nfinally:\n print('closing socket')\n sock.close()","sub_path":"Tugas1/Tugas1a/client2.py","file_name":"client2.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"522973362","text":"''' move zeros to the end'''\n\n\ndef move_zeros(nums):\n i = 0\n j = i + 1\n while i < len(nums) - 1 and j < len(nums):\n if nums[i] == 0:\n j = i + 1\n while nums[j] == 0:\n j = j + 1\n if j == len(nums):\n return\n nums[i], nums[j] = nums[j], nums[i]\n\n else:\n i = i + 1\n\nnums = [4, 2, 4, 0, 0, 3, 0, 5, 1, 0]\nprint(nums)\nmove_zeros(nums)\nprint(nums)\nnums = [0, 0]\nprint(nums)\nmove_zeros(nums)\nprint(nums)\n\n\ndef move_zeros2(nums):\n zi = 0\n for i in range(len(nums)):\n # only swap non-zero elements forward; zi marks the end of the\n # compacted non-zero prefix\n if nums[i] != 0:\n nums[i], nums[zi] = nums[zi], nums[i]\n zi += 1\n\nimport timeit\nprint(timeit.timeit('move_zeros(nums)', globals=globals()))\nprint(timeit.timeit('move_zeros2(nums)', globals=globals()))\n","sub_path":"Interview Questions/Facebook/move_zeros.py","file_name":"move_zeros.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"461978958","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# For training \nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch import optim\nimport dlc_practical_prologue as prologue\nimport time\n\n\n##### Global parameters #####\n\n# For reproducibility\nSEED = 123 \ntorch.manual_seed(SEED)\n\nif torch.cuda.is_available(): \n DEVICE = torch.device('cuda')\nelse:\n DEVICE = torch.device('cpu')\n\n# Training parameters\nN = 1000 #Dataset size (train and test)\nBATCH_SIZE = 25 #Batch size for stochastic optimization\nEPOCHS = 200 # Number of epochs for one 
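Worked trace of the compaction pass in `move_zeros2` from the move_zeros.py record above (the loop invariant: after each step, `nums[:zi]` holds every non-zero element seen so far, in their original order):

# nums = [4, 0, 3, 0, 5]
# i=0: nums[0]=4 != 0 -> swap(0, 0), zi=1 -> [4, 0, 3, 0, 5]
# i=1: nums[1]=0      -> skip             -> [4, 0, 3, 0, 5]
# i=2: nums[2]=3 != 0 -> swap(2, 1), zi=2 -> [4, 3, 0, 0, 5]
# i=3: nums[3]=0      -> skip             -> [4, 3, 0, 0, 5]
# i=4: nums[4]=5 != 0 -> swap(4, 2), zi=3 -> [4, 3, 5, 0, 0]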
round of training\n\n# Learning rate evolution (multiply LEARNING_RATE by GAMMA every LR_STEP epochs)\nLEARNING_RATE = 1e-3 \nLR_STEP = int(0.5 * EPOCHS)\nGAMMA = 0.1\n\n# Relative weighting of the auxiliary and main losses\nAUX_LOSS = 0.5\n\n##### Helper functions ####\n\ndef accuracy(model_output, test_target):\n \"\"\"Return the accuracy of the model output.\"\"\"\n nb_samples = model_output.shape[0]\n \n # Convert probability to decision\n output_int = torch.argmax(model_output, 1)\n nb_errors = (output_int - test_target).type(torch.BoolTensor).sum().item()\n \n return (nb_samples - nb_errors) / nb_samples\n\ndef accuracyMnist(model_output, test_target):\n \"\"\"Return the accuracy of the predicted digits of a Digit Net.\"\"\"\n nb_samples = model_output.shape[0]\n model_class = model_output.argmax(dim=1)\n nb_errors = (model_class - test_target).type(torch.BoolTensor).sum().item()\n \n return (nb_samples - nb_errors) / nb_samples\n\n\ndef nb_param(model):\n \"\"\"Return the number of trained parameters of the input model.\"\"\"\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n##### Neural Nets Definition ####\n\nclass FCNet(nn.Module):\n \"\"\"Naive fully connected net.\"\"\"\n def __init__(self):\n super(FCNet, self).__init__()\n self.fc1 = nn.Linear(392,200)\n self.fc2 = nn.Linear(200,20)\n self.fc3 = nn.Linear(20,2)\n \n self.drop = nn.Dropout(0.25)\n self.activ = F.relu\n\n def forward(self, x):\n x = self.fc1(x.view(x.size(0),-1))\n x = self.activ(x)\n x = self.drop(x)\n x = self.fc2(x)\n x1, x2 = x[:, 0:10], x[:, 10:20]\n x = self.activ(x)\n x = self.fc3(x)\n \n return x, x1, x2\n\nclass ConvNet(nn.Module):\n \"\"\"Naive convolutional net.\"\"\"\n def __init__(self):\n super(ConvNet, self).__init__()\n self.conv1 = nn.Conv2d(2, 12, kernel_size=3) #(2,14,14) to (12,12,12)\n self.conv2 = nn.Conv2d(12, 32, kernel_size=3) #(12,12,12) to (32,10,10)\n self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2) #(32,10,10) to (32,5,5)\n self.fc1 = nn.Linear(800, 100)\n self.fc2 = nn.Linear(100, 20)\n self.fc3 = nn.Linear(20, 2)\n self.drop = nn.Dropout(0.5)\n \n def forward(self, x): \n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.max_pool1(x)\n x = self.drop(x.view(x.size(0), -1))\n x = self.fc1(x)\n x = F.relu(x)\n x = self.drop(x)\n x = self.fc2(x)\n x1, x2 = x[:, 0:10], x[:, 10:20]\n x = F.relu(x)\n x = self.fc3(x)\n \n return x, x1, x2\n\nclass DigitNet(nn.Module):\n \"\"\"Inspired by LeNet5, dropout 0.5 and 2 fc layers.\"\"\"\n def __init__(self):\n super(DigitNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 12, kernel_size=3) #(1,14,14) to (12,12,12)\n self.conv2 = nn.Conv2d(12, 32, kernel_size=3) #(12,12,12) to (32,10,10)\n self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2) #(32,10,10) to (32,5,5)\n self.fc1 = nn.Linear(800, 100)\n self.fc2 = nn.Linear(100, 10)\n self.drop = nn.Dropout(0.5)\n \n def forward(self, x): \n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.max_pool1(x)\n x = self.drop(x.view(x.size(0), -1))\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n \n return x\n \n\nclass ConvSepNet(nn.Module):\n \"\"\"Run DigitNet in parallel on each channel and combine at the\n end with two fully connected layers (20->10->2). No Dropout in the f.c. 
layers.\n \"\"\"\n def __init__(self):\n super(ConvSepNet, self).__init__()\n self.mnistNet = DigitNet()\n self.fc1 = nn.Linear(20,10)\n self.fc2 = nn.Linear(10,2)\n\n\n def forward(self, x):\n x1, x2 = x[:,0:1,:,:], x[:,1:2,:,:]\n x1 = self.mnistNet(x1)\n x2 = self.mnistNet(x2) \n x = torch.cat((x1, x2), 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n \n return x, x1, x2\n \n\nclass FinalDigitNet(nn.Module):\n \"\"\"Inspired by LeNet5, dropout 0.5 and 3 fc layers.\"\"\"\n def __init__(self):\n super(FinalDigitNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 12, kernel_size=3) #(1,14,14) to (12,12,12)\n self.conv2 = nn.Conv2d(12, 32, kernel_size=3) #(12,12,12) to (32,10,10)\n self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2) #(32,10,10) to (32,5,5)\n self.fc1 = nn.Linear(800, 400)\n self.fc2 = nn.Linear(400, 100)\n self.fc3 = nn.Linear(100, 10)\n self.drop = nn.Dropout(0.5)\n \n def forward(self, x): \n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.max_pool1(x)\n x = self.drop(x.view(x.size(0), -1))\n x = self.fc1(x)\n x = F.relu(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.drop(x)\n x = self.fc3(x)\n \n return x\n \nclass FinalNet(nn.Module):\n \"\"\"DigitNet with two fully connected layers (20->10->2). No Dropout.\"\"\"\n def __init__(self):\n super(FinalNet, self).__init__()\n self.mnistNet = FinalDigitNet()\n self.fc1 = nn.Linear(20,10)\n self.fc2 = nn.Linear(10,2)\n\n\n def forward(self, x):\n x1, x2 = x[:,0:1,:,:], x[:,1:2,:,:]\n x1 = self.mnistNet(x1)\n x2 = self.mnistNet(x2) \n x = torch.cat((x1, x2), 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n \n return x, x1, x2\n\n##### Training routine ####\n \ndef train_routine(model, train_input, train_target, train_classes, test_input, test_target, test_classes):\n \"\"\"Train a model and compute its performance on train and test data.\"\"\"\n \n # Loss\n criterion = nn.CrossEntropyLoss().to(DEVICE)\n \n # Optimizer\n optimizer = optim.Adam(model.parameters(), LEARNING_RATE)\n \n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=LR_STEP, gamma = GAMMA)\n \n # Start timer\n t0 = time.time() \n \n # Training the model\n model.train(True)\n \n for e in range(EPOCHS):\n \n print('\\rTraining {}... (Epoch {}/{})'.format(model.__class__.__name__, e+1, EPOCHS), end=\"\")\n \n # f: weight of the auxiliary losses => (1-f): weight of the main loss. 
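# With AUX_LOSS = 0.5 the objective computed below works out to
#   L = 0.5 * CE(pair, target) + 0.5 * (CE(digit1, class1) + CE(digit2, class2)),
# i.e. the two digit-classification terms together carry the same weight as the
# comparison term; f = 0 would recover plain training on the pair label alone.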
\n f = AUX_LOSS\n\n for inputs, targets, classes in zip(train_input.split(BATCH_SIZE), \\\n train_target.split(BATCH_SIZE), \\\n train_classes.split(BATCH_SIZE)):\n \n output, aux1, aux2 = model(inputs)\n\n loss = (1-f) * criterion(output, targets) + f * (criterion(aux1, classes[:,0]) + criterion(aux2, classes[:,1]))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n # Updtate learning rate\n scheduler.step()\n \n\n # End timer\n t1 = time.time() \n dt = t1-t0\n \n # Evaluating model performance on train and test data\n model.train(False)\n tr_output, tr_aux1, tr_aux2 = model(train_input)\n te_output, te_aux1, te_aux2 = model(test_input)\n \n tr_acc = accuracy(tr_output, train_target)\n te_acc = accuracy(te_output, test_target)\n \n tr_acc_mnist = 0.5*(accuracyMnist(tr_aux1, train_classes[:,0]) + \\\n accuracyMnist(tr_aux2, train_classes[:,1]))\n te_acc_mnist = 0.5*(accuracyMnist(te_aux1, test_classes[:,0]) + \\\n accuracyMnist(te_aux2, test_classes[:,1]))\n \n # Showing results\n print(\"\\nTraining time : {:.2f}s\\n\".format(dt) + \\\n \"Main performance:\\n\" + \\\n \" -Train accuracy : {:.2f}%\\n\".format(100 * tr_acc) + \\\n \" -Test accuracy : {:.2f}%\\n\".format(100 * te_acc) + \\\n \"Auxiliary performance:\\n\" + \\\n \" -Train digit accuracy : {:.2f}%\\n\".format(100 * tr_acc_mnist) + \\\n \" -Test digit accuracy : {:.2f}%\\n\".format(100 * te_acc_mnist) + \\\n \"-----------------------------------\")\n\n\nif __name__ == '__main__':\n \n # Display information about training procedure\n \n print('Train and test dataset size: {}\\n'.format(N) + \\\n 'Number of epochs: {}\\n'.format(EPOCHS) + \\\n 'Batch size for stochastic optimization: {}\\n'.format(BATCH_SIZE) + \\\n 'Learning rate: {} (multiplied by {} after {} epochs)\\n'.format(LEARNING_RATE, GAMMA, LR_STEP) + \\\n 'Device used for training: {}\\n'.format(DEVICE) + \\\n 'Weight of auxiliary loss: f={}'.format(AUX_LOSS))\n\n \n # Load data and move it to DEVICE\n print('Loading the data...')\n train_input, train_target, train_classes, test_input, test_target, test_classes = prologue.generate_pair_sets(N)\n train_input, train_target, train_classes = train_input.to(DEVICE), train_target.to(DEVICE), train_classes.to(DEVICE)\n test_input, test_target, test_classes = test_input.to(DEVICE), test_target.to(DEVICE), test_classes.to(DEVICE)\n print('Data loaded.') \n \n \n # Model constructions\n print('Constructing the models:')\n myFCNet = FCNet().to(DEVICE)\n myConvNet = ConvNet().to(DEVICE)\n myConvSepNet = ConvSepNet().to(DEVICE)\n myFinalNet = FinalNet().to(DEVICE)\n print(' -FCNet: {} parameters\\n'.format(nb_param(myFCNet)) + \\\n ' -ConvNet: {} parameters\\n'.format(nb_param(myConvNet)) + \\\n ' -ConvSepNet: {} parameters\\n'.format(nb_param(myConvSepNet)) + \\\n ' -FinalNet: {} parameters\\n'.format(nb_param(myFinalNet)))\n\n # Training \n\n train_routine(myFCNet, train_input, train_target, train_classes, test_input, test_target, test_classes)\n train_routine(myConvNet, train_input, train_target, train_classes, test_input, test_target, test_classes)\n train_routine(myConvSepNet, train_input, train_target, train_classes, test_input, test_target, test_classes)\n train_routine(myFinalNet, train_input, train_target, train_classes, test_input, test_target, test_classes)\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"23951836","text":"# -*- coding: utf-8 -*-\nfrom 
__future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport localflavor.us.models\nfrom django.conf import settings\nimport django.contrib.auth.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FoodOffer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('timestamp', models.DateTimeField(auto_now=True)),\n ('address', models.TextField(max_length=1000)),\n ('description', models.TextField(max_length=2000)),\n ('picture', models.ImageField(upload_to=b'/food/')),\n ('price', models.DecimalField(max_digits=5, decimal_places=2)),\n ('max_people', models.PositiveSmallIntegerField()),\n ('offer_datetime', models.DateTimeField()),\n ],\n ),\n migrations.CreateModel(\n name='FoodRequest',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('timestamp', models.DateTimeField(auto_now=True)),\n ('accepted', models.BooleanField(default=False)),\n ('offer', models.ForeignKey(to='foodoffers.FoodOffer')),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('user_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),\n ('zip_code', localflavor.us.models.USZipCodeField(max_length=10)),\n ('prof_pic', models.ImageField(upload_to=b'/profiles/')),\n ],\n options={\n 'abstract': False,\n 'verbose_name': 'user',\n 'verbose_name_plural': 'users',\n },\n bases=('auth.user',),\n managers=[\n ('objects', django.contrib.auth.models.UserManager()),\n ],\n ),\n migrations.AddField(\n model_name='foodrequest',\n name='requester',\n field=models.ForeignKey(to='foodoffers.User'),\n ),\n migrations.AddField(\n model_name='foodoffer',\n name='user',\n field=models.ForeignKey(to='foodoffers.User'),\n ),\n ]\n","sub_path":"foober/foodoffers/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"500656176","text":"\"\"\"\nConfiguration file\n\"\"\"\nimport numpy as np\n\n# General config\n_VERBOSE_LEVEL = 1\n\n# Scripts config\n# 8 speeds between (0.8, 1.2); remove the speed with value 1\n_SPEEDS = np.delete(np.linspace(0.8, 1.2, 9), 4)\n\n# 8 semitones between (-200, 200); remove the semitone with value 0\n_SEMITONES = np.delete(np.linspace(-200, 200, 9), 4)\n\n_NOISES = ['preprocessing/noises/ambiance.wav',\n 'preprocessing/noises/crowd.wav',\n 'preprocessing/noises/street.wav',\n 'preprocessing/noises/driving.wav']\n\n\n# Core config\n_EARLY_STOP_RANGE = None\n_DATA_CSV = None\n\n\nclass Config:\n def __init__(self):\n # General config\n self.verbose_level = _VERBOSE_LEVEL\n # Core config\n self.early_stop_range = _EARLY_STOP_RANGE\n self.data_csv = _DATA_CSV\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"384774286","text":"import os\nimport random\nimport operator\nfrom treat_data.read_file import ReadFiles\nfrom treat_data.write_infiles import WriteInFiles\n\n__author__ = 'Arthur Fortes'\n\n\nclass CrossFoldValidation(object):\n def __init__(self, database, dir_folds, n_folds=10):\n self.database = database\n self.dir_folds = dir_folds\n self.n_folds = n_folds\n 
self.data = ReadFiles(self.database)\n self.separate_data = list()\n\n self.read_interactions()\n self.create_data_folds()\n self.write_data()\n\n def read_interactions(self):\n self.data.read_without_dict()\n\n def create_data_folds(self):\n random.shuffle(self.data.list_interaction)\n percent = int(float(self.data.num_interactions)/float(self.n_folds))\n\n last = -1\n for n in xrange(self.n_folds):\n initial = 1 + last\n final = (n + 1) * percent\n if n < (self.n_folds - 1):\n self.separate_data.append(self.data.list_interaction[initial:final])\n else:\n self.separate_data.append(self.data.list_interaction[initial:])\n last = final\n\n def write_data(self):\n self.dir_folds += 'folds//'\n\n if not os.path.exists(self.dir_folds):\n os.mkdir(self.dir_folds)\n\n select_fold = list()\n for n in xrange(self.n_folds):\n\n fold_dir = self.dir_folds + str(n) + \"//\"\n if not os.path.exists(fold_dir):\n os.mkdir(fold_dir)\n\n test_fold = n\n\n if test_fold not in select_fold:\n select_fold.append(test_fold)\n final_train_data = list()\n\n for f in xrange(self.n_folds):\n if f != test_fold:\n final_train_data += self.separate_data[f]\n\n final_train_data = sorted(final_train_data, key=operator.itemgetter(0, 1))\n final_test_data = sorted(self.separate_data[n], key=operator.itemgetter(0, 1))\n\n file_write_train = fold_dir + \"train.dat\"\n file_write_test = fold_dir + \"test.dat\"\n WriteInFiles(final_train_data, file_write_train).write_train_and_test(final_test_data, file_write_test)\n","sub_path":"split_base/n_cross_fold_validation.py","file_name":"n_cross_fold_validation.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"158869622","text":"import datetime\nfrom core.database import session, TeamInvite\nfrom core.errors import ValidationError\nfrom . 
import teams, users\nfrom sqlalchemy.sql.expression import and_\nfrom itsdangerous import URLSafeSerializer\nfrom config import Config\n\n\nconfig = Config()\n\n\ndef get(team_slug, username):\n return TeamInvite.query.filter(and_(TeamInvite.team_slug == team_slug, TeamInvite.username == username)).first()\n\n\ndef has_accepted_invite(team_slug, username):\n return TeamInvite.query.filter(and_(TeamInvite.team_slug == team_slug, TeamInvite.username == username,\n TeamInvite.accepted == True)).first() is not None\n\n\ndef create(team_slug, username):\n if not users.get(username):\n raise ValidationError(\"That Github user is currently not a part of Source League\")\n\n if get(team_slug, username):\n raise ValidationError(\"Invitation already sent\")\n\n invite = TeamInvite()\n invite.team_slug = team_slug\n invite.username = username\n invite.created = datetime.datetime.utcnow()\n invite.updated = datetime.datetime.utcnow()\n\n session.add(invite)\n session.commit()\n\n return get(team_slug, username)\n\n\ndef accept(team_slug, username):\n invite = get(team_slug, username)\n\n if invite:\n invite.accepted = True\n invite.responded_at = datetime.datetime.utcnow()\n invite.updated = datetime.datetime.utcnow()\n\n session.merge(invite)\n session.commit()\n\n teams.add_to_team(team_slug, username)\n\n\ndef decline(team_slug, username):\n invite = get(team_slug, username)\n\n if invite:\n invite.accepted = False\n invite.responded_at = datetime.datetime.utcnow()\n invite.updated = datetime.datetime.utcnow()\n\n session.merge(invite)\n session.commit()\n\n # unlike accept(), a declined invitation must not add the user to the team\n\n\ndef delete_all_for_team(slug):\n team = teams.get(slug)\n\n for invite in team.invites:\n session.delete(invite)\n\n session.commit()\n\n\ndef serialize(team_invite):\n serializer = URLSafeSerializer(config.app_secret())\n return serializer.dumps([team_invite.team_slug, team_invite.username])\n\n\ndef deserialize(serialized):\n serializer = URLSafeSerializer(config.app_secret())\n response = serializer.loads(serialized)\n return get(response[0],response[1])\n","sub_path":"core/ops/team_invites.py","file_name":"team_invites.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} {"seq_id":"520396497","text":"from utils import get_page\r\nfrom lxml import etree\r\nfrom db import RedisClient\r\nimport random\r\nimport time\r\nclass Crawl_ip(object):\r\n def __init__(self):\r\n self.db = RedisClient()\r\n def ip_xici(self):\r\n url = 'http://www.xicidaili.com/'\r\n con = get_page(url)\r\n html = etree.HTML(con)\r\n ip_list = html.xpath('//tr/td[2]/text()')\r\n ip_port = html.xpath('//tr/td[3]/text()')\r\n for i in range(100):\r\n ip = ip_list[i] + ':' + ip_port[i]\r\n self.db.add(ip)\r\n def ip_66(self):\r\n preurl = 'http://www.66ip.cn/'\r\n for i in range(100):\r\n url = preurl+str(i)+'.html'\r\n con = get_page(url)\r\n if con:\r\n html = etree.HTML(con)\r\n ip_list = html.xpath('//tr')\r\n for i in range(2,len(ip_list)):\r\n ip = ip_list[i].xpath('td[1]/text()')[0]+\":\"+ip_list[i].xpath('td[2]/text()')[0]\r\n self.db.add(ip,10)\r\n intr = random.randint(5,15)\r\n time.sleep(intr*0.1)\r\n def run(self):\r\n self.ip_66()\r\n self.ip_xici()\r\n\r\nif __name__ == '__main__':\r\n crawl = Crawl_ip()\r\n 
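Illustrative round trip of the invite-token helpers from team_invites.py above (`create`, `serialize` and `deserialize` are the module's own functions; the team slug and username here are made up, and the signing secret comes from `config.app_secret()`):

invite = create("acme-team", "octocat")   # stores and returns the TeamInvite
token = serialize(invite)                 # URL-safe string, e.g. for an email link
assert deserialize(token).username == "octocat"  # resolves back to the same row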
crawl.run()\r\n\r\n\r\n\r\n","sub_path":"crawl_ip.py","file_name":"crawl_ip.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"259695835","text":"from mock import Mock\nimport fluenttest\n\n\nclass TornadoHandlerTestCase(fluenttest.TestCase):\n\n @classmethod\n def arrange(cls):\n super(TornadoHandlerTestCase, cls).arrange()\n cls.application = Mock()\n cls.application.ui_methods = {}\n\n cls.request = Mock()\n cls.request.headers = {}\n","sub_path":"tests/unit/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"229456561","text":"from conans import ConanFile, CMake, tools\nimport os\nfrom conans.errors import ConanInvalidConfiguration\nimport textwrap\n\nclass FastDDSConan(ConanFile):\n\n name = \"fast-dds\"\n license = \"Apache-2.0\"\n homepage = \"https://fast-dds.docs.eprosima.com/\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"The most complete OSS DDS implementation for embedded systems.\"\n topics = (\"DDS\", \"Middleware\", \"IPC\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_ssl\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_ssl\": False\n }\n generators = \"cmake\", \"cmake_find_package\"\n _cmake = None\n exports_sources = [\"patches/**\", \"CMakeLists.txt\"]\n\n @property\n def _pkg_share(self):\n return os.path.join(\n self.package_folder,\n \"share\"\n )\n\n @property\n def _pkg_tools(self):\n return os.path.join(\n self.package_folder,\n \"tools\"\n )\n\n @property\n def _pkg_bin(self):\n return os.path.join(\n self.package_folder,\n \"bin\"\n )\n\n @property\n def _module_subfolder(self):\n return os.path.join(\n \"lib\",\n \"cmake\"\n )\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(\n self._module_subfolder,\n \"conan-target-properties.cmake\"\n )\n \n @property\n def _minimum_cpp_standard(self):\n return 11\n\n @property\n def _minimum_compilers_version(self):\n return {\n \"Visual Studio\": \"16\",\n \"gcc\": \"5\",\n \"clang\": \"3.9\",\n \"apple-clang\": \"8\",\n }\n\n @staticmethod\n def _create_cmake_module_alias_targets(module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\".format(alias=alias, aliased=aliased))\n tools.save(module_file, content)\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def _configure_cmake(self):\n if not self._cmake:\n self._cmake = CMake(self) \n self._cmake.definitions[\"BUILD_MEMORY_TOOLS\"] = False\n self._cmake.definitions[\"NO_TLS\"] = not self.options.with_ssl\n self._cmake.definitions[\"SECURITY\"] = self.options.with_ssl\n self._cmake.definitions[\"EPROSIMA_INSTALLER_MINION\"] = False\n self._cmake.configure()\n return self._cmake\n\n def requirements(self):\n 
self.requires(\"tinyxml2/7.1.0\")\n self.requires(\"asio/1.18.2\")\n self.requires(\"fast-cdr/1.0.21\")\n self.requires(\"foonathan-memory/0.7.0\")\n self.requires(\"boost/1.73.0\")\n if self.options.with_ssl:\n self.requires(\"openssl/1.1.1k\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version], strip_root=True,\n destination=self._source_subfolder)\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n tools.check_min_cppstd(self, self._minimum_cpp_standard)\n min_version = self._minimum_compilers_version.get(str(self.settings.compiler))\n if not min_version:\n self.output.warn(\"{} recipe lacks information about the {} compiler support.\".format(\n self.name, self.settings.compiler))\n else:\n if tools.Version(self.settings.compiler.version) < min_version:\n raise ConanInvalidConfiguration(\"{} requires C++{} support. The current compiler {} {} does not support it.\".format(\n self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version))\n if self.settings.os == \"Windows\":\n if (\"MT\" in self.settings.compiler.runtime and self.options.shared):\n # This combination leads to an fast-dds error when linking\n # linking dynamic '*.dll' and static MT runtime\n raise ConanInvalidConfiguration(\"Mixing a dll {} library with a static runtime is a bad idea\".format(self.name))\n\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(self._pkg_share)\n self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n tools.rename(\n src=self._pkg_tools,\n dst=os.path.join(self._pkg_bin, \"tools\")\n )\n tools.remove_files_by_mask(\n directory=os.path.join(self.package_folder, \"lib\"),\n pattern=\"*.pdb\"\n )\n tools.remove_files_by_mask(\n directory=os.path.join(self.package_folder, \"bin\"),\n pattern=\"*.pdb\"\n )\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {\"fastrtps\": \"fastdds::fastrtps\"}\n )\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"fastdds\"\n self.cpp_info.names[\"cmake_find_multi_package\"] = \"fastdds\"\n # component fastrtps\n self.cpp_info.components[\"fastrtps\"].names[\"cmake_find_package\"] = \"fastrtps\"\n self.cpp_info.components[\"fastrtps\"].names[\"cmake_find_multi_package\"] = \"fastrtps\"\n self.cpp_info.components[\"fastrtps\"].libs = tools.collect_libs(self)\n self.cpp_info.components[\"fastrtps\"].requires = [\n \"fast-cdr::fast-cdr\",\n \"asio::asio\",\n \"tinyxml2::tinyxml2\",\n \"foonathan-memory::foonathan-memory\",\n \"boost::boost\"\n ]\n if self.settings.os in [\"Linux\", \"Macos\", \"Neutrino\"]:\n self.cpp_info.components[\"fastrtps\"].system_libs.append(\"pthread\")\n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"fastrtps\"].system_libs.extend([\"rt\", \"dl\", \"atomic\"])\n elif self.settings.os == \"Windows\":\n self.cpp_info.components[\"fastrtps\"].system_libs.extend([\"iphlpapi\",\"shlwapi\"])\n if self.options.shared:\n self.cpp_info.components[\"fastrtps\"].defines.append(\"FASTRTPS_DYN_LINK\")\n if self.options.with_ssl:\n self.cpp_info.components[\"fastrtps\"].requires.append(\"openssl::openssl\")\n self.cpp_info.components[\"fastrtps\"].builddirs.append(self._module_subfolder)\n self.cpp_info.components[\"fastrtps\"].build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n 
self.cpp_info.components[\"fastrtps\"].build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n # component fast-discovery\n self.cpp_info.components[\"fast-discovery-server\"].names[\"cmake_find_package\"] = \"fast-discovery-server\"\n self.cpp_info.components[\"fast-discovery-server\"].names[\"cmake_find_multi_package\"] = \"fast-discovery-server\"\n self.cpp_info.components[\"fast-discovery-server\"].bindirs = [\"bin\"]\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH env var for fast-dds::fast-discovery-server with : {}\".format(bin_path)),\n self.env_info.PATH.append(bin_path)\n # component tools\n self.cpp_info.components[\"tools\"].names[\"cmake_find_package\"] = \"tools\"\n self.cpp_info.components[\"tools\"].names[\"cmake_find_multi_package\"] = \"tools\"\n self.cpp_info.components[\"tools\"].bindirs = [os.path.join(\"bin\",\"tools\")]\n bin_path = os.path.join(self._pkg_bin, \"tools\")\n self.output.info(\"Appending PATH env var for fast-dds::tools with : {}\".format(bin_path)),\n self.env_info.PATH.append(bin_path)\n","sub_path":"recipes/fast-dds/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"311475375","text":"\"\"\"\nBase model for multilingual models.\n\"\"\"\nfrom new import classobj\n\nfrom django.db import models\nfrom django.db.models.base import ModelBase\n\nfrom multilingual.languages import get_all\n\nfrom .fields import TranslationProxyField, TranslationRelation, TRANSLATION_FIELD_NAME\nfrom .manager import MultilingualManager\nfrom .options import MultilingualOptions\nfrom .translation import TranslationModelBase, TranslationModel\n\n# TODO: inheritance of multilingual models and translation models\n\n\nclass MultilingualModelBase(ModelBase):\n def __new__(cls, name, bases, attrs):\n ### START - Build translation model\n # At first we build translation model so we can add it to attrs\n # Purpose is to not call 'add_to_class' after model is registered\n\n # We have to copy attributes because they change during creation of a model\n trans_attrs = attrs.copy()\n\n # Make a copy of Meta, so changes in it when creating a translation model does not affect\n # creation of multilingual model\n if attrs.has_key('Meta'):\n trans_attrs['Meta'] = classobj.__new__(classobj, 'Meta', (attrs['Meta'],), attrs['Meta'].__dict__.copy())\n\n translation_name = name + \"Translation\"\n trans_attrs['multilingual_model_name'] = name\n c_trans_model = TranslationModelBase(translation_name, (TranslationModel, ), trans_attrs)\n ### END - Build translation model\n\n ### And some changes before we build multilingual model\n meta = attrs.get('Meta', None)\n abstract = getattr(meta, 'abstract', False)\n\n # Add translation model to attrs\n attrs['translation_model'] = c_trans_model\n\n if not abstract:\n # Add translation relations\n for language_code in [None] + get_all():\n field = TranslationRelation(c_trans_model, base_name=TRANSLATION_FIELD_NAME,\n language_code=language_code)\n attrs[field.name] = field\n\n # Add proxies for translated fields into attrs\n for field in (c_trans_model._meta.fields + c_trans_model._meta.many_to_many):\n if field.name in ('id', 'language_code', 'master'):\n continue\n for language_code in get_all():\n proxy = TranslationProxyField(field.name, language_code)\n attrs[proxy.name] = proxy\n proxy = TranslationProxyField(field.name, None)\n attrs[proxy.name] = 
proxy\n proxy = TranslationProxyField(field.name, None, fallback=True)\n attrs[proxy.name] = proxy\n\n # Handle manager\n if not 'objects' in attrs:\n # If there is no manager, set MultilingualManager as manager\n attrs['objects'] = MultilingualManager()\n elif not isinstance(attrs['objects'], MultilingualManager):\n # Make sure that if the class specifies objects then it is a subclass of our Manager.\n\n # Don't check other managers since someone might want to have a non-multilingual manager, but assigning\n # a non-multilingual manager to objects would be a common mistake.\n raise ValueError(\"Model %s specifies translations, so its 'objects' manager must be a subclass of \"\\\n \"multilingual.Manager.\" % name)\n\n # And now just create multilingual model\n return super(MultilingualModelBase, cls).__new__(cls, name, bases, attrs)\n\n def add_to_class(cls, name, value):\n # Catch meta and change its class, it is HACK, but it is the least ugly one\n if name == '_meta':\n value = MultilingualOptions(value.meta, value.app_label)\n super(MultilingualModelBase, cls).add_to_class(name, value)\n\n\nclass MultilingualModel(models.Model):\n __metaclass__ = MultilingualModelBase\n\n class Meta:\n abstract = True\n\n def save(self, force_insert=False, force_update=False, using=None):\n \"\"\"\n Change save method to save translations when multilingual object is saved.\n \"\"\"\n super(MultilingualModel, self).save(force_insert=force_insert, force_update=force_update, using=using)\n for field in self._meta.fields:\n if not isinstance(field, TranslationRelation):\n continue\n\n # Find translation. Use cache name to prevent any unnecessary SQL queries.\n # If it isn't loaded, it isn't changed.\n attr_name = field.get_cache_name()\n translation = getattr(self, attr_name, None)\n\n if translation is None:\n # Translation does not exist, continue with next\n continue\n\n # Set the master ID. 
The master and translation could be just created.\n        translation.master_id = self.pk\n        translation.save()\n","sub_path":"multilingual/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"588475436","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\n# Find the shortest paths between all pairs of nodes and take the smallest row sum (Floyd-Warshall).\nT = int(input())\n\nfor tc in range(1, T+1):\n    inputs = list(map(int, input().split()))\n    N = inputs[0]\n    lines = inputs[1:]\n    networks = [lines[i*N:i*N+N] for i in range(N)]\n\n    dp = [[1001]*(N+1) for _ in range(N+1)]\n\n    for i in range(N):\n        for j in range(N):\n            if networks[i][j] == 1:\n                dp[i+1][j+1] = 1\n\n    for k in range(1, N+1):\n        for i in range(1, N+1):\n            if k != i:\n                for j in range(1, N+1):\n                    if j != i and j != k:\n                        dp[i][j] = min(dp[i][j], dp[i][k]+dp[k][j])\n    minV = 1000*N\n    for i in range(1, N+1):\n        total = 0\n        for j in range(1, N+1):\n            value = dp[i][j]\n            if 0 < value < 1001:\n                total += value\n        if total < minV:\n            minV = total\n    print(\"#{} {}\".format(tc, minV))\n","sub_path":"PYTHON/SWEXPERT/1263_사람네트워크2/1263_플로이드와샬.py","file_name":"1263_플로이드와샬.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"137551351","text":"# Given an integer, write a function to determine if it is a power of two.\n\n# Example 1:\n\n# Input: 1\n# Output: true \n# Explanation: 2^0 = 1\n# Example 2:\n\n# Input: 16\n# Output: true\n# Explanation: 2^4 = 16\n# Example 3:\n\n# Input: 218\n# Output: false\n\n\nclass solution:\n    def isPowerOfTwo(self, n: int) -> bool:\n        # for i in range(0,16):\n        #     if n==(1<<i): return True\n        # return False\n\n        # while n>1:\n        #     if n%2==1:\n        #         return False\n        #     else:\n        #         n /= 2\n        # return True\n\n\n        if n<1: return False\n        if n==1: return True\n        while n>1:\n            if n%2==1: return False\n            n//=2\n        return True\nprint(solution().isPowerOfTwo(2))\n","sub_path":"231_power_of_two.py","file_name":"231_power_of_two.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"162570805","text":"import pickle\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport urllib.request\r\nimport json\r\nimport re\r\n\r\nfrom datetime import datetime\r\nfrom FileReadWrite import FileReadWrite\r\n\r\nhistorical_records_pkl = FileReadWrite('historical_records.pkl')\r\nuser_project_matrix_csv = FileReadWrite('user_project_matrix.csv')\r\npage = 0\r\nrecord = 0\r\n\r\nif historical_records_pkl.exists() and user_project_matrix_csv.exists():\r\n    historical_records = historical_records_pkl.get_data_pkl()\r\n    user_project_matrix = user_project_matrix_csv.get_data_csv()\r\nelse:\r\n    historical_records = pd.DataFrame(columns = ['profile', 'project', 'type', 'whens', 'origin', 'repetitions'])\r\n    user_project_matrix = pd.DataFrame(columns=['user'])\r\n\r\npage = int(historical_records.shape[0]/500)\r\nrecord = int(historical_records.shape[0]%500)\r\n\r\ndef update_records():\r\n    global page\r\n    global record\r\n    print('filter current page')\r\n    cleaned = __filter_current_page(page, record)\r\n    if len(cleaned['activity']) != 0:\r\n        __update_data(cleaned)\r\n\r\n    while True:\r\n        page = int(page) + 1\r\n        print('load page', page)\r\n# for testing\r\n        # if page > 6:\r\n        #     break\r\n\r\n        page = str(page)\r\n        url = 
'https://scistarter.org/api/stream-page?page='+page+'&key=5255cf33e739e9ecc20b9b260cb68567fbc81f6b1bfb4808ba2c39548501f0a1523e2e97d79563645cba40a09894bfdb277779d1145a596f237ebdc166afcf50'\r\n content = __getPage(url)\r\n if len(content['activity']) == 0:\r\n break\r\n else:\r\n __update_data(content)\r\n\r\n historical_records_pkl.put_data_pkl(historical_records)\r\n user_project_matrix_csv.put_data_csv(user_project_matrix)\r\n __put_update_time()\r\n\r\n\r\ndef __getPage(url):\r\n page = urllib.request.urlopen(url)\r\n content = page.read().decode(\"utf8\")\r\n page.close()\r\n JSON_full = {'activity': []}\r\n for entry in content.splitlines():\r\n# if '\"project\": null' in entry:\r\n# continue\r\n cleaned_text = __extractattribute(entry)\r\n JSON_single = __JSONconverter(cleaned_text)\r\n JSON_full['activity'].append(JSON_single)\r\n\r\n # return json.dumps(JSON_full)\r\n return JSON_full\r\n\r\ndef __filter_current_page(page, record):\r\n page = str(page)\r\n cleaned_data = {'activity': []}\r\n url = 'https://scistarter.org/api/stream-page?page='+page+'&key=5255cf33e739e9ecc20b9b260cb68567fbc81f6b1bfb4808ba2c39548501f0a1523e2e97d79563645cba40a09894bfdb277779d1145a596f237ebdc166afcf50'\r\n content = __getPage(url)\r\n cleaned = content['activity'][record:]\r\n cleaned_data['activity'] = cleaned\r\n return cleaned_data\r\n\r\ndef __JSONconverter (cleaned_text):\r\n ### input: cleaned attribute string: ['\"profile\": \"c3174748ab29f73d8c6226d0c2171aeb\"', '\"when\": \"2016-07-22 14:07:43\"', '\"project\": 25']\r\n ### output: JSON for one entry\r\n data = {}\r\n for a in cleaned_text:\r\n if ('\"profile\"') in a:\r\n index = [m.start() for m in re.finditer('\"', a)]\r\n user = a[index[-2]+1:index[-1]]\r\n data['user'] = user\r\n elif ('\"project\"') in a:\r\n index = [m.start() for m in re.finditer(':', a)]\r\n project = a[index[-1]+2:]\r\n data['project'] = int(project)\r\n elif ('\"when\"') in a:\r\n index = [m.start() for m in re.finditer('\"', a)]\r\n time = a[index[-2]+1:index[-1]]\r\n data['when'] = time\r\n elif ('\"type\"') in a:\r\n index = [m.start() for m in re.finditer('\"', a)]\r\n mtype = a[index[-2]+1:index[-1]]\r\n data['type'] = mtype\r\n elif ('\"origin\"') in a:\r\n index = [m.start() for m in re.finditer('\"', a)]\r\n origin = a[index[-2]+1:index[-1]]\r\n data['origin'] = origin\r\n elif ('\"repetitions\"') in a:\r\n index = [m.start() for m in re.finditer(':', a)]\r\n rep = a[index[-1]+2:]\r\n data['repetitions'] = int(rep)\r\n else:\r\n print(\"0\")\r\n # json_data = json.dumps(data)\r\n\r\n return data\r\n\r\ndef __extractattribute (entry):\r\n ### input: single entry, such as \"'{\"origin\": \"Unspecified\", \"profile\": \"c3174748ab29f73d8c6226d0c2171aeb\", \"extra\": \"\", \"repetitions\": 1, \"profile_utm_campaign\": \"\", \"profile_referrer\": \"\", \"duration\": 0.0, \"profile_utm_term\": \"\", \"authenticated\": true, \"profile_origin\": \"\", \"where\": null, \"when\": \"2016-07-22 14:07:43\", \"profile_utm_medium\": \"\", \"project\": 25, \"magnitude\": 1, \"profile_utm_source\": \"\", \"profile_utm_content\": \"\", \"type\": \"Participated\"}'\"\r\n ### output: user, time, project, such as ['\"profile\": \"c3174748ab29f73d8c6226d0c2171aeb\"', '\"when\": \"2016-07-22 14:07:43\"', '\"project\": 25']\r\n attribute_list = entry.split(\", \")\r\n cleaned_text = list(filter (lambda a: ('\"profile\"' in a or '\"when\"' in a or '\"project\"'in a or '\"type\"' in a or '\"origin\"' in a or '\"repetitions\"' in a), attribute_list))\r\n return cleaned_text\r\n\r\ndef 
__update_data(cleaned):\r\n for entry in cleaned['activity']:\r\n user = entry['user']\r\n project = entry['project']\r\n when = entry['when']\r\n origin = entry['origin']\r\n mtype = entry['type']\r\n repetitions = entry['repetitions']\r\n\r\n # Update user project matrix\r\n if not (user in list(user_project_matrix['user'])):\r\n new_row_number = user_project_matrix.shape[0]\r\n user_project_matrix.loc[new_row_number] = [user] + list(np.zeros(user_project_matrix.shape[1]-1,dtype=int))\r\n if not (str(project) in list(user_project_matrix)):\r\n user_project_matrix[str(project)] = 0\r\n # old_value = data[data['user']==user][str(project)]\r\n user_project_matrix.loc[user_project_matrix['user']==user, str(project)] = 1\r\n\r\n # Update historical records\r\n historical_row = historical_records.shape[0]\r\n historical_records.loc[historical_row] = [user, project, mtype, when, origin, repetitions]\r\n\r\ndef __put_update_time():\r\n if(os.path.isfile('update_times.txt')):\r\n file = open('update_times.txt', 'a+')\r\n file.write('\\n'+datetime.now().strftime(\"%Y-%m-%d, %H:%M:%S\"))\r\n file.close()\r\n else:\r\n file = open('update_times.txt', 'w+')\r\n file.write(datetime.now().strftime(\"%Y-%m-%d, %H:%M:%S\"))\r\n file.close()\r\n\r\nif __name__ == '__main__':\r\n update_records()\r\n","sub_path":"Updater.py","file_name":"Updater.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"609191017","text":"\"\"\"\nPython Flight Mechanics Engine (PyFME).\nCopyright (c) AeroPython Development Team.\nDistributed under the terms of the MIT License.\n\"\"\"\n\nfrom pyfme.environment.environment import Environment\nfrom pyfme.aircrafts import Component, Controller\nfrom pyfme.aircrafts.components import Aircraft\nimport numpy as np\n\n\nclass Propeller(Component):\n \"\"\"A propeller. The propeller is generating a thrust depending on the value\n of the internal controller, which is automatically created. Such controller\n may take values between 0 (minimum thrust) and 1 (maximum thrust)\n \"\"\"\n def __init__(self, r, omega, J, Ct,\n vec=np.asarray([1, 0, 0]),\n controller_name='delta_t',\n cog=np.zeros(3, dtype=np.float64),\n mass=0.0,\n inertia=np.zeros((3, 3), dtype=np.float64),\n Sw=0.0,\n parent=None):\n \"\"\"Create a new propeller\n\n Parameters\n ----------\n r : float\n Propeller radius (m)\n omega : array_like\n List of considered propeller angular velocities (RPM). The current\n rpm are linearly interpolated using the controller value, which can\n take values between 0 and 1\n J : array_like\n Advance ratio considered values. The propeller thrust value will be\n computed getting first the current advance ratio, interpolating\n later the trhust coefficient using this array and ``Ct``\n Ct : array_like\n Thrust coeff. considered values. The propeller thrust value will be\n computed getting first the current advance ratio, interpolating\n later the trhust coefficient using this array and ``J``\n vec : array_like\n Thrust direction vector\n controller_name : string\n Name of the associated controller to be automatically generated\n cog : array_like\n Local x, y, z coordinates -i.e. 
referred to the considered center\n            of the aircraft- of the center of gravity (m, m, m)\n        mass : float\n            Mass of the component (kg)\n        inertia : array_like\n            3x3 tensor of inertia of the component (kg * m2) for the upright\n            aircraft.\n            Current equations assume that the global aircraft has a symmetry\n            plane (x_b - z_b), thus J_xy and J_yz must be null\n        Sw : float\n            Wetted surface (m2)\n        parent : Component\n            Parent component which owns the current component.\n        \"\"\"\n        super().__init__(cog, mass, inertia, Sw, parent=parent)\n\n        # Velocities\n        self.__r = r\n        self.__delta_t = np.linspace(0, 1, num=len(omega))\n        self.__omega = np.asarray(omega)\n        self.__J = J\n        self.__Ct = Ct\n        self.__vec = vec\n        self.controller = Controller(controller_name, 0.0, 1.0)\n\n    @property\n    def r(self):\n        \"\"\"Propeller radius (m)\n\n        Returns\n        -------\n        r : float\n            Propeller radius (m)\n        \"\"\"\n        return self.__r\n\n    @r.setter\n    def r(self, r):\n        \"\"\"Set the propeller radius (m)\n\n        Parameters\n        ----------\n        r : float\n            Propeller radius (m)\n        \"\"\"\n        self.__r = r\n\n    @property\n    def omega(self):\n        \"\"\"List of considered propeller angular velocities (RPM)\n\n        Returns\n        -------\n        omega : array_like\n            List of considered propeller angular velocities (RPM). The current\n            rpm are linearly interpolated using the controller value, which can\n            take values between 0 and 1\n        \"\"\"\n        return self.__omega\n\n    @omega.setter\n    def omega(self, omega):\n        \"\"\"Set the list of considered propeller angular velocities (RPM)\n\n        Parameters\n        ----------\n        omega : array_like\n            List of considered propeller angular velocities (RPM). The current\n            rpm are linearly interpolated using the controller value, which can\n            take values between 0 and 1\n        \"\"\"\n        self.__delta_t = np.linspace(0, 1, num=len(omega))\n        self.__omega = np.asarray(omega)\n\n    @property\n    def J(self):\n        \"\"\"Advance ratio considered values\n\n        Returns\n        -------\n        J : array_like\n            Advance ratio considered values. The propeller thrust value will be\n            computed getting first the current advance ratio, interpolating\n            later the thrust coefficient using this array and ``Ct``\n        \"\"\"\n        return self.__J\n\n    @J.setter\n    def J(self, J):\n        \"\"\"Set the advance ratio considered values\n\n        Parameters\n        ----------\n        J : array_like\n            Advance ratio considered values. The propeller thrust value will be\n            computed getting first the current advance ratio, interpolating\n            later the thrust coefficient using this array and ``Ct``\n        \"\"\"\n        self.__J = J\n\n    @property\n    def Ct(self):\n        \"\"\"Thrust coeff. considered values\n\n        Returns\n        -------\n        Ct : array_like\n            Thrust coeff. considered values. The propeller thrust value will be\n            computed getting first the current advance ratio, interpolating\n            later the thrust coefficient using this array and ``J``\n        \"\"\"\n        return self.__Ct\n\n    @Ct.setter\n    def Ct(self, Ct):\n        \"\"\"Set the thrust coeff. considered values\n\n        Parameters\n        ----------\n        Ct : array_like\n            Thrust coeff. considered values. 
The propeller thrust value will be\n            computed getting first the current advance ratio, interpolating\n            later the thrust coefficient using this array and ``J``\n        \"\"\"\n        self.__Ct = Ct\n\n    @property\n    def vec(self):\n        \"\"\"Thrust direction vector\n\n        Returns\n        -------\n        vec : array_like\n            Thrust direction vector\n        \"\"\"\n        return self.__vec\n\n    @vec.setter\n    def vec(self, vec):\n        \"\"\"Set the thrust direction vector\n\n        Parameters\n        ----------\n        vec : array_like\n            Thrust direction vector\n        \"\"\"\n        self.__vec = vec\n\n    def calculate_forces_and_moments(self):\n        \"\"\"Compute the forces and moments of the global aircraft collecting all\n        the subcomponents\n\n        Returns\n        -------\n        f : array_like\n            Drag, lateral and lift forces (N)\n        m : array_like\n            Roll, pitch and yaw moments (N * m)\n        \"\"\"\n        f, m = super().calculate_forces_and_moments()\n\n        aircraft = self.top_node()\n        assert isinstance(aircraft, Aircraft)\n        if aircraft.environment is None:\n            raise Warning('aircraft.environment is None')\n\n        # Get the airspeed (just in case we have an available aircraft)\n        V = np.zeros(3, dtype=np.float64)\n        # FIXME: Vectorial velocities should be considered to be able to model\n        # other aircraft types, like helicopters\n        V[0] = aircraft.TAS\n        V = np.dot(V, self.__vec)\n\n        delta_t = self.controller.value\n        rho = aircraft.environment.rho\n        omega = np.interp(delta_t, self.__delta_t, self.__omega)  # rpm\n        omega_RAD = (omega * 2.0 * np.pi) / 60.0  # rad/s\n\n        J = (np.pi * V) / (omega_RAD * self.__r)\n        Ct = np.interp(J, self.__J, self.__Ct)\n        T = (2.0 / np.pi)**2 * rho * (omega_RAD * self.__r)**2 * Ct  # N\n\n        ff = T * self.__vec\n        r = self.cog(use_subcomponents=False) - self.cog()\n        mm = np.cross(r, ff)\n\n        return f + ff, m + mm\n","sub_path":"src/pyfme/aircrafts/components/propeller.py","file_name":"propeller.py","file_ext":"py","file_size_in_byte":7636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"174835144","text":"from kafka import KafkaProducer\nimport time\nimport requests\nimport json\n\n\nkafka_bootstrap_servers = 'localhost:9092'\nkafka_topic_name = 'sample_topic'\n\nproducer = KafkaProducer(bootstrap_servers = kafka_bootstrap_servers,\nvalue_serializer = lambda v: json.dumps(v).encode('utf-8'))\n\njson_message = None\ncity_name = None\ntemperature = None\nhumidity = None\nopenweathermap_api_endpoint = None\nappid = None\n\ndef get_weather_detail(openweathermap_api_endpoint):\n    print(openweathermap_api_endpoint)\n    api_response = requests.get(openweathermap_api_endpoint)\n    json_data = api_response.json()\n    city_name = json_data['name']\n    humidity = json_data['main']['humidity']\n    temperature = json_data['main']['temp']\n    json_message = {'CityName': city_name,\n    \"temperature\": temperature,\n    \"Humidity\": humidity,\n    \"CreationTime\": time.strftime(\"%y-%m-%d %H:%M:%S\")}\n    return json_message\n\n\ndef get_apikey():\n    with open('weatherapikey.json') as f:\n        return json.load(f)['weatherdetail']\n\n# a = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q=Chennai&appid=98a1502877eff8b08da85801cf53cdc5\")\n# b = a.json()\n# print(b['name'])\n\n\nwhile True:\n    city_name = 'Hyderabad'\n    api_key = get_apikey()\n    openweathermap_api_endpoint = \"http://api.openweathermap.org/data/2.5/weather?q=\" + city_name + \"&appid=\" + api_key\n    json_message = get_weather_detail(openweathermap_api_endpoint)\n    producer.send(kafka_topic_name, json_message)\n    print(json_message)\n    time.sleep(2)\n\n\n    city_name = 'Chennai'\n    api_key = get_apikey()\n    
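# Hedged sketch, not in the original script: the four per-city blocks in this\n    # loop repeat the same fetch-and-publish steps and could be collapsed into:\n    #\n    #   for city_name in ('Hyderabad', 'Chennai', 'Mumbai', 'Bangalore'):\n    #       url = (\"http://api.openweathermap.org/data/2.5/weather?q=\"\n    #              + city_name + \"&appid=\" + get_apikey())\n    #       json_message = get_weather_detail(url)\n    #       producer.send(kafka_topic_name, json_message)\n    #       print(json_message)\n    #       time.sleep(2)\n    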
openweathermap_api_endpoint = \"http://api.openweathermap.org/data/2.5/weather?q=\" + city_name + \"&appid=\" + api_key\n json_message = get_weather_detail(openweathermap_api_endpoint)\n producer.send(kafka_topic_name, json_message)\n print(json_message)\n time.sleep(2)\n\n\n city_name = 'Mumbai'\n api_key = get_apikey()\n openweathermap_api_endpoint = \"http://api.openweathermap.org/data/2.5/weather?q=\" + city_name + \"&appid=\" + api_key\n json_message = get_weather_detail(openweathermap_api_endpoint)\n producer.send(kafka_topic_name, json_message)\n print(json_message)\n time.sleep(2)\n\n\n\n city_name = 'Bangalore'\n api_key = get_apikey()\n openweathermap_api_endpoint = \"http://api.openweathermap.org/data/2.5/weather?q=\" + city_name + \"&appid=\" + api_key\n json_message = get_weather_detail(openweathermap_api_endpoint)\n producer.send(kafka_topic_name, json_message)\n print(json_message)\n time.sleep(2)\n\n\n\n\n","sub_path":"WeatherDataToTopic.py","file_name":"WeatherDataToTopic.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"64256799","text":"#!/usr/bin/python3\r\n\r\n######################################################################\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\r\n# Python Program to row objects\t\t\t\t \t\t\t\t # \r\n# Created on : 13/06/2020\t\t\t\t\t\t\t\t\t #\r\n# Author : Vikas Bansode\t\t\t\t\t\t\t\t #\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\r\n######################################################################\r\n\r\n\r\n\r\nimport sqlite3\r\nimport sys\r\n\r\ndb_filename = 'todo.db'\r\n\r\nwith sqlite3.connect(db_filename) as conn:\r\n\tconn.row_factory = sqlite3.Row\r\n\r\n\tcursor = conn.cursor()\r\n\r\n\tcursor.execute(\"\"\"\r\n\t\tselect name,description,deadline from project\r\n\t\twhere name = 'singh'\r\n\t\t\"\"\")\r\n\tname,description,deadline = cursor.fetchone()\r\n\r\n\tprint(\"Project details for {} ({})\\n due {}\".format(\r\n\t\tdescription,name,deadline))\r\n\r\n\tcursor.execute(\"\"\"\"\r\n\t\tselect id, priority, status, deadline, details from task\r\n\t\twhere project = 'singh' order by deadline\r\n\t\t\"\"\"\r\n\t\t)\r\n\r\n\tprint(\"\\nNext 5 task: \")\r\n\tfor row in cursor.fetchmany(5):\r\n\t\tprint('{:2d}[{:d}] {:<25} [{:<8}] ({})'.format(\r\n\t\t\trow['id'],row['priority'],row['details'],\r\n\t\t\trow['status'],row['deadline'],))","sub_path":"16.Database_program/09.row_objects.py","file_name":"09.row_objects.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"464628611","text":"import requests\n\ndef obtain_data(url):\n response = requests.get(url)\n if response.status_code == 200:\n print(\"OK\")\n return response.json()\n\n\n# def findSameUrl\n# docks:\n# http://docs.python-requests.org/en/master/user/quickstart/#response-content\n\nurl = \"https://jsonplaceholder.typicode.com/photos\"\n\ndata = obtain_data(url)\nprint(type(data)) # \nprint(type(data[0])) # \nprint(data[0][\"url\"]) # https://via.placeholder.com/600/92c952\n\"\"\"\n{\n 'albumId': 1,\n 'id': 1,\n 'title': 'accusamus beatae ad facilis cum similique qui sunt',\n 'url': 'https://via.placeholder.com/600/92c952',\n 'thumbnailUrl': 'https://via.placeholder.com/150/92c952'\n}\n\"\"\"\n\n\n\n\n# response = requests.get(url)\n# print(response) # ]\n# print(type(response)) # \n# print(response.status_code) # 200\n\n# 
print(data.json())\n","sub_path":"find-same-url/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"343098363","text":"from function_Uteis import basicos\nfrom random import randint\nimport pygame\n\n\ndef estilo_musicas_function():\n estilo_musicas = {}\n lista_keys = []\n estilo = basicos.lerArquivos('../../../musicas/','estilos m','txt')\n cont = 0\n for c in estilo:\n lista_keys.append(c.rstrip(\"\\n\"))\n estilo_musicas[f'{lista_keys[cont]}'] = []\n cont += 1\n for key in estilo_musicas.keys():\n try:\n songs = basicos.lerArquivos(f'../../../musicas/{key}/', f'{key} songs', 'txt')\n lista = []\n for song in songs:\n lista.append(song.rstrip(\"\\n\"))\n estilo_musicas[f'{key}'] = lista\n except FileNotFoundError:\n estilo_musicas[key] = []\n return estilo_musicas\n\n\ndef reproduzir_musica():\n try:\n estilo_musicas = estilo_musicas_function()\n\n print('-=' * 30)\n print(f'\\033[1;36m{\"menu estilos musicais\".center(60)}\\033[m')\n print('-=' * 30)\n\n print('-' * 30)\n for key in estilo_musicas.keys():\n print(f' * {key}')\n print('-' * 30)\n\n basicos.reproduzir_voz('Aquir estão todas as playlist já criadas')\n basicos.reproduzir_voz('selecione a playlist que voçê deseja')\n nomePlay = str(input('\\033[1;37mQual playlist voçê que selecionar? \\033[m')).strip().lower()\n\n if nomePlay in estilo_musicas.keys():\n NOTMusic = ''\n basicos.reproduzir_voz(f'voçê escolheu a playlist {nomePlay}')\n lista_musicas = []\n for key, musica in estilo_musicas.items():\n if key == nomePlay:\n if len(musica) > 0:\n for i, valor in enumerate(musica):\n lista_musicas.append(valor)\n print(f'{i} -> \\033[1;37m{valor}\\033[m')\n else:\n NOTMusic = 'não á musicas'\n if NOTMusic == \"não á musicas\":\n basicos.reproduzir_voz('Não á musicas nessa playlist')\n pass\n else:\n basicos.reproduzir_voz('Escolha entre o modo aleatório ou o modo manual')\n reps = str(input('\\033[1;37mmodo aleátorio | modo manual: \\033[m')).strip().lower()\n\n if 'aleátorio' in reps:\n pygame.mixer.init()\n repitido = []\n total_musicas = 0\n while 1:\n aleatorio = randint(0, (len(lista_musicas) - 1))\n if total_musicas == len(lista_musicas):\n break\n if aleatorio not in repitido:\n musica = lista_musicas[aleatorio]\n pygame.mixer.music.load(f\"../../../musicas/{nomePlay}/{musica}\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == 1:\n continue\n repitido.append(aleatorio)\n total_musicas += 1\n else:\n pygame.mixer.init()\n lista_musicas_selecionadas = []\n max = int(input('\\033[1;37mQuantas musicas voçê quer selecionar? \\033[m'))\n for c in range(0, max):\n music = int(input(f'\\033[1;37mqual musica deseja colocar na posição {c}° ? 
\\033[m'))\n lista_musicas_selecionadas.append(music)\n for c in range(0, max):\n musica = lista_musicas[lista_musicas_selecionadas[c]]\n pygame.mixer.music.load(f\"../../../musicas/{nomePlay}/{musica}\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == 1:\n continue\n else:\n basicos.reproduzir_voz('Essa playlist não existe')\n print('\\033[7mPLAYLIST ENCERRADA!\\033[m')\n print('-=' * 30)\n except Exception:\n basicos.reproduzir_voz('um erro aconteceu!, voçê não pode iniciar essa função')\n print('-=' * 20)\n\n\ndef estilosADM():\n print('==' * 20)\n print(f'\\033[1;36m{\"ADM Estilos musicais\".center(40)}\\033[m')\n print('==' * 20)\n print('--' * 20)\n print('[1] => \\033[1;37m\"ver estilos cadastrados\"\\033[m ')\n print('[2] => \\033[1;37m\"adicionar um novo estilo\"\\033[m ')\n print('[3] => \\033[1;37m\"excluir um estilo\"\\033[m ')\n print('[4] => \\033[1;37m\"Sair\"\\033[m ')\n print('--' * 20)\n\n while 1:\n\n resp = int(input('\\033[1;37mQual você que selecionar?\\033[m '))\n\n if resp == 1:\n Em = estilo_musicas_function()\n print('--' * 20)\n print(f'{\"Estilos Cadastrados\".center(40)}')\n print('--' * 20)\n for pos, key in enumerate(Em.keys()):\n print(f'{pos:.<30}', end=\"\")\n print(f'\\033[1;37m{key}\\033[m')\n elif resp == 2:\n nomePag = str(input('Nome do Estilo: ')).strip().lower()\n estilo = basicos.lerArquivos('../../../musicas/', 'estilos m', 'txt')\n Nestilo = []\n for c in estilo:\n Nestilo.append(c.rstrip(\"\\n\"))\n Nestilo.append(nomePag)\n NE = ''\n for v in Nestilo:\n NE += v + \"\\n\"\n basicos.criaArquivos('../../../musicas/','estilos m','txt', f'{NE}','w')\n elif resp == 3:\n EstExcluir = int(input('Qual Estilo deseja excluir? [\"-1\" para cancelar] '))\n if EstExcluir == -1:\n pass\n else:\n estilo = basicos.lerArquivos('../../../musicas/', 'estilos m', 'txt')\n Nestilo = []\n for c in estilo:\n Nestilo.append(c.rstrip(\"\\n\"))\n Nestilo.pop(EstExcluir)\n NE = ''\n for v in Nestilo:\n NE += v + \"\\n\"\n basicos.criaArquivos('../../../musicas/', 'estilos m', 'txt', f'{NE}', 'w')\n else:\n break\n","sub_path":"assistente virtual/function_Uteis/play_musica.py","file_name":"play_musica.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"207955098","text":"class Invoice:\r\n \r\n def __init__(self):\r\n self.items = {}\r\n \r\n def addProduct(self,qnt,price,discount):\r\n self.items[\"qnt\"] = qnt\r\n self.items[\"unit_price\"] = price\r\n self.items[\"discount\"] = discount\r\n return self.items\r\n\r\n def totalImpurePrice(self, products):\r\n total_impure_price = 0\r\n for k, v in products.items():\r\n total_impure_price += float(v[\"unit_price\"]* int(v[\"qnt\"]))\r\n total_impure_price = round(total_impure_price, 2)\r\n return total_impure_price\r\n \r\n def totalDiscount(self, products):\r\n total_discount = 0\r\n for k, v in products.items():\r\n total_discount += (int(v[\"qnt\"])*float(v[\"unit_price\"]))*float(v[\"discount\"])/100\r\n total_discount = round(total_discount,2)\r\n return total_discount\r\n \r\n def totalPurePrice(self, products):\r\n totalPurePrice = self.totalImpurePrice(products)- self.totalDiscount(products)\r\n return totalPurePrice\r\n \r\n def inputAnswer(self, input_value):\r\n while True:\r\n userInput = input(input_value)\r\n if userInput in [\"y\",\"n\"]:\r\n return userInput\r\n print(\"y or n! 
try again\")\r\n \r\n def inputNumber(self, input_value):\r\n while True:\r\n try:\r\n userInput = input(input_value)\r\n except ValueError:\r\n print(\"not a number! try again\")\r\n else:\r\n return userInput\r\n #added\r\n def getName(self, products,indexVal):\r\n key_list = list(products.keys())\r\n val_list = list(products.values())\r\n name = key_list[indexVal]\r\n return name","sub_path":"invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"345791247","text":"#!/usr/bin/env python\nimport time\nimport serial\nimport rospy\nimport thread\nfrom std_msgs.msg import String\n\n\nser = serial.Serial(\n port='/dev/ttyUSB0',\n baudrate=9600,\n parity=serial.PARITY_ODD,\n stopbits=serial.STOPBITS_TWO,\n bytesize=serial.SEVENBITS\n )\n\ndef send():\n input = 'hii'\n # Python 3 users\n # input = input(\">> \")\n rospy.loginfo(\"Txed The signal\"+input)\n pub_tx.publish(input)\n ser.write(input + '\\r\\n')\n\ndef receive():\n out = ''\n out += ser.read(10)\n time.sleep(1)\n pub_rx.publish(out)\n rospy.loginfo(\"Rcvd Mssg\",out)\n\n\nif __name__ == '__main__':\n\n # configure the serial connections (the parameters differs on the device you are connecting to)\n ser.isOpen()\n rospy.init_node('XBee_2')\n pub_tx=rospy.Publisher('XBee_2_TX', String, queue_size=10)\n pub_rx=rospy.Publisher('XBee_2_RX', String, queue_size=10)\n rate=rospy.Rate(0.1)\n thread.start_new_thread(receive,())\n while not rospy.is_shutdown():\n thread.start_new_thread(send,()) \n rate.sleep() ","sub_path":"XBee_ROS/src/XBee_2.py","file_name":"XBee_2.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"203652257","text":"#!/usr/bin/env python3.5\n# -*- coding: utf-8 -*-\n# @Time : 2017/11/14 15:45\n# @Author : HoxHou\n# @File : mysql_engine.py\n# @Software: PyCharm Community Edition\n\n\n\n\nimport re\n\nimport pymysql\n\n\nclass MySQLEngine(object):\n def __init__(self):\n \"\"\"\n MySQL 数据库ORM\n :param\n \"\"\"\n self.host = \"10.10.13.120\"\n self.user = \"root\"\n self.password = \"123456\"\n self.charset = \"utf8\"\n self.db_name = \"xhg_testautomation\"\n try:\n conn = pymysql.connect(host=self.host, user=self.user, password=self.password,\n database=self.db_name, charset=self.charset, connect_timeout=100)\n except pymysql.Error:\n raise\n self.__conn = conn\n\n def my_execute(self, execute_type, sql):\n \"\"\"\n :param execute_type:\n :param sql:\n :return:\n \"\"\"\n try:\n print(sql) # 后期保存到日志模块\n self.cursor = self.__conn.cursor()\n if execute_type is 'query':\n self.cursor.execute(sql)\n data_list = self.cursor.fetchall()\n table_fields = [each[0] for each in self.cursor.description]\n result=[]\n for row in data_list:\n obj_dict = {}\n # 字典键值对\n for index, value in enumerate(row):\n obj_dict[table_fields[index]] = value\n result.append(obj_dict)\n #print(result) # 后期保存到日志模块\n return result\n elif execute_type in ('insert', 'update', 'delete'):\n try:\n result={}\n self.cursor.execute(sql)\n if execute_type == 'insert':\n insert_id = self.__conn.insert_id()\n print('新插入的id:'+str(insert_id))\n result['insert_id'] = insert_id\n self.__conn.commit()\n print(\"受影响的行:%d\" % (self.cursor.rowcount))\n result['rowcount'] = self.cursor.rowcount\n return result\n except pymysql.Error:\n self.__conn.rollback()\n raise\n finally:\n 
self.__conn.close()\n","sub_path":"common/db_handler/mysql_engine.py","file_name":"mysql_engine.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"608028369","text":"# 234. Palindrome Linked List\n# Given a singly linked list, determine if it is a palindrome.\n\n# Example 1:\n\n# Input: 1->2\n# Output: false\n# Example 2:\n\n# Input: 1->2->2->1\n# Output: true\n# Follow up:\n# Could you do it in O(n) time and O(1) space?\n\n# convert to a list and then reverse the last and\n# compare with regular list\n\n\ndef isPalindrome(self, head: ListNode) -> bool:\n nums = []\n curr = head\n while curr:\n nums.append(curr.val)\n curr = curr.next\n\n return nums == nums[::-1]\n\n\n# slow fast pointers\n# move slow to one past halfway\n# reverse first half of the last on the way\n# traverse reverse first half and second half\n# checking for equality\n#\ndef isPalindrome(self, head):\n rev = None\n slow = fast = head\n while fast and fast.next:\n fast = fast.next.next\n rev, rev.next, slow = slow, rev, slow.next\n if fast:\n slow = slow.next\n while rev and rev.val == slow.val:\n slow = slow.next\n rev = rev.next\n return not rev\n","sub_path":"LinkedList/PalindromeLinkedList.py","file_name":"PalindromeLinkedList.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"610447448","text":"# -*- coding: utf-8 -*-\n'''\nCreated on Sat Feb 10 23:22:48 2018\n\n@author: Wen\n\n'''\nimport numpy as np\nimport random\n\n# the 2048 game\n\nclass Board(object):\n def __init__(self, board_size=4):\n self.board_size = board_size\n self.actions = ['L', 'U', 'R', 'D']\n # sometimes certain action is not available\n self.available_actions = self.actions\n self.new_num = [2, 4]\n self.game_over = False\n self.indices = np.arange(board_size)\n # use 2 arrays to record the state of the board\n # 1-D array is easy to choose empty cells, 2-D array easy to check game status\n self.state = np.zeros(board_size * board_size)\n self.state_square = np.zeros((board_size, board_size))\n # randomly fill two cells in the board\n self.update()\n self.update()\n self.check_all()\n \n # check whether sliding to left is available\n def check(self):\n available = False\n # the game ends only if all actions are unavailable\n # check for sliding left\n # for other directions rotate the board and call the same function\n for i in self.indices:\n # store the original values in the row for update\n values = list(self.state_square[i])\n empty_indices = [j for j in self.indices if values[j] == 0]\n # if all cells are empty then continue to check the next row\n if len(empty_indices) == self.board_size:\n continue\n num_indices = [j for j in self.indices if values[j] > 0]\n # as long as the rightmost number is in the right of the leftmost empty cell then the action is available\n if empty_indices != []:\n if min(empty_indices) < max(num_indices):\n available = True\n break\n # otherwise only can move if adjacent cells are of the same value\n # in this case if number of numbered cells has to be greater than 1\n num_len = len(num_indices)\n if num_len < 2:\n continue\n else:\n for j in range(num_len - 1):\n if values[num_indices[j]] == values[num_indices[j+1]]:\n available = True\n break\n return available\n \n # check all directions, if there is no available moves then game over\n def check_all(self):\n self.available_actions = []\n \n action = 'L'\n if 
self.check():\n self.available_actions.append(action)\n \n action = 'U'\n self.state_square = np.rot90(self.state_square, 1)\n if self.check():\n self.available_actions.append(action)\n self.state_square = np.rot90(self.state_square, 3)\n \n action = 'D'\n self.state_square = np.rot90(self.state_square, 3)\n if self.check():\n self.available_actions.append(action)\n self.state_square = np.rot90(self.state_square, 1)\n \n action = 'R'\n self.state_square = np.flip(self.state_square, 1)\n if self.check():\n self.available_actions.append(action)\n self.state_square = np.flip(self.state_square, 1)\n \n if self.available_actions == []:\n self.game_over = True\n else:\n self.game_over = False\n \n # slide the board to the left\n def slide(self):\n for i in self.indices:\n # store the original values in the row for update\n # need to make a new list, otherwise it is merely a reference\n values = list(self.state_square[i])\n empty_indices = [j for j in self.indices if values[j] == 0]\n # if all cells are empty then continue to check the next row\n if len(empty_indices) == self.board_size:\n continue\n num_indices = [j for j in self.indices if values[j] > 0]\n num_len = len(num_indices)\n \n self.state_square[i] = 0\n if num_len < 2:\n self.state_square[i][0] = values[num_indices[0]]\n else:\n j = 0\n pos = 0\n while j < num_len:\n # whether reaches the last element\n if j < num_len - 1:\n if values[num_indices[j]] == values[num_indices[j+1]]:\n self.state_square[i][pos] = 2 * values[num_indices[j]]\n j += 2\n pos += 1\n continue\n \n self.state_square[i][pos] = values[num_indices[j]]\n j += 1\n pos += 1 \n \n # randomly choose an empty cell to fill it with 2 or 4 randomly\n def update(self):\n idx = random.choice([i for i in range(self.board_size * self.board_size) if self.state[i] == 0])\n new_num = random.choice(self.new_num)\n # need to update both state arrays\n self.state[idx] = self.state_square[int(np.floor(idx / self.board_size))][idx % self.board_size] = new_num\n \n # make a move using a certain action\n def move(self, action):\n # for different action, rotate the matrix correspondingly and slide to the left\n # then restore the matrix afterward\n # 'U' and 'D' corresponds to rotate 90 and 270 degrees\n # 'R' corresponds to flip the matrix\n if not action in self.available_actions:\n print(action, 'is not an available action.')\n return False\n \n print('Making a move:', action)\n if action == 'R':\n self.state_square = np.flip(self.state_square, 1)\n self.slide()\n self.state_square = np.flip(self.state_square, 1)\n else:\n rot = self.actions.index(action)\n self.state_square = np.rot90(self.state_square, rot)\n self.slide()\n self.state_square = np.rot90(self.state_square, 4 - rot)\n \n self.state = self.state_square.flat[:]\n \n return True\n \n # make a random move\n def random_move(self):\n self.move(random.choice(self.available_actions))\n \n def score(self):\n return max(self.state)\n \n def show(self):\n print(self.state_square.astype(int))\n\nif __name__ == '__main__':\n board = Board(4)\n board.show()\n steps = 0\n while not board.game_over:\n steps += 1\n board.random_move()\n board.update()\n board.show()\n board.check_all()\n if board.game_over:\n print('Game over!')\n print('Score:', board.score())\n print('Steps:', steps)\n break","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"322072821","text":"# Requires Python 3.5+\n# Requires 
mp3-tagger library (\"pip3 install mp3-tagger\" or download from here: https://pypi.org/project/mp3-tagger/)\n# nem kell -*- coding:Utf-8 -*-\n\nimport os\nimport glob\nimport json\nimport sys\nimport getopt\nfrom mp3_tagger import MP3File\nfrom mp3_tagger.id3 import VERSION_2, VERSION_BOTH, VERSION_1\n\n\n#PATH = \"/home/apulai/mp3\"\n#PATH = \"Z:\\\\\"\n#PATH = \"c:\\\\test\"\n#PATH=\"Z:\\\\juca\"\n#PATH=\"Z:\\\\mp3\\\\shrek\"\n#PATH=\"Z:\\\\mp3\\\\_Magyar\"\n#PATH=\"D:\\\\temp\"\n#PATH=\"Z:\\\\mp3\\\\_Latin\"\n#PATH=\"Z:\\\\mp3\\\\_Country\"\n#PATH=\"Z:\\\\mp3\\\\_Disco\"\n#PATH=\"Z:\\\\mp3\\\\_Folk\"\n#PATH=\"Z:\\\\mp3\\\\_Gyerek\"\n#PATH=\"Z:\\\\mp3\\\\_Hangoskonyv\"\n#PATH=\"Z:\\\\mp3\\\\_Jazz\\\\Take Five\"\nPATH=\"Z:\\\\mp3\\\\_Magyar\\\\Valami Amerika\"\n#PATH=\"Z:\\\\mp3\\\\_Pop\\\\Boney M - The Magic Of Boney M\"\n#PATH=\"Z:\\mp3\\_Magyar\\István a király\"\n#PATH=\"Z:\\\\mp3\\\\_Vegyes\"\n#PATH=\"Z:\\\\mp3\\\\_Jazz\\\\Smooth Africa\"\n#PATH=\"Z:\\\\mp3\\\\_Rock\"\n#PATH=\"Z:\\\\mp3\\\\_Country\"\n#PATH=\"/mnt/backupdsk/mp3/_Magyar\"\n\n# We will look for these extensions\nLIST_OF_EXTENSIONS = \".mp3\", \".MP3\"\n\n# In some of the tags there were very strange chars\n# We want them to be removed\nBAD_CHARS = \" \\n\\x00\\r\\ufeff\"\n\n# Pseudo done: TODO: Skip only those directories which were marked as consistent in the processed.log file (likely load proccessed log before run\n# We log error message and then the directory name\n\n# Pseudo done: TODO: Log somehow if mp3 file had only v1 tags\n# Folder is logged\n\n\n#LOGFILE_NAME = \"uxprocessed.log\"\nLOGFILE_NAME = \"processed.log\"\n#PROCESSED_DIR_FILE = PATH + \"/uxprocessed.log\"\nPROCESSED_DIR_FILE = PATH + \"/\" + LOGFILE_NAME\n\nrootDir = PATH\nreport_inconsistent_directories = 1\nupdate_mp3data = 1\n\ndef collect_mp3info(directory):\n \"\"\"\n function:\tcollect-mp3info\n input:\t foldername\n output:\t list of dictionaries containing mp3 tags per song\n operation:\topens each mp3 files, and extracts mp3 info into a list of dictionaries.\n might return an empty list\n\n \"\"\"\n print(\"Function: collect_mp3info Directory {}\".format(directory))\n songs_list = list()\n file_list = list()\n for extension in LIST_OF_EXTENSIONS:\n temp_list = glob.glob(directory + \"/*\" + extension, recursive=False)\n # Do not append a list to a list...\n file_list = file_list + temp_list\n\n #Since on windows .mp3 and .MP3 is not different\n #Make this list uniq again\n\n temp_list = file_list\n\n file_list = list()\n for x in temp_list:\n if x not in file_list:\n file_list.append(x)\n\n #extension = \".mp3\"\n #file_list = glob.glob(directory + \"/*\" + extension, recursive=False)\n\n # print(directory),\n #print(file_list),\n\n for file in file_list:\n # print(\"file:\", file)\n # print(\"file: \" + file)\n # print(\"file: {} directory:{}\".format(file,directory))\n # print(\"file: {0} directory:{1}\".format(file,directory))\n # print(\"file: {0} directory:{1} file: {0}\".format(file,directory))\n\n print(\".\", end=\"\")\n d = dict()\n try:\n d[\"hasbadchars\"] = False\n\n mp3 = MP3File(file)\n mp3.set_version(VERSION_2) # we just want to get the v2 tags\n d[\"tagversion\"]=\"v1\" # We hope tags will be v2, but let's set the worst case for us which is v1, if no v2 tags we will assume all was v1 and will not write\n\n if isinstance(mp3.artist, str): # If it's a string we are good...\n if len(mp3.artist) == 0: # But if v2 tag is empty, let's try v1 tag instead\n mp3.set_version(VERSION_1) # So there was a non-zero v2 tag\n else:\n 
d[\"tagversion\"] = \"v2\"\n d[\"artist\"] = mp3.artist.rstrip()\n else:\n d[\"artist\"] = \"\"\n\n mp3.set_version(VERSION_2) # we just want to get the v2 tags\n if isinstance(mp3.album, str): # If it's a string we are good...\n if len(mp3.album) == 0: # But if v2 tag is empty, let's try v1 tag instead\n mp3.set_version(VERSION_1)\n else:\n d[\"tagversion\"] = \"v2\" # So there was a non-zero v2 tag\n d[\"album\"] = mp3.album.rstrip()\n else:\n d[\"album\"] = \"\"\n\n mp3.set_version(VERSION_2) # we just want to get the v2 tags\n if isinstance(mp3.song, str): # If it's a string we are good...\n if len(mp3.song) == 0: # But if v2 tag is empty, let's try v1 tag instead\n mp3.set_version(VERSION_1)\n else:\n d[\"tagversion\"] = \"v2\" # So there was a non-zero v2 tag\n d[\"song\"] = mp3.song.rstrip()\n else:\n d[\"song\"] = \"\"\n\n mp3.set_version(VERSION_2) # we just want to get the v2 tags\n if isinstance(mp3.band, str): # If it's a string we are good...\n if len(mp3.band) == 0: # But if v2 tag is empty, let's try v1 tag instead\n mp3.set_version(VERSION_1)\n else:\n d[\"tagversion\"] = \"v2\" # So there was a non-zero v2 tag\n d[\"band\"] = mp3.band.rstrip()\n else:\n d[\"band\"] = \"\"\n\n d[\"filename\"] = file\n\n songs_list.append(d)\n except Exception as e:\n print(\"Warning: MP3 tag cannot be read from file: {}. Exception: {}\".format(file, e))\n writelogfile(\"ERR MP3:\" + format(file) + \"\\n\")\n print(\"\")\n print(json.dumps(songs_list, indent=4, ensure_ascii=False))\n\n return songs_list\n\ndef remove_bad_chars(song_list):\n \"\"\"\n function:\tremove_bad_chars\n input:\t song_list\n output:\t corrected song list\n operation:\tWalks so a song_list collected by collect_mp3info\n Tries to remove bad chars we have seen mostly on windows\n \"\"\"\n ret_list = list()\n for song in song_list:\n ret_song = dict()\n\n ret_song[\"hasbadchars\"]=False\n\n ret_song[\"artist\"] = song[\"artist\"].rstrip(BAD_CHARS)\n if (ret_song[\"artist\"] != song[\"artist\"]):\n ret_song[\"hasbadchars\"] = True\n\n ret_song[\"album\"] = song[\"album\"].rstrip(BAD_CHARS)\n if (ret_song[\"album\"] != song[\"album\"]):\n ret_song[\"hasbadchars\"] = True\n\n ret_song[\"song\"] = song[\"song\"].rstrip(BAD_CHARS)\n if (ret_song[\"song\"] != song[\"song\"]):\n ret_song[\"hasbadchars\"] = True\n\n ret_song[\"band\"] = song[\"band\"].rstrip(BAD_CHARS)\n if (ret_song[\"band\"] != song[\"band\"]):\n ret_song[\"hasbadchars\"] = True\n\n ret_song[\"tagversion\"]=song[\"tagversion\"]\n ret_song[\"filename\"] = song[\"filename\"]\n ret_list.append(ret_song)\n\n return ret_list\n\ndef is_mp3info_consistent(songs_list):\n \"\"\"\n function:\tis_mp3info_consistent\n input:\t list of dictionaries with mp3 tags\n output:\t True if album, band and artist are the same for all songs\n True if list is empty\n False if all band, artist and album tags are empty\n False in other cases\n operation:\ttakes the list's first element and compares subsequent entries\n if there is a difference returns False\n \"\"\"\n # if we got an empty list as input, we will return\n if len(songs_list) == 0:\n return True\n # artist_consistent = True\n album_consistent = True\n band_consistent = True\n artist_consistent = True\n first_nonempty_album = False\n first_nonempty_band = False\n first_nonempty_artist = False\n # we will compare each song to the first song\n first_song = songs_list[0]\n\n for song in songs_list:\n # We don't need to compare the first song to first_song one as well [1:] We can use 1: like operators on lists\n # But this is 
wrong what if we have only 1 song?!\n if song[\"album\"] != \"\":\n first_nonempty_album = True\n if song[\"band\"] != \"\":\n first_nonempty_band = True\n if song[\"artist\"] != \"\":\n first_nonempty_artist = True\n\n if first_song[\"artist\"] != song[\"artist\"]:\n artist_consistent = False\n print(\"Suspect: Artist inconsistent\")\n break\n if first_song[\"album\"] != song[\"album\"]:\n album_consistent = False\n print(\"Err: Album inconsistent\")\n break\n if first_song[\"band\"] != song[\"band\"]:\n band_consistent = False\n print(\"Err: Band inconsistent\")\n break\n\n # Not all artist was the same\n # We can correct it if band is consistent and album is consistent\n # And all artist is different then we are still OK\n\n if( artist_consistent == False):\n print(\"Double check artist consistency, if album and band is consitent, and no empty artists are then OK\")\n if ( band_consistent == True and album_consistent == True):\n totalnumberofsongs = len(songs_list)\n artistlist = list()\n for song in songs_list: # We need to generate the list of artists\n artistlist.append(song[\"artist\"])\n track = {}\n for value in artistlist:\n if (value == [] or (value is None)):\n value = \"empty\"\n if value not in track:\n track[value] = 1\n else:\n track[value] += 1\n numberofdifferentartists = len(track)\n #if ( float(numberofdifferentartists)/float(totalnumberofsongs)==1.0):\n if( \"empty\" not in artistlist):\n artist_consistent = True\n print(\"Doublecheck: artist is OK, since no empty artist while band and album is consistent\")\n\n else:\n print(\"Double check: artist is really not OK\")\n\n if not first_nonempty_band:\n print(\"Band is empty for all songs!\")\n if not first_nonempty_album:\n print(\"Album is empty for all songs!\")\n if not first_nonempty_artist:\n print(\"Artist is empty for all songs!\")\n return album_consistent and band_consistent and artist_consistent and first_nonempty_album and \\\n first_nonempty_band and first_nonempty_artist\n\n\ndef suggest_mostfrequent_mp3info(songlist):\n \"\"\"\n function:\tsuggest_mostfrequent_mp3info\n input:\t list of mp3 objects - songlist\n output:\t band, album, artist tuple\n operation:\tlooks into the mp3 objects, and calculates the\n most frequent band and album string\n returns band, album\n \"\"\"\n\n # If we get no data let's return\n totalnumberofsongs = len(songlist)\n if totalnumberofsongs == 0:\n return \"empty\", \"empty\", \"empty\"\n\n albumlist = list()\n bandlist = list()\n artistlist = list()\n for song in songlist: # Create 3 separate list of attributes so we can work with them easier\n albumlist.append(song[\"album\"])\n bandlist.append(song[\"band\"])\n artistlist.append(song[\"artist\"])\n\n # Start work on list of albums, calculate most ferquent album name\n track = {}\n for value in albumlist:\n if( value == [] or (value is None) ): # sometimes we got NoneVaule, likely this won't happen anymore, but we still check\n value = \"empty\"\n if value not in track:\n track[value] = 1\n else:\n track[value] += 1\n retvalalbum=max(track, key=track.get) # sometimes we got NoneVaule, likely this won't happen anymore, but we still check\n retvalalbumqty=track[retvalalbum]\n calculatedalbum=retvalalbum\n calculatedalbumqty=retvalalbumqty\n\n # Start work on list of artits, calculate most ferquent artist name\n track = {}\n for value in artistlist:\n if (value == [] or(value is None)):\n value = \"empty\"\n if value not in track:\n track[value] = 1\n else:\n track[value] += 1\n\n # We will select here the most frequent 
artist\n calculatedartist = max(track, key=track.get)\n retvalartist = calculatedartist\n calculatedartistqty = track[calculatedartist]\n retvalartistqty = calculatedartistqty\n totalnumberofdifferentartist=len(track)\n\n\n # But If all song has an artist we will propose keep instead\n if (\"\" not in artistlist):\n retvalartist = \"keep\"\n\n # Start work on list of band, calculate most ferquent band name\n track = {}\n for value in bandlist:\n if (value == \"\" or (value is None)):\n value = \"empty\"\n if value not in track:\n track[value] = 1\n else:\n track[value] += 1\n retvalband = max(track, key=track.get)\n retvalbandqty = track[retvalband]\n calculatedband=retvalband\n calculatedbandqty=retvalbandqty\n\n#If band is empty propose artist as band\n if retvalband == \"empty\" :\n # If the most frequent artist is present in more than 15% of the songs\n # and the band is empty let's propose artist as the band\n if float(calculatedartistqty)/float(totalnumberofsongs) >= 0.15:\n retvalband = calculatedartist\n\n\n\n print(\"Total number of songs in this folder:\\t{}\".format(totalnumberofsongs))\n\n print(\"Most frequent band:\\t{} \\tnumber of occurances: {} .\".format(calculatedband, calculatedbandqty))\n print(\"Most frequent album:\\t{} \\tnumber of occurances: {} .\".format(calculatedalbum, calculatedalbumqty))\n print(\"Most frequent artist:\\t{} \\tnumber of occurances: {} . \".format(calculatedartist, calculatedartistqty))\n\n print(\"Returning proposal for band:\\t{} \\tnumber of occurances: {} .\".format(retvalband,retvalbandqty))\n print(\"Returning proposal for album:\\t{} \\tnumber of occurances: {} .\".format(retvalalbum,retvalalbumqty))\n print(\"Returning proposal for artist:\\t{} \\tnumber of occurances: {} . \".format(retvalartist, retvalartistqty))\n\n return retvalband,retvalalbum,retvalartist\n\n\ndef update_mp3info(songlist, requiredtag, write_v1_tags=False):\n \"\"\"\n function:\tupdate_mp3info\n input:\t songlist a directory of mp3 tags, dictionary of required mp3, write_v1_tags by default false\n output:\n operation:\twrites mp3tags into each song, if tag == keep keeps tag (artist only)\n future: updates processed dir logfile\n \"\"\"\n\n # TODO: add album cover!\n\n print(\"Function: update_mp3info\")\n #print(dir),\n #print(fileList),\n for song in songlist:\n needtosave=False\n if( song[\"album\"] != requiredtag[\"album\"]):\n needtosave=True\n if( song[\"band\"] != requiredtag[\"band\"]):\n needtosave=True\n if (song[\"song\"] == \"\"):\n needtosave=True\n if( song[\"artist\"] != requiredtag[\"artist\"] and requiredtag[\"artist\"] != \"keep\" ):\n needtosave=True\n\n if( song[\"tagversion\"]==\"v1\" and write_v1_tags == False):\n # ISSUE: mp3tagger seems not to handle corrctly if there is no tag or only v1 tags\n needtosave = False\n print(\"WARNING: Song with V1 tags only: {}\".format(song[\"filename\"]))\n #writelogfile(\"Log: only V1 tag excpetion: {}\".format(song[\"filename\"]))\n\n if needtosave==True :\n try:\n mp3 = MP3File(song[\"filename\"])\n mp3.set_version(VERSION_BOTH)\n mp3.band = requiredtag[\"band\"].rstrip(BAD_CHARS)\n mp3.album = requiredtag[\"album\"].rstrip(BAD_CHARS)\n if song[\"song\"] == \"\":\n # My TC friend is totally bored sometimes somewhere so he learns stuff like [:-4]\n mp3.song = os.path.basename(song[\"filename\"])[:-4]\n mp3.song = mp3.song.rstrip(BAD_CHARS)\n if (requiredtag[\"artist\"] != \"keep\"):\n mp3.artist = requiredtag[\"artist\"].rstrip(BAD_CHARS)\n #print('Writing tags to %s' % song[\"filename\"] )\n mp3.save()\n 
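# Hedged aside, not in the original: the manual track = {} tallies used in\n            # suggest_mostfrequent_mp3info above are equivalent to collections.Counter:\n            #\n            #   from collections import Counter\n            #   counts = Counter(v if v else \"empty\" for v in bandlist)\n            #   band, qty = counts.most_common(1)[0]\n            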
except Exception as e:\n                print(\"Warning: MP3 tag cannot be saved for file: {}. Exception: {}\".format(song[\"filename\"], e))\n                writelogfile(\"Log: Warning: MP3 tag cannot be saved for file:\" + format(song[\"filename\"])+ format(e))\n            else:\n                print(\"Info: MP3 tag updated for file: {}\".format(song[\"filename\"]))\n\ndef rewrite_songs_with_bad_chars(songlist):\n    \"\"\"\n    function:\trewrite_songs_with_bad_chars\n    input:\t    songlist a directory of mp3 tags\n    output:\n    operation:\twrites mp3tags and rstrips again\n    \"\"\"\n    for song in songlist:\n        try:\n            if song[\"hasbadchars\"] == True and song[\"tagversion\"] == \"v2\":\n                try:\n                    mp3 = MP3File(song[\"filename\"])\n                    mp3.set_version(VERSION_BOTH)\n                    mp3.band = song[\"band\"].rstrip(BAD_CHARS)\n                    mp3.album = song[\"album\"].rstrip(BAD_CHARS)\n                    mp3.song = song[\"song\"].rstrip(BAD_CHARS)\n                    mp3.artist = song[\"artist\"].rstrip(BAD_CHARS)\n                    mp3.save()\n                except Exception as e:\n                    print(\n                        \"Warning: MP3 tag cannot be saved for file: {}. Exception: {}\".format(song[\"filename\"], e))\n                    writelogfile(\n                        \"Log: Warning: MP3 tag cannot be saved for file:\" + format(song[\"filename\"]) + format(e))\n                else:\n                    print(\"Info: MP3 badchars removed for file: {}\".format(song[\"filename\"]))\n            elif song[\"tagversion\"] == \"v1\":\n                # no exception object exists on this path, so do not reference 'e' here\n                writelogfile(\n                    \"ERR V1 BADCHAR: MP3 tag cannot be saved for file:\" + format(song[\"filename\"]))\n        except NameError:\n            print(\"Info no bad chars, or not checked\")\n    return\n\n\ndef writelogfile(str):\n    try:\n        with open(PROCESSED_DIR_FILE, \"a\") as f:\n            f.write(str)\n    except IOError:\n        print(\"Processed directories log file: {} cannot be opened.\".format(PROCESSED_DIR_FILE))\n\n\ndef walkdir_OBSOLETE(dir):\n    \"\"\"\n    function:\twalkdir - OBSOLETE\n    input:\t    foldername\n    output:\t    none\n    operation:\trecursively walks through the directories\n                tries to collect mp3 info in each dir\n                checks mp3 info per directory\n                future changes: make it non-recursive?\n    \"\"\"\n    for dirName, subdirList, fileList in os.walk(dir):\n        print('\\nArrived in directory: %s' % dirName)\n        songlist = collect_mp3info(dirName)\n\n        # songlist maybe empty, in this case we skip info check\n        if( len(songlist) > 0):\n            if( is_mp3info_consistent(songlist)== False):\n                print(\"Album is INCONSISTENT\")\n                if( update_mp3data == 1):\n                    suggestedband,suggestedalbum,suggestedartist=suggest_mostfrequent_mp3info(songlist)\n                    print(\"Suggested band: \" + suggestedband + \"\\tSuggested album: \" + suggestedalbum + \"\\tSuggested artist: \" + suggestedartist)\n                    accept = input(\"Accept suggested (Y/n/q)?\")\n                    if accept.lower() == 'n':\n                        suggestedband = input(\"Enter new band: %s \" % suggestedband) or suggestedband\n                        suggestedalbum = input(\"Enter new album: %s \" % suggestedalbum) or suggestedalbum\n                        suggestedartist = input(\"Enter new artist (or keep or blank) %s\" % suggestedartist) or suggestedartist\n                        print(\"New values: Suggested band: \" + suggestedband + \"\\tSuggested album: \" + suggestedalbum + \"\\tSuggested artist: \" + suggestedartist)\n                    if accept.lower() == 'q':\n                        exit(2)\n                    d = dict ()\n                    d[\"band\"] = suggestedband\n                    d[\"album\"] = suggestedalbum\n                    d[\"artist\"] = suggestedartist\n                    update_mp3info(songlist,d)\n                if( report_inconsistent_directories == 1):\n                    writelogfile(\"Inconsistent:\" + dirName + \"\\n\")\n\n            else:\n                print(\"Album seems to be OK\")\n                writelogfile(\"Consistent:\" + dirName + \"\\n\")\n        if( len(subdirList) == 0):\n            print(\"No subdirs\")\n        else:\n            for dname in subdirList:\n                print(\"Going to: {}\".format(dname))\n                walkdir_OBSOLETE(dname)\n        
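# Hedged note, not in the original: os.walk() already descends into every\n        # subdirectory, so the explicit walkdir_OBSOLETE(dname) recursion above\n        # revisits folders; the non-recursive replacement (walkdir plus\n        # process_dir below) boils down to:\n        #\n        #   for dir_name, _subdirs, _files in os.walk(rootDir):\n        #       process_dir(dir_name)\n        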
print(\"Directory processed: {}\".format(dirName))\n\ndef v1_tags_present(song_list):\n # Walk through the tags and check if any of them is v1\n # if v1 we return true\n for song in song_list:\n if song[\"tagversion\"] == \"v1\":\n return True\n return False\n\n\ndef process_dir(current_directory):\n \"\"\"\n function:\tprocess_dir\n input:\t foldername\n output:\t 0 if directory is updated\n 1 if directory is not updated\n 2 if directory has v1 tags and it is not updated\n operation:\tgenerates list of songs in current directory\n collects mp3info\n processes mp3info\n \"\"\"\n\n song_list=collect_mp3info(current_directory)\n if (len(song_list) > 0):\n # If there are v1 tags present we will log only an error for this directory\n if (v1_tags_present(song_list) == True ):\n print(\"Album has songs with v1 tags only, not safe to process\")\n return 2\n if (is_mp3info_consistent(song_list) == False):\n print(json.dumps(song_list, indent=4, ensure_ascii=False))\n print(\"Album is inconsistent\")\n if (update_mp3data == 1):\n # Try to analyze the collected info, and come back with suggestions\n suggestedband, suggestedalbum, suggestedartist = suggest_mostfrequent_mp3info(song_list)\n # Ask for user input\n print(\"Suggested band: \" + suggestedband + \"\\tSuggested album: \" + suggestedalbum + \"\\tSuggested artist: \" + suggestedartist)\n accept = input(\"Accept suggested (Y/n/q/(s)kip)?\")\n if accept.lower() == 'n':\n suggestedband = input(\"Enter new band: %s \" % suggestedband) or suggestedband\n suggestedalbum = input(\"Enter new album: %s \" % suggestedalbum) or suggestedalbum\n suggestedartist = input(\n \"Enter new artist (or keep to keep) %s\" % suggestedartist) or suggestedartist\n print(\n \"New values: Suggested band: \" + suggestedband + \"\\tSuggested album: \" + suggestedalbum + \"\\tSuggested artist: \" + suggestedartist)\n if accept.lower() == 'q':\n exit(2)\n if accept.lower() != 's':\n d = dict()\n d[\"band\"] = suggestedband\n d[\"album\"] = suggestedalbum\n d[\"artist\"] = suggestedartist\n update_mp3info(song_list, d)\n else:\n print(\"Skipping this directory\")\n return 1\n else:\n print(\"Album is consistent\")\n\n\n return 0\n\n\ndef walkdir(dir):\n \"\"\"\n function:\twalkdir\n input:\t root folder name\n output:\t none\n operation:\tgenerates list of directories\n processes each unprocessed directory\n logs processed directories\n \"\"\"\n\n # List all directories\n directories = glob.glob(PATH + '/**/*/', recursive=True)\n\n #Debug if all directories are listed\n #i = 1\n #for p in directories:\n # print(\"{} {}\".format(i,p))\n # i=i+1\n #exit(1)\n\n # Add current directory\n directories.append(dir)\n\n number_of_directories_found = len(directories)\n print(\"Found {} directories to scan\".format(number_of_directories_found))\n\n # We will skip processed directories\n # Therefore we try to load the list of processed directories\n try:\n with open(PROCESSED_DIR_FILE) as f:\n processed_dirs = f.read().splitlines()\n except IOError:\n print(\"Processed directories log file: {} cannot be opened.\".format(PROCESSED_DIR_FILE))\n processed_dirs = []\n # print(processed_dirs)\n # Logfile of processed directories is now loaded\n\n current_directory = ''\n first_file = True\n first_file_in_dir = True\n new = {}\n for current_directory in directories:\n # We will check in this list if our current directory was already processed:\n if current_directory not in processed_dirs:\n # If our current directory was not already processed we will process it.\n # We will collect and update 
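# A short standalone sketch of the directory listing that walkdir() above
# relies on: glob with recursive=True and a '/**/*/' pattern returns every
# subdirectory, and the root itself has to be appended separately, exactly as
# the record does. PATH is a made-up example root.
import glob

PATH = "/tmp/music"  # hypothetical root directory
directories = glob.glob(PATH + "/**/*/", recursive=True)
directories.append(PATH)  # the pattern itself misses the root directory
print("Found {} directories to scan".format(len(directories)))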
mp3 info in the following call:\n #print(\"Processing dir: {}\".format(current_directory))\n retval = process_dir(current_directory)\n\n # Process_dir will return different error codes for different problems\n # Let's check them 1 by 1\n if retval == 0:\n # If we managed to refresh this directory,\n # we log it as updated\n processed_dirs.append(current_directory)\n writelogfile(current_directory + '\\n')\n elif retval == 1:\n print(\"Directory was skipped / not processed\")\n #We are adding an easy to grep error in the log\n #This will also invalidate the directory when we load the processed.log file\n #next time we run this tool (there is likely no Skip: directory when listing the contents)\n writelogfile(\"Skip:\" + current_directory + '\\n')\n else:\n # We are adding an easy to grep error in the log\n # This will also invalidate the directory when we load the processed.log file\n # next time we run this tool (there is likely no ERR V1: directory when listing the contents)\n print(\"Directory had V1 only tags\")\n writelogfile(\"ERR V1:\" + current_directory + '\\n')\n else:\n print(\"Directory: {} was already processed.\".format(current_directory))\n number_of_directories_found = number_of_directories_found - 1\n print(\"Number of directories to go {}\".format(number_of_directories_found))\n print(\"Walk complete. Remember to check the logfile for errors, like folders with v1 tags only.\")\n\n#TODO: if no arguments, then use current folder as path\ndef main(argv):\n global PATH\n global PROCESSED_DIR_FILE\n global LOGFILE_NAME\n\n try:\n PATH\n except NameError:\n print(\"PATH is not defined, we will use current directory\")\n PATH = os.getcwd()\n else:\n print(\"PATH is defined in the script body\")\n\n\n try:\n opts, args = getopt.getopt(argv, \"hp:l:\",[\"path=\",\"log=\"])\n except getopt.GetoptError:\n print('mp3tagger.py -p <path> -l <logfile>')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('mp3tagger.py -p <path> -l <logfile>')\n sys.exit()\n elif opt in (\"-p\", \"--path\"):\n PATH = arg\n elif opt in (\"-l\", \"--log\"):\n LOGFILE_NAME = arg\n\n PROCESSED_DIR_FILE = PATH + \"/\" + LOGFILE_NAME\n\n print(\"Path {} Logdir {} Concat {}\".format(PATH,LOGFILE_NAME,PROCESSED_DIR_FILE))\n walkdir(PATH)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n exit(0)\n #songlist = collect_mp3info(\"Z:\\\\mp3\\\\_Magyar\\\\István a király\\\\Cd1\")\n songlist = collect_mp3info(\"D:\\\\temp\\\\mp3\\\\Valami Amerika\")\n suggestedband, suggestedalbum, suggestedartist = suggest_mostfrequent_mp3info(songlist)\n print(\"Suggested band: \" + suggestedband + \"\\tSuggested album: \" + suggestedalbum + \"\\tSuggested artist: \" + suggestedartist)\n requiredtag = dict()\n requiredtag[\"artist\"]=\"ARTISTA\"\n requiredtag[\"album\"]=\"ALBUMM\"\n requiredtag[\"band\"]=\"Banda\"\n update_mp3info(songlist, requiredtag)\n rewrite_songs_with_bad_chars(songlist)\n","sub_path":"mp3tagger.py","file_name":"mp3tagger.py","file_ext":"py","file_size_in_byte":28076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"521553225","text":"# -*- coding: utf-8 -*-\n\nfrom server.api.v1.urls import router as v1_router\nfrom django.conf.urls import url, include\nfrom rest_framework_jwt.views import (\n obtain_jwt_token,\n refresh_jwt_token,\n verify_jwt_token,\n)\n\nurlpatterns = [\n url('auth/login/', obtain_jwt_token),\n url('auth/token_verify/', verify_jwt_token),\n url('auth/token_refresh/', refresh_jwt_token),\n url('v1/', 
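# A hedged sketch of how the getopt call in main() above consumes its flags:
# the option string "hp:l:" and the long names ["path=", "log="] mean -h takes
# no value while -p/--path and -l/--log each take one. The argv list here is
# invented for illustration.
import getopt

opts, args = getopt.getopt(["-p", "/music", "-l", "done.log"], "hp:l:", ["path=", "log="])
print(opts)  # [('-p', '/music'), ('-l', 'done.log')]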
include(v1_router.urls)),\n]\n","sub_path":"{{cookiecutter.project_name}}/server/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"9594598","text":"import urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport re,json,collections\r\nimport csv\r\nimport os\r\nfrom threading import Thread\r\n\r\nsymList = []\r\nans = collections.defaultdict(list)\r\n\r\ndef crawl(sym):\r\n url = \"https://finance.yahoo.com/quote/{}/key-statistics?p={}\".format(sym, sym)\r\n header = {\r\n 'User-Agent': \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\"}\r\n soup = BeautifulSoup(urllib.request.urlopen(url), 'html.parser',from_encoding=\"iso-8859-1\")\r\n result = re.search('root.App.main = (.*)\\;', soup.text)\r\n result = json.loads(result.group(1))\r\n dic = result\r\n keys = ['priceToBook','trailingPE']\r\n res = collections.defaultdict()\r\n queue = []\r\n for key, val in dic.items():\r\n if key in keys:\r\n res[key] = val\r\n if type(val) == list or type(val) == dict:\r\n queue.append(val)\r\n while queue:\r\n q2 = []\r\n for item in queue:\r\n if type(item) == list:\r\n for jtem in item:\r\n if type(jtem) == list or type(jtem) == dict:\r\n q2.append(jtem)\r\n elif type(item) == dict:\r\n for key, val in item.items():\r\n if key in keys:\r\n res[key] = val\r\n if type(val) == list or type(val) == dict:\r\n q2.append(val)\r\n queue = q2\r\n nums = ['','','']\r\n if 'trailingPE' in res and 'raw' in res['trailingPE']:\r\n pe = res['trailingPE']['raw']\r\n nums[0] = pe\r\n if 'priceToBook' in res and 'raw' in res['priceToBook']:\r\n pb = res['priceToBook']['raw']\r\n nums[1] = pb\r\n if nums[0] != '' and nums[1] != '':\r\n exb = str(round(nums[0]*nums[1],2))\r\n nums[2] = exb\r\n ans[sym] = nums\r\n print (sym,nums)\r\n\r\n\r\ndef toSyms(path):\r\n reader = csv.reader(open(path, \"r\"))\r\n for row in reader:\r\n symList.append(row[0].replace(' ',''))\r\n return symList\r\n\r\n\r\n\r\npath = 'st.csv'\r\ntoSyms(path)\r\n\r\nthreads = []\r\nfor key in symList[0:200]:\r\n t = Thread(target=crawl, args=(key,))\r\n t.start()\r\n threads.append(t)\r\nfor b in threads:\r\n b.join()\r\nprint (len(ans))\r\nprint (ans)\r\n\r\n\r\n","sub_path":"mulThread.py","file_name":"mulThread.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"315844876","text":"from odoo import models, fields, api\nfrom datetime import datetime, timedelta\n\nclass crm_lead(models.Model):\n _inherit = 'crm.lead'\n\n @api.depends('annual_revenue','planned_revenue')\n @api.onchange('planned_revenue')\n def _annual_revenue_calculate(self):\n if self.planned_revenue:\n self.annual_revenue = self.planned_revenue * 12\n if self.planned_revenue ==0:\n self.annual_revenue = 0\n return\n\n @api.depends('annual_revenue','planned_revenue')\n @api.onchange('annual_revenue')\n def _mothy_revenue_calculate(self):\n if self.annual_revenue:\n self.planned_revenue = 1.000*self.annual_revenue/12\n if self.annual_revenue ==0:\n self.planned_revenue = 0\n return\n\n @api.one\n @api.depends('x_subscription_period', 'x_month_number')\n def compute_end_date(self):\n for record in self:\n if not record.x_subscription_period:\n return\n month = record.x_month_number or 0\n start = datetime.strptime(record.x_subscription_period, '%Y-%m-%d')\n add_month = month % 12\n 
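# The crawl() function in the mulThread record above walks Yahoo's embedded
# JSON blob breadth-first, collecting values for a fixed key set. A compact
# sketch of that traversal; the nested dict is a hypothetical stand-in for the
# scraped structure.
def find_keys(obj, wanted):
    # Breadth-first search through nested dicts/lists for the wanted keys.
    found = {}
    queue = [obj]
    while queue:
        item = queue.pop(0)
        if isinstance(item, dict):
            for key, val in item.items():
                if key in wanted:
                    found[key] = val
                if isinstance(val, (dict, list)):
                    queue.append(val)
        elif isinstance(item, list):
            queue.extend(v for v in item if isinstance(v, (dict, list)))
    return found

print(find_keys({"a": {"trailingPE": {"raw": 12.3}}}, {"trailingPE"}))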
add_years = int(month / 12) + (int(start.month) + add_month) / 12\n end_month = (int(start.month) + add_month) % 12 if (int(start.month) + add_month) / 12 > 0 else int(\n start.month) + add_month\n end_year = start.year + add_years\n end_day = start.day -1\n if end_day == 0:\n end_month = end_month -1\n end_day = 31\n if end_month == 0:\n end_month=12\n end_year = end_year - 1\n\n while True:\n end_format = '%s-%s-%s' % (end_year, end_month, end_day)\n try:\n record.x_end_date = datetime.strptime(end_format, '%Y-%m-%d')\n break\n except:\n end_day -= 1\n\n x_subscription_period = fields.Date(string=\"Subscription Period\", default=fields.Datetime.now)\n one_time_revenue = fields.Integer('One Time Revenue')\n annual_revenue = fields.Float('Annual Revenue')\n x_month_number = fields.Integer('Number of Month', default=0)\n x_end_date = fields.Date(\"End Date\", compute=compute_end_date)\n invoice_type = fields.Selection([('fiancial', 'Financial Terminal'),('trading_gts', 'Trading (GTS)'), ('trading_dzh', 'Trading (DZHI)'), ('event','Conference & Event'),('digital','Digital')])\n\n\n\n\n","sub_path":"beta-dev1/dzh_modifier_fields_1707/models/crm_lead.py","file_name":"crm_lead.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"469179396","text":"\"\"\"\nFunctions\n---------\n\nFunctions with a common interface for parameters\n\"\"\"\nimport numpy as np\n\ndef sigmoid (x, parameters):\n \"\"\"Sigmoid function\n POI = A2 + (A1 - A2) / (1. + exp((x - x0) / dx))\n \n Parameters\n ----------\n x: float or array of floats\n variable\n parameters: dict\n dictionary containing 'sigmoid_A1','sigmoid_A2','sigmoid_x0',\n and 'sigmoid_dx'\n \n Returns\n -------\n float or array of floats:\n function result\n \"\"\"\n A1 = parameters['sigmoid_A1']\n A2 = parameters['sigmoid_A2']\n x0 = parameters['sigmoid_x0']\n dx = parameters['sigmoid_dx']\n \n return A2 + (A1 - A2)/(1.+ np.exp((x - x0)/dx))\n \ndef sigmoid2 (x, parameters):\n \"\"\"Sigmoid 2 function\n POI = K / (C + (A*x**B))\n \n Parameters\n ----------\n x: float or array of floats or array of floats\n variable\n parameters: dict\n dictionary containing 'sigmoid2_K','sigmoid2_C','sigmoid2_A',\n and 'sigmoid2_B'\n \n Returns\n -------\n float or array of floats:\n function result\n \"\"\"\n K = parameters['sigmoid2_K']\n C = parameters['sigmoid2_C']\n A = parameters['sigmoid2_A']\n B = parameters['sigmoid2_B']\n return K / (C + (A * x**B)) \n\n\ndef linear (x, parameters):\n \"\"\"Sigmoid function\n POI = a + (b * x )\n \n Parameters\n ----------\n x: float or array of floats\n variable\n parameters: dict\n dictionary containing 'linear_a', and 'linear_b'\n \n Returns\n -------\n float or array of floats:\n function result\n \"\"\"\n a = parameters['linear_a']\n b = parameters['linear_b']\n return a + (b * x)\n \ndef hill (x, parameters):\n \"\"\"Hill function\n POI = (B*(x^n))/(1+(x^n))\n \n Parameters\n ----------\n x: float or array of floats\n variable\n parameters: dict\n dictionary containing 'hill_B' and 'hill_N'\n \n Returns\n -------\n float or array of floats:\n function result\n \"\"\"\n B = parameters['hill_B']\n N = parameters['hill_N']\n return (B * (x**N))/(1. 
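# compute_end_date in the crm_lead record above adds months with hand-rolled
# year/month/day arithmetic plus a retry loop for invalid days. A sketch of
# the same month addition with dateutil (an extra dependency, so only a
# suggestion, not the record's method); note that relativedelta clamps the day
# itself, which can land one day off from the record's subtract-then-retry
# loop at month ends.
from datetime import datetime
from dateutil.relativedelta import relativedelta

start = datetime.strptime("2017-01-31", "%Y-%m-%d")
print(start + relativedelta(months=1))  # 2017-02-28 00:00:00 (day clamped)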
+ (x**N))\n\n# table of functions\ntable = {\n 'sigmoid': sigmoid,\n 'linear': linear,\n 'sigmoid2': sigmoid2,\n 'hill': hill,\n}\n","sub_path":"atm/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"543860084","text":"# -*- coding: utf-8 -*-\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom qq.items import QqItem\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy import log\n\n\nclass NewsSpider(CrawlSpider):\n name = \"news\"\n allowed_domains = [\"news.qq.com\"]\n start_urls = ['http://news.qq.com/']\n rules = (\n Rule(LinkExtractor(allow=(\".*.qq.com/a/\\d*/\\d*.htm\",), allow_domains=(\"qq.com\",)), callback=\"parse_item\",\n follow=True),\n Rule(LinkExtractor(allow=(\"news.qq.com/\\w*_index.shtml\",), allow_domains=(\"qq.com\",)), follow=True),\n Rule(LinkExtractor(allow=(\"(?y:\r\n print(\"La palabra %s tiene mas vocales.\" % (palabra1))\r\n elif xright):\n BL+=wf\n\n j+=1\n id+=1\nnp.savez(path+'PMT{}/BL'.format(pmt), BL=BL/j)\nplt.figure()\nplt.plot(BL/j, 'k.')\nplt.show()\n","sub_path":"calib/make_bl.py","file_name":"make_bl.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"440326701","text":"# Copyright 2023 Google LLC. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom connector import channel\nfrom google3.cloud.graphite.mmv2.services.google.compute import (\n network_firewall_policy_rule_pb2,\n)\nfrom google3.cloud.graphite.mmv2.services.google.compute import (\n network_firewall_policy_rule_pb2_grpc,\n)\n\nfrom typing import List\n\n\nclass NetworkFirewallPolicyRule(object):\n def __init__(\n self,\n description: str = None,\n rule_name: str = None,\n priority: int = None,\n location: str = None,\n match: dict = None,\n action: str = None,\n direction: str = None,\n enable_logging: bool = None,\n rule_tuple_count: int = None,\n target_service_accounts: list = None,\n target_secure_tags: list = None,\n disabled: bool = None,\n kind: str = None,\n firewall_policy: str = None,\n project: str = None,\n service_account_file: str = \"\",\n ):\n channel.initialize()\n self.description = description\n self.rule_name = rule_name\n self.priority = priority\n self.location = location\n self.match = match\n self.action = action\n self.direction = direction\n self.enable_logging = enable_logging\n self.target_service_accounts = target_service_accounts\n self.target_secure_tags = target_secure_tags\n self.disabled = disabled\n self.firewall_policy = firewall_policy\n self.project = project\n self.service_account_file = service_account_file\n\n def apply(self):\n stub = network_firewall_policy_rule_pb2_grpc.ComputeBetaNetworkFirewallPolicyRuleServiceStub(\n channel.Channel()\n )\n request = (\n network_firewall_policy_rule_pb2.ApplyComputeBetaNetworkFirewallPolicyRuleRequest()\n )\n if 
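# A worked evaluation of the parameter-dict interface used by sigmoid() in the
# functions record above; the expression is inlined so the snippet runs on its
# own, and the numbers are arbitrary.
import numpy as np

params = {"sigmoid_A1": 0.0, "sigmoid_A2": 1.0, "sigmoid_x0": 0.5, "sigmoid_dx": 0.1}
x = np.array([0.0, 0.5, 1.0])
y = params["sigmoid_A2"] + (params["sigmoid_A1"] - params["sigmoid_A2"]) / (
    1.0 + np.exp((x - params["sigmoid_x0"]) / params["sigmoid_dx"])
)
print(y)  # ~[0.0067 0.5 0.9933]; at x0 the curve sits halfway between A1 and A2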
Primitive.to_proto(self.description):\n request.resource.description = Primitive.to_proto(self.description)\n\n if Primitive.to_proto(self.rule_name):\n request.resource.rule_name = Primitive.to_proto(self.rule_name)\n\n if Primitive.to_proto(self.priority):\n request.resource.priority = Primitive.to_proto(self.priority)\n\n if Primitive.to_proto(self.location):\n request.resource.location = Primitive.to_proto(self.location)\n\n if NetworkFirewallPolicyRuleMatch.to_proto(self.match):\n request.resource.match.CopyFrom(\n NetworkFirewallPolicyRuleMatch.to_proto(self.match)\n )\n else:\n request.resource.ClearField(\"match\")\n if Primitive.to_proto(self.action):\n request.resource.action = Primitive.to_proto(self.action)\n\n if NetworkFirewallPolicyRuleDirectionEnum.to_proto(self.direction):\n request.resource.direction = (\n NetworkFirewallPolicyRuleDirectionEnum.to_proto(self.direction)\n )\n\n if Primitive.to_proto(self.enable_logging):\n request.resource.enable_logging = Primitive.to_proto(self.enable_logging)\n\n if Primitive.to_proto(self.target_service_accounts):\n request.resource.target_service_accounts.extend(\n Primitive.to_proto(self.target_service_accounts)\n )\n if NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n ):\n request.resource.target_secure_tags.extend(\n NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n )\n )\n if Primitive.to_proto(self.disabled):\n request.resource.disabled = Primitive.to_proto(self.disabled)\n\n if Primitive.to_proto(self.firewall_policy):\n request.resource.firewall_policy = Primitive.to_proto(self.firewall_policy)\n\n if Primitive.to_proto(self.project):\n request.resource.project = Primitive.to_proto(self.project)\n\n request.service_account_file = self.service_account_file\n\n response = stub.ApplyComputeBetaNetworkFirewallPolicyRule(request)\n self.description = Primitive.from_proto(response.description)\n self.rule_name = Primitive.from_proto(response.rule_name)\n self.priority = Primitive.from_proto(response.priority)\n self.location = Primitive.from_proto(response.location)\n self.match = NetworkFirewallPolicyRuleMatch.from_proto(response.match)\n self.action = Primitive.from_proto(response.action)\n self.direction = NetworkFirewallPolicyRuleDirectionEnum.from_proto(\n response.direction\n )\n self.enable_logging = Primitive.from_proto(response.enable_logging)\n self.rule_tuple_count = Primitive.from_proto(response.rule_tuple_count)\n self.target_service_accounts = Primitive.from_proto(\n response.target_service_accounts\n )\n self.target_secure_tags = (\n NetworkFirewallPolicyRuleTargetSecureTagsArray.from_proto(\n response.target_secure_tags\n )\n )\n self.disabled = Primitive.from_proto(response.disabled)\n self.kind = Primitive.from_proto(response.kind)\n self.firewall_policy = Primitive.from_proto(response.firewall_policy)\n self.project = Primitive.from_proto(response.project)\n\n def delete(self):\n stub = network_firewall_policy_rule_pb2_grpc.ComputeBetaNetworkFirewallPolicyRuleServiceStub(\n channel.Channel()\n )\n request = (\n network_firewall_policy_rule_pb2.DeleteComputeBetaNetworkFirewallPolicyRuleRequest()\n )\n request.service_account_file = self.service_account_file\n if Primitive.to_proto(self.description):\n request.resource.description = Primitive.to_proto(self.description)\n\n if Primitive.to_proto(self.rule_name):\n request.resource.rule_name = Primitive.to_proto(self.rule_name)\n\n if Primitive.to_proto(self.priority):\n request.resource.priority = 
Primitive.to_proto(self.priority)\n\n if Primitive.to_proto(self.location):\n request.resource.location = Primitive.to_proto(self.location)\n\n if NetworkFirewallPolicyRuleMatch.to_proto(self.match):\n request.resource.match.CopyFrom(\n NetworkFirewallPolicyRuleMatch.to_proto(self.match)\n )\n else:\n request.resource.ClearField(\"match\")\n if Primitive.to_proto(self.action):\n request.resource.action = Primitive.to_proto(self.action)\n\n if NetworkFirewallPolicyRuleDirectionEnum.to_proto(self.direction):\n request.resource.direction = (\n NetworkFirewallPolicyRuleDirectionEnum.to_proto(self.direction)\n )\n\n if Primitive.to_proto(self.enable_logging):\n request.resource.enable_logging = Primitive.to_proto(self.enable_logging)\n\n if Primitive.to_proto(self.target_service_accounts):\n request.resource.target_service_accounts.extend(\n Primitive.to_proto(self.target_service_accounts)\n )\n if NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n ):\n request.resource.target_secure_tags.extend(\n NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n )\n )\n if Primitive.to_proto(self.disabled):\n request.resource.disabled = Primitive.to_proto(self.disabled)\n\n if Primitive.to_proto(self.firewall_policy):\n request.resource.firewall_policy = Primitive.to_proto(self.firewall_policy)\n\n if Primitive.to_proto(self.project):\n request.resource.project = Primitive.to_proto(self.project)\n\n response = stub.DeleteComputeBetaNetworkFirewallPolicyRule(request)\n\n @classmethod\n def list(self, project, location, firewallPolicy, service_account_file=\"\"):\n stub = network_firewall_policy_rule_pb2_grpc.ComputeBetaNetworkFirewallPolicyRuleServiceStub(\n channel.Channel()\n )\n request = (\n network_firewall_policy_rule_pb2.ListComputeBetaNetworkFirewallPolicyRuleRequest()\n )\n request.service_account_file = service_account_file\n request.Project = project\n\n request.Location = location\n\n request.FirewallPolicy = firewallPolicy\n\n return stub.ListComputeBetaNetworkFirewallPolicyRule(request).items\n\n def to_proto(self):\n resource = (\n network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRule()\n )\n if Primitive.to_proto(self.description):\n resource.description = Primitive.to_proto(self.description)\n if Primitive.to_proto(self.rule_name):\n resource.rule_name = Primitive.to_proto(self.rule_name)\n if Primitive.to_proto(self.priority):\n resource.priority = Primitive.to_proto(self.priority)\n if Primitive.to_proto(self.location):\n resource.location = Primitive.to_proto(self.location)\n if NetworkFirewallPolicyRuleMatch.to_proto(self.match):\n resource.match.CopyFrom(NetworkFirewallPolicyRuleMatch.to_proto(self.match))\n else:\n resource.ClearField(\"match\")\n if Primitive.to_proto(self.action):\n resource.action = Primitive.to_proto(self.action)\n if NetworkFirewallPolicyRuleDirectionEnum.to_proto(self.direction):\n resource.direction = NetworkFirewallPolicyRuleDirectionEnum.to_proto(\n self.direction\n )\n if Primitive.to_proto(self.enable_logging):\n resource.enable_logging = Primitive.to_proto(self.enable_logging)\n if Primitive.to_proto(self.target_service_accounts):\n resource.target_service_accounts.extend(\n Primitive.to_proto(self.target_service_accounts)\n )\n if NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n ):\n resource.target_secure_tags.extend(\n NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n )\n )\n if 
Primitive.to_proto(self.disabled):\n resource.disabled = Primitive.to_proto(self.disabled)\n if Primitive.to_proto(self.firewall_policy):\n resource.firewall_policy = Primitive.to_proto(self.firewall_policy)\n if Primitive.to_proto(self.project):\n resource.project = Primitive.to_proto(self.project)\n return resource\n\n\nclass NetworkFirewallPolicyRuleMatch(object):\n def __init__(\n self,\n src_ip_ranges: list = None,\n dest_ip_ranges: list = None,\n layer4_configs: list = None,\n src_secure_tags: list = None,\n src_region_codes: list = None,\n dest_region_codes: list = None,\n src_threat_intelligences: list = None,\n dest_threat_intelligences: list = None,\n src_fqdns: list = None,\n dest_fqdns: list = None,\n src_address_groups: list = None,\n dest_address_groups: list = None,\n ):\n self.src_ip_ranges = src_ip_ranges\n self.dest_ip_ranges = dest_ip_ranges\n self.layer4_configs = layer4_configs\n self.src_secure_tags = src_secure_tags\n self.src_region_codes = src_region_codes\n self.dest_region_codes = dest_region_codes\n self.src_threat_intelligences = src_threat_intelligences\n self.dest_threat_intelligences = dest_threat_intelligences\n self.src_fqdns = src_fqdns\n self.dest_fqdns = dest_fqdns\n self.src_address_groups = src_address_groups\n self.dest_address_groups = dest_address_groups\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleMatch()\n )\n if Primitive.to_proto(resource.src_ip_ranges):\n res.src_ip_ranges.extend(Primitive.to_proto(resource.src_ip_ranges))\n if Primitive.to_proto(resource.dest_ip_ranges):\n res.dest_ip_ranges.extend(Primitive.to_proto(resource.dest_ip_ranges))\n if NetworkFirewallPolicyRuleMatchLayer4ConfigsArray.to_proto(\n resource.layer4_configs\n ):\n res.layer4_configs.extend(\n NetworkFirewallPolicyRuleMatchLayer4ConfigsArray.to_proto(\n resource.layer4_configs\n )\n )\n if NetworkFirewallPolicyRuleMatchSrcSecureTagsArray.to_proto(\n resource.src_secure_tags\n ):\n res.src_secure_tags.extend(\n NetworkFirewallPolicyRuleMatchSrcSecureTagsArray.to_proto(\n resource.src_secure_tags\n )\n )\n if Primitive.to_proto(resource.src_region_codes):\n res.src_region_codes.extend(Primitive.to_proto(resource.src_region_codes))\n if Primitive.to_proto(resource.dest_region_codes):\n res.dest_region_codes.extend(Primitive.to_proto(resource.dest_region_codes))\n if Primitive.to_proto(resource.src_threat_intelligences):\n res.src_threat_intelligences.extend(\n Primitive.to_proto(resource.src_threat_intelligences)\n )\n if Primitive.to_proto(resource.dest_threat_intelligences):\n res.dest_threat_intelligences.extend(\n Primitive.to_proto(resource.dest_threat_intelligences)\n )\n if Primitive.to_proto(resource.src_fqdns):\n res.src_fqdns.extend(Primitive.to_proto(resource.src_fqdns))\n if Primitive.to_proto(resource.dest_fqdns):\n res.dest_fqdns.extend(Primitive.to_proto(resource.dest_fqdns))\n if Primitive.to_proto(resource.src_address_groups):\n res.src_address_groups.extend(\n Primitive.to_proto(resource.src_address_groups)\n )\n if Primitive.to_proto(resource.dest_address_groups):\n res.dest_address_groups.extend(\n Primitive.to_proto(resource.dest_address_groups)\n )\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return NetworkFirewallPolicyRuleMatch(\n src_ip_ranges=Primitive.from_proto(resource.src_ip_ranges),\n dest_ip_ranges=Primitive.from_proto(resource.dest_ip_ranges),\n 
layer4_configs=NetworkFirewallPolicyRuleMatchLayer4ConfigsArray.from_proto(\n resource.layer4_configs\n ),\n src_secure_tags=NetworkFirewallPolicyRuleMatchSrcSecureTagsArray.from_proto(\n resource.src_secure_tags\n ),\n src_region_codes=Primitive.from_proto(resource.src_region_codes),\n dest_region_codes=Primitive.from_proto(resource.dest_region_codes),\n src_threat_intelligences=Primitive.from_proto(\n resource.src_threat_intelligences\n ),\n dest_threat_intelligences=Primitive.from_proto(\n resource.dest_threat_intelligences\n ),\n src_fqdns=Primitive.from_proto(resource.src_fqdns),\n dest_fqdns=Primitive.from_proto(resource.dest_fqdns),\n src_address_groups=Primitive.from_proto(resource.src_address_groups),\n dest_address_groups=Primitive.from_proto(resource.dest_address_groups),\n )\n\n\nclass NetworkFirewallPolicyRuleMatchArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [NetworkFirewallPolicyRuleMatch.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [NetworkFirewallPolicyRuleMatch.from_proto(i) for i in resources]\n\n\nclass NetworkFirewallPolicyRuleMatchLayer4Configs(object):\n def __init__(self, ip_protocol: str = None, ports: list = None):\n self.ip_protocol = ip_protocol\n self.ports = ports\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleMatchLayer4Configs()\n )\n if Primitive.to_proto(resource.ip_protocol):\n res.ip_protocol = Primitive.to_proto(resource.ip_protocol)\n if Primitive.to_proto(resource.ports):\n res.ports.extend(Primitive.to_proto(resource.ports))\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return NetworkFirewallPolicyRuleMatchLayer4Configs(\n ip_protocol=Primitive.from_proto(resource.ip_protocol),\n ports=Primitive.from_proto(resource.ports),\n )\n\n\nclass NetworkFirewallPolicyRuleMatchLayer4ConfigsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n NetworkFirewallPolicyRuleMatchLayer4Configs.to_proto(i) for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n NetworkFirewallPolicyRuleMatchLayer4Configs.from_proto(i) for i in resources\n ]\n\n\nclass NetworkFirewallPolicyRuleMatchSrcSecureTags(object):\n def __init__(self, name: str = None, state: str = None):\n self.name = name\n self.state = state\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleMatchSrcSecureTags()\n )\n if Primitive.to_proto(resource.name):\n res.name = Primitive.to_proto(resource.name)\n if NetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum.to_proto(\n resource.state\n ):\n res.state = NetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum.to_proto(\n resource.state\n )\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return NetworkFirewallPolicyRuleMatchSrcSecureTags(\n name=Primitive.from_proto(resource.name),\n state=NetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum.from_proto(\n resource.state\n ),\n )\n\n\nclass NetworkFirewallPolicyRuleMatchSrcSecureTagsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n NetworkFirewallPolicyRuleMatchSrcSecureTags.to_proto(i) for i in resources\n ]\n\n 
@classmethod\n def from_proto(self, resources):\n return [\n NetworkFirewallPolicyRuleMatchSrcSecureTags.from_proto(i) for i in resources\n ]\n\n\nclass NetworkFirewallPolicyRuleTargetSecureTags(object):\n def __init__(self, name: str = None, state: str = None):\n self.name = name\n self.state = state\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleTargetSecureTags()\n )\n if Primitive.to_proto(resource.name):\n res.name = Primitive.to_proto(resource.name)\n if NetworkFirewallPolicyRuleTargetSecureTagsStateEnum.to_proto(resource.state):\n res.state = NetworkFirewallPolicyRuleTargetSecureTagsStateEnum.to_proto(\n resource.state\n )\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return NetworkFirewallPolicyRuleTargetSecureTags(\n name=Primitive.from_proto(resource.name),\n state=NetworkFirewallPolicyRuleTargetSecureTagsStateEnum.from_proto(\n resource.state\n ),\n )\n\n\nclass NetworkFirewallPolicyRuleTargetSecureTagsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n NetworkFirewallPolicyRuleTargetSecureTags.to_proto(i) for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n NetworkFirewallPolicyRuleTargetSecureTags.from_proto(i) for i in resources\n ]\n\n\nclass NetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum.Value(\n \"ComputeBetaNetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum%s\"\n % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum.Name(\n resource\n )[\n len(\"ComputeBetaNetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum\") :\n ]\n\n\nclass NetworkFirewallPolicyRuleDirectionEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleDirectionEnum.Value(\n \"ComputeBetaNetworkFirewallPolicyRuleDirectionEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleDirectionEnum.Name(\n resource\n )[\n len(\"ComputeBetaNetworkFirewallPolicyRuleDirectionEnum\") :\n ]\n\n\nclass NetworkFirewallPolicyRuleTargetSecureTagsStateEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleTargetSecureTagsStateEnum.Value(\n \"ComputeBetaNetworkFirewallPolicyRuleTargetSecureTagsStateEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleTargetSecureTagsStateEnum.Name(\n resource\n )[\n len(\"ComputeBetaNetworkFirewallPolicyRuleTargetSecureTagsStateEnum\") :\n ]\n\n\nclass Primitive(object):\n @classmethod\n def to_proto(self, s):\n if not s:\n return \"\"\n return s\n\n @classmethod\n def from_proto(self, s):\n return 
s\n","sub_path":"python/services/compute/beta/network_firewall_policy_rule.py","file_name":"network_firewall_policy_rule.py","file_ext":"py","file_size_in_byte":23351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"524035563","text":"import os\nimport re\nimport requests\nimport time\n\n# Download path; defaults to the directory containing this file\ndownloadPath = os.path.abspath(os.path.dirname(__file__))\ndownloadPath += os.path.sep + time.strftime(\"%Y%m%d\", time.localtime())\n\ndef downloadPic(url, fileName):\n # Save the image\n print(\"Downloading \" + fileName)\n # fileName = downloadCount + 1\n with open(downloadPath+os.path.sep+fileName, 'wb') as file:\n file.write(requests.get(url).content)\n\n\ndef getPicUrl():\n print(\"Fetching ...\")\n response = requests.get(\"http://yunjie.f06.87yun.club/st/r/\")\n pattern = re.compile(r'=1):\n # for n in empval[0]:\n # self.df.iloc[ n,cn ] = replacer\n #success\n return True\n\n def apply_mapper(self, cols ,mets , arglist ):\n '''apply mapper to the columns\n example :\n apply_mapper( ['Age Group','Ethnicity'],'labelencoder' )\n applies LabelEncoder to AgeGroup and Ethnicity.\n mets can be just 'LabelEncoder' and it will be assigned to all the columns listed \n by cols. cols can be None and it will apply to all the columns\n @cols - specify which column to apply the mapper\n @met - which method (labelencoder, onehotencoder, standardscaler, ordinalencoder)\n '''\n # mets is singular, apply it to all\n if( cols is not None):\n #apply to specific\n if( not isinstance( cols,list )):\n cols = [cols] #convert to list\n return self.lkup[mets](self,cols,arglist)\n else:\n #apply to all\n return self.lkup[mets](self,self.hlist,arglist)\n\n def _apply_labelencoder(self, cols, arglist):\n '''applies the labelencoder over the columns.'''\n ec = LabelEncoder()\n for c in cols:\n ec.fit( self.df[c] )\n self.df[c] = ec.transform( self.df[c] )\n return True\n\n def _apply_standardscaler(self,cols, arglist):\n '''applies the standardscaler such that the transform will have a\n std. dev. of 1 and a mean of 0. cols should ideally be a list'''\n sc = StandardScaler()\n self.df[cols] = sc.fit_transform( self.df[cols] )\n return True\n\n def _apply_minmaxscaler(self,cols, arglist):\n '''applies the minmaxscaler such that the feature is scaled\n to within the range specified in arglist. see sklearn.preprocessing.MinMaxScaler'''\n sc = MinMaxScaler((arglist[0],arglist[1])) #arglist[0] - min, arglist[1] - max\n # by default minmax scaler uses 0 as min and 1 as max\n self.df[cols] = sc.fit_transform( self.df[cols] )\n return True\n\n def _apply_onehotencoder(self,cols, arglist):\n '''cols should be a list !, applies one hot encoder to the columns.\n update1: now cols can be singular, the function auto converts it to a list'''\n dummies = pandas.get_dummies( self.df.filter( cols, axis= self._constant_axis_COLS))\n self.df.drop( cols, axis= self._constant_axis_COLS, inplace=True)\n self.df = self.df.join( dummies )\n return True\n\n def rearrange_cols(self, target, mets, arglist ):\n '''rearrange the columns. 
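# The Primitive helper in the network-firewall record above maps every falsy
# value to "" before it reaches the proto. A tiny standalone illustration of
# that contract and its gotcha (0 and False are swallowed too); the class body
# is copied from the record.
class Primitive(object):
    @classmethod
    def to_proto(cls, s):
        if not s:
            return ""
        return s

print(repr(Primitive.to_proto("INGRESS")))  # 'INGRESS'
print(repr(Primitive.to_proto(0)))          # '' -- a priority of 0 would be dropped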
move target end of the dataframe\n the last column -- useful to moving the target to the end for training'''\n # TODO: allow target to be moved to specified index\n excludelist = self.df.columns.values.tolist()\n if( target in excludelist ):\n if( isinstance(target,list) ):\n self.error(target,\"cannot be a list in rearrange operation\")\n return False\n ind_tar = excludelist.index(target)\n del excludelist[ind_tar]\n self.df = self.df[ excludelist + [target] ]\n return True\n else:\n self.error(target,\"not a column in the dataframe!\")\n return False\n\n def rewrite( self, filename = fname ,aindex=False):\n '''rewrites the dataframe to a csv file'''\n self.df.to_csv( filename , index=aindex)\n\n def preproc(self, procedure, col, mets, arglist):\n '''easy caller. use this to call the preprocessing methods'''\n '''use None as placeholder for mets if not applicable'''\n res = self.proc[procedure](self,col,mets,arglist)\n self.hlist = self.df.columns.values.tolist()\n return res\n\n proc = {\n \"preproc\": apply_mapper,\n \"ignore\": ignorecolumn,\n \"fill\": fill_empty,\n \"select\": selectcolumn,\n \"moveback\": rearrange_cols\n }\n\n lkup = {\n \"labelencoder\": _apply_labelencoder,\n \"onehotencoder\": _apply_onehotencoder,\n \"standardscaler\":_apply_standardscaler,\n \"minmaxscaler\":_apply_minmaxscaler\n }\n\n","sub_path":"preproc/neoctl.py","file_name":"neoctl.py","file_ext":"py","file_size_in_byte":11240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"406780160","text":"import json\nimport os\nimport pprint\nimport re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom dateutil import parser\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom mainapp.management.commands.load_edo_centers_accred import get_edo_auth\nfrom reestr.models import (GTU, SO, AccreditedCenter,\n AccreditedCertificationPoint, City, GroupSM, Level,\n SROMember, WeldType)\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nfrom requests.packages.urllib3.util.retry import Retry\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\n\n# __import__('ipdb').set_trace()\n\n\nhead = {\n \"Host\": \"ac.naks.ru\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36\",\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"Accept-Encoding\": \"gzip, deflate\",\n}\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--clean\",\n action=\"store_true\",\n help=\"clean all sro members\",\n )\n parser.add_argument(\n \"--wipe\",\n action=\"store_true\",\n help=\"wipe all sro members before\",\n )\n parser.add_argument(\n \"--deploy\",\n action=\"store_true\",\n help=\"use on remote server\",\n ),\n\n def handle(self, *args, **options):\n ##################################\n ##### make initiate_centers first!\n ##################################\n\n payload = get_edo_auth()\n\n # requests.adapters.DEFLAULT_RETRIES = 5\n if options[\"deploy\"]:\n certifi_cert = (\n \"/home/popov/django3/lib/python3.10/site-packages/certifi/cacert.pem\"\n )\n else:\n certifi_cert = (\n \"/home/popov/django3/lib/python3.8/site-packages/certifi/cacert.pem\"\n )\n del os.environ[\"CA_BUNDLE\"]\n del os.environ[\"REQUESTS_CA_BUNDLE\"]\n del os.environ[\"SSL_CERT_FILE\"]\n\n os.environ[\"CA_BUNDLE\"] = certifi_cert\n 
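# _apply_onehotencoder in the neoctl record above leans on pandas.get_dummies,
# then drops the source columns and joins the dummies back. The same three
# steps on a throwaway frame:
import pandas

df = pandas.DataFrame({"color": ["red", "blue", "red"], "n": [1, 2, 3]})
dummies = pandas.get_dummies(df.filter(["color"], axis=1))
df = df.drop(["color"], axis=1).join(dummies)
print(df)  # columns: n, color_blue, color_red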
os.environ[\"REQUESTS_CA_BUNDLE\"] = certifi_cert\n os.environ[\"SSL_CERT_FILE\"] = certifi_cert\n\n with requests.Session() as sess:\n # sess.keep_alive = False\n # retry = Retry(connect=3, backoff_factor=1)\n # adapter = HTTPAdapter(max_retries=retry)\n # sess.mount('http://', adapter)\n # sess.mount('https://', adapter)\n # login_url = \"https://ac.naks.ru/\"\n # path_to_fullchain_pem = '/etc/letsencrypt/live/jango.naks.ru/fullchain.pem'\n\n path_to_fullchain_pem = \"/home/popov/naks-new/jango-naks-ru-chain.pem\"\n path_to_cert_folder = \"/etc/letsencrypt/live/jango.naks.ru/\"\n\n cert_tuple = (\n \"/etc/letsencrypt/live/jango.naks.ru/private.key\",\n \"/etc/letsencrypt/live/jango.naks.ru/servert.cert\",\n )\n\n login_url = \"https://ac.naks.ru/\"\n\n log_me_in = sess.post(login_url, data=payload, headers=head)\n all_orgs_url = \"https://ac.naks.ru/org/index.php?SHOWALL_1=1#nav_start\"\n\n all_orgs_page = sess.get(all_orgs_url, headers=head)\n org_soup = BeautifulSoup(all_orgs_page.text, \"html.parser\")\n all_orgs_hrefs = org_soup.find_all(\"a\", attrs={\"class\": \"ahidden\"})\n counter = 0\n # __import__('ipdb').set_trace()\n for href in all_orgs_hrefs:\n edo_id = re.findall(r\"\\d+\", href.attrs[\"href\"])\n sro_id_centers = AccreditedCenter.objects.filter(\n json_data__org_external_id=edo_id[0]\n )\n edit_org_url = (\n f\"https://ac.naks.ru/org/detail.php?ID={edo_id[0]}&action=edit\"\n )\n\n edit_page = sess.get(edit_org_url)\n edit_page_soup = BeautifulSoup(edit_page.text, \"html.parser\")\n org_name = edit_page_soup.find(\"h3\").get_text()\n\n sm, new_sro_member_created = SROMember.objects.get_or_create(\n short_name=org_name\n )\n if sro_id_centers.exists():\n for center in sro_id_centers:\n center.sro_member = sm\n center.save()\n\n np_select = edit_page_soup.find(\"select\", attrs={\"name\": \"PROP[np]\"})\n sro_option = np_select.find(\"option\", attrs={\"selected\": True})\n print(\"-->\", counter, org_name)\n # print(\"sro option\", sro_option.get_text())\n # sro_members_bd = SROMember.objects.all()\n\n if sro_option:\n if sro_option.get(\"value\") == \"1\":\n sm.status = \"a\"\n else:\n sm.status = \"na\"\n\n sro_actual_addr_input = edit_page_soup.find(\n \"input\", attrs={\"name\": \"PROP[address]\"}\n )\n sro_ur_addr_input = edit_page_soup.find(\n \"input\", attrs={\"name\": \"PROP[contact]\"}\n )\n sro_post_addr_input = edit_page_soup.find(\n \"input\", attrs={\"name\": \"PROP[mail_address]\"}\n )\n sm_short_name_input = edit_page_soup.find(\n \"input\", attrs={\"name\": \"NAME\"}\n )\n sm_full_name_input = edit_page_soup.find(\n \"input\", attrs={\"name\": \"LONGNAME\"}\n )\n city_input = edit_page_soup.find(\n \"input\",\n attrs={\"name\": \"PROP[city]\"},\n )\n sm.short_name = sm_short_name_input.get(\"value\")\n\n if city_input.get(\"value\"):\n city, city_created = City.objects.get_or_create(\n title=city_input.get(\"value\")\n )\n sm.city = city\n\n sm.full_name = sm_full_name_input.get(\"value\")\n sm.actual_address = sro_actual_addr_input.get(\"value\")\n sm.ur_address = sro_ur_addr_input.get(\"value\")\n if sro_post_addr_input:\n sm.post_address = sro_post_addr_input.get(\"value\")\n sm.save()\n if sm.status == \"a\":\n sm.load_point_coordinates()\n print(\n \"sro member updated\",\n sm.short_name,\n sm.full_name,\n sm.coordinates,\n sm.status,\n )\n counter += 1\n\n if options.get(\"clean\"):\n sro_members_all = SROMember.objects.all()\n for sm in sro_members_all:\n if not sm.centers.select_related().count():\n 
sm.delete()\n","sub_path":"mainapp/management/commands/parse_sro.py","file_name":"parse_sro.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"333437503","text":"import os\nimport sys\nimport subprocess\nimport setuptools\nimport shutil\nimport tempfile\nimport logging\nfrom zc.buildout.download import Download\nimport zc\nimport zc.recipe.egg\n\nDOWNLOAD_URL = \"https://projects.unbit.it/downloads/uwsgi-{0}.tar.gz\"\nMARKER = object()\n\n\ndef str_to_bool(s):\n \"\"\"\n Converts a string to a bool value; looks at the first character,\n if it's y(es), t(rue) or 1 returns True, otherwise, False.\n \"\"\"\n if len(s) > 0 and s[0] in \"yYtT1\":\n return True\n return False\n\n\nclass UWSGI:\n \"\"\"\n Buildout recipe downloading, compiling and configuring python paths for uWSGI.\n \"\"\"\n\n def __init__(self, buildout, name, options):\n self.egg = zc.recipe.egg.Egg(buildout, options[\"recipe\"], options)\n self.name = name\n self.buildout = buildout\n self.log = logging.getLogger(self.name)\n\n global_options = buildout[\"buildout\"]\n # Use the \"download-cache\" directory as cache, if present\n self.cache_dir = global_options.get(\"download-cache\")\n\n if self.cache_dir is not None:\n # If cache_dir isn't an absolute path, make it relative to\n # buildout's directory\n if not os.path.isabs(self.cache_dir):\n self.cache_dir = os.path.join(global_options[\"directory\"], self.cache_dir)\n\n self.use_system_binary = str_to_bool(options.get(\"use-system-binary\", \"false\"))\n self.uwsgi_version = options.get(\"version\", \"latest\")\n self.md5sum = options.get('md5sum') or None # empty string => None\n self.uwsgi_binary_path = os.path.join(global_options[\"bin-directory\"], \"uwsgi\")\n\n # xml, ini\n self.config_file_format = options.get(\"output-format\", \"xml\").lower()\n if self.config_file_format not in [\"xml\", \"ini\"]:\n self.log.warning(\"unknown output configuration format, defaulting to xml\")\n self.config_file_format = \"xml\"\n\n if \"extra-paths\" in options:\n options[\"pythonpath\"] = options[\"extra-paths\"]\n else:\n options.setdefault(\"extra-paths\", options.get(\"pythonpath\", \"\"))\n\n self.output = options.setdefault(\"output\",\n os.path.join(global_options[\"parts-directory\"],\n self.name,\n \"uwsgi.{0}\".format(self.config_file_format)))\n self.options = options\n\n def download_release(self):\n \"\"\"\n Download uWSGI release based on \"version\" option and return path to downloaded file.\n \"\"\"\n if self.cache_dir is not None:\n download = Download(cache=self.cache_dir)\n else:\n self.log.warning(\"not using a download cache for uwsgi\")\n download = Download()\n\n download_url = self.options.get(\"download-url\", DOWNLOAD_URL)\n download_path, is_temp = download(\n download_url.format(self.uwsgi_version), md5sum=self.md5sum)\n return download_path\n\n def extract_release(self, download_path):\n \"\"\"\n Extracts uWSGI package and returns path containing uwsgiconfig.py along with path to extraction root.\n \"\"\"\n uwsgi_path = None\n extract_path = tempfile.mkdtemp(\"-uwsgi\")\n setuptools.archive_util.unpack_archive(download_path, extract_path)\n for root, dirs, files in os.walk(extract_path):\n if \"uwsgiconfig.py\" in files:\n uwsgi_path = root\n return uwsgi_path, extract_path\n\n def build_uwsgi(self, uwsgi_path):\n \"\"\"\n Build uWSGI and returns path to executable.\n \"\"\"\n current_path = os.getcwd()\n profile = self.options.get(\"profile\", 
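# The parse_sro command above drives an authenticated requests.Session and
# scrapes edit forms with BeautifulSoup. Its core move -- find the selected
# option and read an input value -- against an inline HTML snippet instead of
# the live site:
from bs4 import BeautifulSoup

html = """<form><select name="PROP[np]"><option value="1" selected>member</option>
</select><input name="PROP[city]" value="Moscow"></form>"""
soup = BeautifulSoup(html, "html.parser")
selected = soup.find("select", attrs={"name": "PROP[np]"}).find("option", attrs={"selected": True})
city = soup.find("input", attrs={"name": "PROP[city]"})
print(selected.get("value"), city.get("value"))  # 1 Moscow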
MARKER)\n\n if profile is MARKER:\n profile = '%s/buildconf/default.ini' % uwsgi_path\n elif not os.path.isabs(profile):\n # if the specified profile is not an absolute path, try\n # looking for it in the buildout folder first; otherwise,\n # look for it in the current directory\n buildout_dir_profile = '%s/buildconf/%s' % (uwsgi_path, profile)\n if os.path.isfile(buildout_dir_profile):\n profile = buildout_dir_profile\n else:\n profile = os.path.abspath(profile)\n\n # Change dir to uwsgi_path for compile.\n os.chdir(uwsgi_path)\n build_stdout = tempfile.TemporaryFile()\n try:\n # Build uWSGI. We don't use the Makefile, since it uses an\n # override variable (with :=) we cannot specify the\n # Python interpreter we want to use.\n subprocess.check_call([self.options.get('executable', sys.executable),\n os.path.join(uwsgi_path, 'uwsgiconfig.py'),\n '--build',\n profile],\n stdout=build_stdout)\n finally:\n # Change back to original path.\n os.chdir(current_path)\n\n if os.path.isfile(self.uwsgi_binary_path):\n os.unlink(self.uwsgi_binary_path)\n\n shutil.copy(os.path.join(uwsgi_path, \"uwsgi\"), self.uwsgi_binary_path)\n\n def get_extra_paths(self):\n # Add libraries found by a site .pth files to our extra-paths.\n if 'pth-files' in self.options:\n import site\n for pth_file in self.options['pth-files'].splitlines():\n pth_libs = site.addsitedir(pth_file, set())\n if not pth_libs:\n self.log.warning('No site *.pth libraries found for pth_file=%s' % pth_file)\n else:\n self.log.info('Adding *.pth libraries=%s' % pth_libs)\n self.options['extra-paths'] += '\\n' + '\\n'.join(pth_libs)\n\n # Add local extra-paths.\n return [p.replace('/', os.path.sep) for p in\n self.options['extra-paths'].splitlines() if p.strip()]\n\n def create_configuration_file(self):\n warned = False\n conf = []\n\n for key, value in self.options.items():\n\n if key.startswith(\"xml-\") and len(key) > 4:\n if not warned:\n self.log.warning(\"using 'xml-' options has been deprecated in favor of 'config-'. 
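# build_uwsgi in the uwsgi-recipe record resolves its build profile in three
# steps: the bundled buildconf/default.ini, then a name looked up under
# buildconf/, then an absolute path. A condensed restating of that logic
# (the paths are invented for the example):
import os

def resolve_profile(uwsgi_path, profile=None):
    if profile is None:
        return "%s/buildconf/default.ini" % uwsgi_path
    if not os.path.isabs(profile):
        candidate = "%s/buildconf/%s" % (uwsgi_path, profile)
        return candidate if os.path.isfile(candidate) else os.path.abspath(profile)
    return profile

print(resolve_profile("/tmp/uwsgi-2.0"))  # /tmp/uwsgi-2.0/buildconf/default.ini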
\"\n \"See documentation for details.\")\n warned = True\n\n key = key[4:]\n\n elif key.startswith(\"config-\") and len(key) > 7:\n key = key[7:]\n else:\n continue\n\n if \"\\n\" in value:\n for subvalue in value.splitlines():\n conf.append((key, subvalue))\n else:\n conf.append((key, value))\n\n _, ws = self.egg.working_set()\n\n # get list of paths to put into pythonpath\n pythonpaths = ws.entries + self.get_extra_paths()\n\n # mungle basedir of pythonpath entries\n if 'pythonpath-eggs-directory' in self.options:\n source = self.options['eggs-directory']\n target = self.options['pythonpath-eggs-directory']\n pythonpaths = [path.replace(source, target) for path in pythonpaths]\n\n # generate pythonpath directives\n for path in pythonpaths:\n conf.append((\"pythonpath\", path))\n\n directory = os.path.dirname(self.output)\n if not os.path.isdir(directory):\n os.makedirs(directory)\n\n if self.config_file_format == \"xml\":\n self.write_config_as_xml(conf)\n elif self.config_file_format == \"ini\":\n self.write_config_as_ini(conf)\n\n return self.output\n\n def write_config_as_xml(self, conf_options):\n conf = \"\"\n for key, value in conf_options:\n if value.lower() == \"true\":\n conf += \"<{0}/>\\n\".format(key)\n elif value.lower() != \"false\":\n conf += \"<{0}>{1}\\n\".format(key, value)\n\n with open(self.output, \"w\") as f:\n f.write(\"\\n{0}\".format(conf))\n\n def write_config_as_ini(self, conf_options):\n conf = \"[uwsgi]\\n\"\n for key, value in conf_options:\n conf += \"{0} = {1}\\n\".format(key, value)\n with open(self.output, \"w\") as f:\n f.write(conf)\n\n def is_uwsgi_installed(self):\n if not os.path.isfile(self.uwsgi_binary_path):\n return False\n\n if self.uwsgi_version == 'latest':\n # If you ask for the latest version, we say we don't, in order to\n # force a download+recompile (since we can't know for sure if the package was\n # updated upstream or not)\n return False\n\n # Check the version\n process = subprocess.Popen([self.uwsgi_binary_path, '--version'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, universal_newlines=True)\n stdout, stderr = process.communicate()\n return stdout.strip() == self.uwsgi_version\n\n def install(self):\n paths = []\n if not self.use_system_binary:\n if not self.is_uwsgi_installed():\n # Download uWSGI.\n download_path = self.download_release()\n\n # Extract uWSGI.\n uwsgi_path, extract_path = self.extract_release(download_path)\n\n try:\n # Build uWSGI.\n self.build_uwsgi(uwsgi_path)\n finally:\n # Remove extracted uWSGI package.\n shutil.rmtree(extract_path)\n\n paths.append(self.uwsgi_binary_path)\n\n # Create uWSGI config file.\n paths.append(self.create_configuration_file())\n return paths\n\n update = install\n","sub_path":"buildout/recipe/uwsgi.py","file_name":"uwsgi.py","file_ext":"py","file_size_in_byte":9774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"287773859","text":"#!/usr/bin/env python\n\nimport time\nimport os\nimport sys\nimport xmlrpclib\nimport urlparse\nimport logging\nimport json\nimport shlex\nimport subprocess\nimport threading\nimport copy\n\nfrom crontab import CronItem\nfrom datetime import datetime, timedelta\nfrom socket import gethostname\n\n\nLOG_LEVEL = logging.INFO\nCRON_FILE = 'superbeat.cron'\n\nlog = logging.getLogger('superbeat')\nlogging.basicConfig(\n level=LOG_LEVEL,\n format='%(asctime)s.%(msecs)03d %(name)s %(levelname)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\ncmd_map = {\n 'start': 'supervisor.startProcess',\n 
'stop': 'supervisor.stopProcess',\n 'exec': 'run_process',\n}\n\nwait_arg_map = {\n 'wait': 'true',\n 'nowait': 'false'\n}\n\n\ndef cron_file_name():\n cron_file = sys.argv[1] if len(sys.argv) > 1 else CRON_FILE\n exe_dir = os.path.abspath(os.path.dirname(sys.argv[0]))\n return os.path.join(exe_dir, os.path.expanduser(cron_file))\n\n\ndef cron_file_time():\n try:\n return os.stat(cron_file_name()).st_mtime\n except IOError:\n return 0\n\n\ndef read_cron_file():\n cron_tabs = []\n filename = cron_file_name()\n try:\n for n, line in enumerate(open(filename)):\n line = line.strip()\n if line and not line.startswith('#'):\n cron_tabs.append(('TAB_%02d' % n, line))\n log.info('read cron file: %s', filename)\n except IOError:\n log.debug('skip cron file: %s', filename)\n return cron_tabs\n\n\ndef json_conv(s):\n try:\n return json.loads(s)\n except ValueError:\n return s\n\n\ndef prepare_crons():\n cron_args = [('ARG_%02d' % n, arg) for n, arg in enumerate(sys.argv[2:])]\n cron_vars = [(name, val) for name, val in os.environ.items()\n if name.startswith('CRON_')]\n cron_tabs = read_cron_file()\n localhost = gethostname().partition('.')[0]\n\n for name, token in sorted(cron_args + cron_vars + cron_tabs):\n try:\n when, where, action = token.split(':', 2)\n cron = CronItem(when.strip())\n where = where.strip().split()\n args = shlex.split(action.strip())\n cmd = args.pop(0)\n cmd = cmd_map.get(cmd, cmd)\n if args:\n args[-1] = wait_arg_map.get(args[-1], args[-1])\n args = map(json_conv, args)\n cmd_str = '%s(%s)' % (cmd, ','.join(map(str, args)))\n if '*' in where or localhost in where:\n log.debug('prepare cron %s: %s - %s' %\n (name, cmd_str, when.strip()))\n yield (cron, cmd, args, cmd_str)\n else:\n log.info('skip non-local cron \"%s\"' % token)\n except (ValueError, IOError) as exc:\n log.error('invalid cron \"%s\": %s' % (token, exc))\n\n\ndef connect_rpc():\n try:\n url = os.environ['SUPERVISOR_SERVER_URL']\n except KeyError:\n log.error('please run under supervisord')\n sys.exit(1)\n\n try:\n user_pass = os.environ['SUPERVISOR_USERPASS'] + '@'\n except KeyError:\n user_pass = ''\n\n if user_pass:\n parts = urlparse.urlsplit(url)\n rpc_url = urlparse.urlunsplit((\n parts.scheme, user_pass + parts.netloc,\n parts.path, parts.query, parts.fragment))\n else:\n rpc_url = url\n\n log.info('connect to supervisor %s', url)\n if url.startswith('unix://'):\n # See: http://stackoverflow.com/a/23837147\n import httplib\n import socket\n\n class UnixStreamHTTPConnection(httplib.HTTPConnection):\n def connect(self):\n self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self.sock.connect(self.host)\n\n class UnixStreamTransport(xmlrpclib.Transport, object):\n def __init__(self, socket_path):\n self.socket_path = socket_path\n super(UnixStreamTransport, self).__init__()\n\n def make_connection(self, host):\n return UnixStreamHTTPConnection(self.socket_path)\n\n sock_path = rpc_url[7:]\n log.debug('socket path is %s', sock_path)\n conn = xmlrpclib.Server(\n 'http://127.0.0.1', transport=UnixStreamTransport(sock_path))\n else:\n log.debug('rpc url is %s', rpc_url)\n conn = xmlrpclib.ServerProxy(rpc_url)\n log.debug('methods: %s', conn.system.listMethods())\n\n return conn\n\n\ndef resolve_method(cmd, runners):\n for obj in runners:\n try:\n if getattr(obj, cmd, None):\n return obj, cmd\n except Exception:\n try:\n if cmd in obj.system.listMethods():\n return obj, cmd\n except Exception:\n pass\n else:\n raise ValueError('method \"%s\" not found' % cmd)\n\n\nclass RunProcess(object):\n def 
__init__(self):\n self._threads = set()\n\n def __repr__(self):\n return '<%s>' % self.__class__.__name__\n\n def _poll(self):\n for thread in tuple(self._threads):\n if not thread.is_alive():\n thread.join(0)\n self._threads.remove(thread)\n\n def run_process(self, *args):\n args = copy.copy(args)\n\n def target():\n null = open(os.devnull)\n proc = subprocess.Popen(\n args,\n shell=False,\n stdin=null.fileno(),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n bufsize=0)\n cmd = '%s[%d]' % (os.path.basename(args[0]), proc.pid)\n log.debug('%s started', cmd)\n stdout, stderr = proc.communicate()\n null.close()\n for line in stdout.split('\\n'):\n line = line.strip()\n if line:\n log.info('%s says: %s', cmd, line)\n if proc.returncode:\n log.info('%s finished with status %d', cmd, proc.returncode)\n else:\n log.debug('%s finished', cmd)\n\n thread = threading.Thread(target=target)\n thread.start()\n thread.join(0.1)\n if thread.is_alive():\n self._threads.add(thread)\n\n\ndef main():\n tick = datetime.now().replace(second=0, microsecond=0)\n one_min = timedelta(0, 60)\n one_sec = timedelta(0, 1)\n gap_sec = one_sec * 2\n\n rpc_conn = connect_rpc()\n file_time = cron_file_time()\n run_proc = RunProcess()\n runners = (run_proc, rpc_conn)\n cron_list = list(prepare_crons())\n\n while 1:\n tick += one_min\n tick_eps = tick - gap_sec\n delay = (tick_eps - datetime.now()).total_seconds()\n log.debug('will tick in %d seconds', delay)\n if delay > 0:\n time.sleep(delay)\n run_proc._poll()\n\n new_file_time = cron_file_time()\n if new_file_time != file_time:\n cron_list = list(prepare_crons())\n file_time = new_file_time\n\n next_list = []\n for cron, cmd, args, cmd_str in cron_list:\n cron_next = cron.schedule().get_next()\n next_list.append((cron_next, cmd, args, cmd_str))\n\n delay = (tick - datetime.now()).total_seconds()\n if delay > 0:\n time.sleep(delay)\n\n for cron_next, cmd, args, cmd_str in next_list:\n diff = (tick - cron_next).total_seconds()\n log.debug('%s next %s (diff=%s)', cmd_str, cron_next, diff)\n if -30 < diff < 30:\n log.debug('calling %s', cmd_str)\n try:\n obj, met = resolve_method(cmd, runners)\n getattr(obj, met)(*args)\n log.info('called: %s', cmd_str)\n except ValueError:\n log.error('%s: method not found', cmd_str)\n except xmlrpclib.Fault as fault:\n log.error('%s: call failed: %s',\n cmd_str, fault.faultString)\n except xmlrpclib.ProtocolError as perr:\n log.error('%s: protocol failed: %s', cmd_str, perr.errmsg)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n log.info('stopped')\n","sub_path":"roles.devel/dev-supervisor/files/superbeat.py","file_name":"superbeat.py","file_ext":"py","file_size_in_byte":8164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"425661336","text":"def main():\n\n res=open('attractor_10A.txt').read().split('\\n')\n sen=open('attractor_231.txt').read().split('\\n')\n\n resd = {}\n send={}\n for line in res:\n #print(line)\n (key, val) = line.split()\n resd[key] = float(val)\n\n for line in sen:\n (key, val)=line.split()\n send[key]=float(val)\n\n for key,val1 in resd.items():\n val2=send.get(key)\n if val1<0 and val2>0:\n print (key+'\\t'+str(val1)+'\\t'+str(val2))\n if val1>0 and val2<0:\n print (key+'\\t'+str(val1)+'\\t'+str(val2))\nmain()\n \n","sub_path":"_site/_projects/project2/OLD/NetworkAnalysis 
1/SFA_2/find_readoutnodes.py","file_name":"find_readoutnodes.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"579671434","text":"import tkinter\nfrom tkinter import ttk\nimport keyboard\nimport math\nfrom screeninfo import get_monitors\nimport datetime\n\n\ndef handle_focus(event):\n if event.widget == root:\n root.focus_set()\n calcInput.userInputEntry.focus_set()\n\n\nglobal screenWidth, screenHeight\n\nroot = tkinter.Tk()\nscreenInfo = get_monitors()[0]\nscreenWidth = screenInfo.width\nscreenHeight = screenInfo.height\nprint(screenWidth, screenHeight)\n\nroot.title(\"Calculator\")\nroot.geometry(\"%sx67+%s+%s\" % (screenWidth, screenWidth, screenHeight-40))\nroot.resizable(False, False)\nroot.wm_attributes('-type', 'splash')\nroot.attributes(\"-topmost\", True)\nglobal histFilename\nhistFilename = 'calculatorHistory.txt'\n\n\nclass CalculatorMain:\n\n def __init__(self, main):\n\n self.magicFrame = tkinter.Frame(main) # all the other grid elements (in column 0) are relative to this one so by giving it monitor width, the other frames have it too\n self.magicFrame.config(height=0, width=screenWidth)\n self.magicFrame.grid(row=0, column=0, sticky=\"WE\")\n\n # self.mathFunctionsFrame = tkinter.Frame(main)\n # self.mathFunctionsFrame.config(height=80, bg=\"red\")\n # self.mathFunctionsFrame.grid(row=1, column=0)\n # self.mathFunctionsFrame.grid_remove()\n\n self.numberAndFuncLayoutFrame = tkinter.Frame(main)\n self.numberAndFuncLayoutFrame.config(height=20)\n self.numberAndFuncLayoutFrame.grid(row=2, column=0, sticky=\"WE\")\n self.numberAndFuncLayoutFrame.grid_remove()\n\n self.buttonMenuFrame = tkinter.Frame(main)\n self.buttonMenuFrame.config(height=20)\n self.buttonMenuFrame.grid(row=3, column=0, sticky=\"WE\")\n\n self.inputFrame = tkinter.Frame(main)\n self.inputFrame.config(height=40)\n self.inputFrame.grid(row=4, column=0, sticky=\"WE\")\n self.inputFrame.focus_set()\n\n\n def showANDhide(self, main, event=None):\n if 'normal' == main.state():\n main.withdraw()\n else:\n main.deiconify()\n main.after(1, lambda: main.focus_force())\n # calcInput.userInputEntry.focus()\n\n\n\nclass CalculatorInput:\n\n def __init__(self, frame):\n self.userInputEntry = tkinter.Entry(frame, font=(\"Helvetica\", 20), takefocus=1)\n self.userInputEntry.pack(fill=tkinter.BOTH)\n self.upAndDownKeyPress = 0\n\n def calculateUserInput(self, event=None):\n\n command = self.userInputEntry.get()\n command = command.rstrip('\\n')\n try:\n result = eval(command)\n except:\n result = \"error\"\n\n self.addToHistory(command, result)\n\n self.userInputEntry.delete(0, tkinter.END)\n self.userInputEntry.insert(0, result)\n self.upAndDownKeyPress = 0\n\n def clearUserInput(self):\n self.userInputEntry.delete(0, tkinter.END)\n\n def clearLastInputedChar(self):\n get = self.userInputEntry.get()[:-1]\n self.userInputEntry.delete(0, tkinter.END)\n self.userInputEntry.insert(0, get)\n\n def addToHistory(self, command, result):\n try:\n file = open(histFilename, 'x')\n except FileExistsError:\n file = open(histFilename, 'a')\n\n writeMsg = \"DATE: \" + str(datetime.datetime.now())+\" \"+command + \" = \" + str(result)\n file.write(writeMsg)\n file.write('\\n')\n file.close()\n\n try:\n if calcHistoryMenu.window.winfo_exists():\n calcHistoryMenu.insertNewLine(writeMsg)\n\n except AttributeError or tkinter.TclError:\n pass\n\n def traverseHistoryUpDown(self, event, key):\n\n if key == 'up':\n self.upAndDownKeyPress += 1\n if key 
== 'down':\n self.upAndDownKeyPress -= 1\n\n if self.upAndDownKeyPress <= 0:\n self.userInputEntry.delete(0, tkinter.END)\n self.upAndDownKeyPress = 0\n return\n\n file = open(histFilename, 'r')\n lines = file.read().splitlines()\n if self.upAndDownKeyPress >= len(lines):\n self.upAndDownKeyPress = len(lines)\n line = lines[-1*self.upAndDownKeyPress]\n\n line = line[40:] # remove everything except the initial command\n line = line.split('=', 1)[0]\n line = ''.join(line.split())\n\n self.userInputEntry.delete(0, tkinter.END)\n self.userInputEntry.insert(0, line)\n\n\nclass CalculatorButtonMenu:\n\n def __init__(self, frame, main):\n\n self.btnEquals = tkinter.Button(frame, text=\"=\", command=lambda: calcInput.calculateUserInput())\n self.btnEquals.pack(side=\"left\")\n\n self.numberLayoutPopUp = tkinter.Button(frame, text=\"🠕\", command=lambda: self.popUpMoreFuncFrame(main))\n self.numberLayoutPopUp.pack(side=\"left\")\n\n self.btnClear = tkinter.Button(frame, text=\"C\", command=lambda: calcInput.clearUserInput())\n self.btnClear.pack(side=\"left\")\n\n self.btnDot = tkinter.Button(frame, text=\".\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnDot.cget('text')))\n\n self.btnPlus = tkinter.Button(frame, text=\"+\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnPlus.cget('text')))\n\n self.btnMinus = tkinter.Button(frame, text=\"-\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnMinus.cget('text')))\n\n self.btnMultiply = tkinter.Button(frame, text=\"*\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnMultiply.cget('text')))\n\n self.btnDivide = tkinter.Button(frame, text=\"/\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnDivide.cget('text')))\n\n self.btnComma = tkinter.Button(frame, text=\",\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnComma.cget('text')))\n\n self.exitButton = tkinter.Button(frame, text=\"x\", command=lambda: self.closeMainWindow(main))\n self.exitButton.pack(side=\"right\")\n\n self.histViewButton = tkinter.Button(frame, text=\"History\", command=lambda: self.viewCalcHistory(main))\n self.histViewButton.pack(side=\"right\")\n\n self.mathLayoutPopUp = tkinter.Button(frame, text=\"🠕🠕\", command=lambda: calcNumberAndFuncLayout.mathFunctionsPopUp(main))\n self.mathLayoutPopUp.pack(side=\"right\")\n\n def closeMainWindow(self, main):\n main.destroy()\n\n def viewCalcHistory(self, main):\n try:\n if calcHistoryMenu.window.winfo_exists():\n calcHistoryMenu.destroyWindow()\n self.histViewButton[\"text\"] = \"History\"\n else:\n calcHistoryMenu.createWindow(main)\n self.histViewButton[\"text\"] = \"Close History\"\n\n except AttributeError:\n calcHistoryMenu.createWindow(main)\n self.histViewButton[\"text\"] = \"Close History\"\n\n def popUpMoreFuncFrame(self, main):\n if calc.numberAndFuncLayoutFrame.winfo_ismapped():\n calc.numberAndFuncLayoutFrame.grid_remove()\n main.geometry(\"%sx67+%s+%s\" % (screenWidth, screenWidth, screenHeight-40))\n\n self.btnDot.pack_forget()\n self.btnPlus.pack_forget()\n self.btnMinus.pack_forget()\n self.btnMultiply.pack_forget()\n self.btnDivide.pack_forget()\n self.btnComma.pack_forget()\n\n else:\n main.geometry(\"%sx98+%s+%s\" % (screenWidth, screenWidth, screenHeight-40))\n calc.numberAndFuncLayoutFrame.grid()\n\n self.btnDot.pack(side=\"left\")\n self.btnPlus.pack(side=\"left\")\n self.btnMinus.pack(side=\"left\")\n self.btnMultiply.pack(side=\"left\")\n 
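# [Editor's note] Illustrative sketch, not part of the dataset record above; the helper
# name is invented. The traverseHistoryUpDown method above walks the history file with a
# key-press counter that is clamped to [0, len(lines)] and indexes from the end of the
# list. The core pattern, stripped of the tkinter plumbing:
def history_entry(lines, presses):
    # presses == 1 yields the newest entry; larger values walk further back in history.
    presses = max(0, min(presses, len(lines)))
    return None if presses == 0 else lines[-presses]

# e.g. history_entry(['1+1 = 2', '2*3 = 6'], 1) -> '2*3 = 6'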
self.btnDivide.pack(side=\"left\")\n        self.btnComma.pack(side=\"left\")\n        self.btnClear.pack_forget()\n        self.btnClear.pack(side=\"left\")\n\n\nclass CalculatorNumberAndFunctionLayout:\n\n    def __init__(self, frame):\n\n        self.btn1 = tkinter.Button(frame, text=\"1\", command=lambda: self.insertInUserInputEntry(self.btn1.cget('text')))\n        self.btn1.pack(side=\"left\")\n        self.btn2 = tkinter.Button(frame, text=\"2\", command=lambda: self.insertInUserInputEntry(self.btn2.cget('text')))\n        self.btn2.pack(side=\"left\")\n        self.btne = tkinter.Button(frame, text=\"e\", command=lambda: self.insertInUserInputEntry('math.e'))\n        self.btne.pack(side=\"left\")\n        self.btn3 = tkinter.Button(frame, text=\"3\", command=lambda: self.insertInUserInputEntry(self.btn3.cget('text')))\n        self.btn3.pack(side=\"left\")\n        self.btnpi = tkinter.Button(frame, text=\"π\", command=lambda: self.insertInUserInputEntry('math.pi'))\n        self.btnpi.pack(side=\"left\")\n        self.btn4 = tkinter.Button(frame, text=\"4\", command=lambda: self.insertInUserInputEntry(self.btn4.cget('text')))\n        self.btn4.pack(side=\"left\")\n        self.btn5 = tkinter.Button(frame, text=\"5\", command=lambda: self.insertInUserInputEntry(self.btn5.cget('text')))\n        self.btn5.pack(side=\"left\")\n        self.btn6 = tkinter.Button(frame, text=\"6\", command=lambda: self.insertInUserInputEntry(self.btn6.cget('text')))\n        self.btn6.pack(side=\"left\")\n        self.btntau = tkinter.Button(frame, text=\"𝜏\", command=lambda: self.insertInUserInputEntry('math.tau'))  # was a second self.btne, which clobbered the Euler button's attribute\n        self.btntau.pack(side=\"left\")\n        self.btn7 = tkinter.Button(frame, text=\"7\", command=lambda: self.insertInUserInputEntry(self.btn7.cget('text')))\n        self.btn7.pack(side=\"left\")\n        self.btn8 = tkinter.Button(frame, text=\"8\", command=lambda: self.insertInUserInputEntry(self.btn8.cget('text')))\n        self.btn8.pack(side=\"left\")\n        self.btn9 = tkinter.Button(frame, text=\"9\", command=lambda: self.insertInUserInputEntry(self.btn9.cget('text')))\n        self.btn9.pack(side=\"left\")\n        self.btn0 = tkinter.Button(frame, text=\"0\", command=lambda: self.insertInUserInputEntry(self.btn0.cget('text')))\n        self.btn0.pack(side=\"left\")\n        self.btnClearOne = tkinter.Button(frame, text=\"<-\", command=lambda: calcInput.clearLastInputedChar())\n        self.btnClearOne.pack(side=\"left\")\n\n        self.optFrame = tkinter.Frame(frame)\n\n        # Event names below were reconstructed; the extraction stripped <...> sequences.\n        self.optPossibilitiesValues = [\"combination(n,k)\", \"permutation(n,k)\", \"factorial(x)\"]\n        self.optPossibilities = ttk.Combobox(self.optFrame, values=self.optPossibilitiesValues)\n        self.optPossibilities.state(['readonly'])\n        self.optPossibilities.set('possibilities')\n        self.optPossibilities.bind(\"<Key>\", lambda e: \"break\")\n        self.optPossibilities.bind(\"<<ComboboxSelected>>\", lambda event: self.detectMathFunction(event, self.optPossibilities.get()))\n        self.optPossibilities.pack(side=\"right\")\n\n        self.optTrigonometryValues = [\"sin(x)\", \"cos(x)\", \"tan(x)\", \"asin(x)\", \"acos(x)\", \"atan(x)\"]\n        self.optTrigonometry = ttk.Combobox(self.optFrame, values=self.optTrigonometryValues)\n        self.optTrigonometry.state(['readonly'])\n        self.optTrigonometry.set('trigonometry')\n        self.optTrigonometry.bind(\"<Key>\", lambda e: \"break\")\n        self.optTrigonometry.bind(\"<<ComboboxSelected>>\", lambda event: self.detectMathFunction(event, self.optTrigonometry.get()))\n        self.optTrigonometry.pack(side=\"right\")\n\n        self.optAngularConversionValues = [\"radians\", \"degrees\"]\n        self.optAngularConversion = ttk.Combobox(self.optFrame, values=self.optAngularConversionValues)\n        self.optAngularConversion.state(['readonly'])\n        self.optAngularConversion.set('angular 
conversion')\n        self.optAngularConversion.bind(\"<Key>\", lambda e: \"break\")\n        self.optAngularConversion.bind(\"<<ComboboxSelected>>\", lambda event: self.detectMathFunction(event, self.optAngularConversion.get()))\n        self.optAngularConversion.pack(side=\"right\")\n\n        self.optHyperbolicValues = [\"sinh(x)\", \"cosh(x)\", \"tanh(x)\", \"asinh(x)\", \"acosh(x)\", \"atanh(x)\"]\n        self.optHyperbolic = ttk.Combobox(self.optFrame, values=self.optHyperbolicValues)\n        self.optHyperbolic.state(['readonly'])\n        self.optHyperbolic.set('hyperbolic')\n        self.optHyperbolic.bind(\"<Key>\", lambda e: \"break\")\n        self.optHyperbolic.bind(\"<<ComboboxSelected>>\", lambda event: self.detectMathFunction(event, self.optHyperbolic.get()))\n        self.optHyperbolic.pack(side=\"right\")\n\n        self.optOtherValues = [\"pow(x, y)\", \"sqrt(x)\", \"log(x, base)\"]\n        self.optOther = ttk.Combobox(self.optFrame, values=self.optOtherValues)\n        self.optOther.state(['readonly'])\n        self.optOther.set('other')\n        self.optOther.bind(\"<Key>\", lambda e: \"break\")\n        self.optOther.bind(\"<<ComboboxSelected>>\", lambda event: self.detectMathFunction(event, self.optOther.get()))\n        self.optOther.pack(side=\"right\")\n\n        self.selectionCommandTable = {\n            \"combination(n,k)\": \"math.comb()\",\n            \"permutation(n,k)\": \"math.perm()\",\n            \"factorial(x)\": \"math.factorial()\",\n            \"sin(x)\": \"math.sin()\",\n            \"cos(x)\": \"math.cos()\",\n            \"tan(x)\": \"math.tan()\",\n            \"asin(x)\": \"math.asin()\",\n            \"acos(x)\": \"math.acos()\",\n            \"atan(x)\": \"math.atan()\",\n            \"radians\": \"math.radians()\",\n            \"degrees\": \"math.degrees()\",\n            \"sinh(x)\": \"math.sinh()\",\n            \"cosh(x)\": \"math.cosh()\",\n            \"tanh(x)\": \"math.tanh()\",\n            \"asinh(x)\": \"math.asinh()\",\n            \"acosh(x)\": \"math.acosh()\",\n            \"atanh(x)\": \"math.atanh()\",\n            \"pow(x, y)\": \"math.pow()\",\n            \"sqrt(x)\": \"math.sqrt()\",\n            \"log(x, base)\": \"math.log()\"\n        }\n\n    def insertInUserInputEntry(self, char):\n        self.optOther.set('other')\n        self.optHyperbolic.set('hyperbolic')\n        self.optAngularConversion.set('angular conversion')\n        self.optTrigonometry.set('trigonometry')\n        self.optPossibilities.set('possibilities')\n\n        calcInput.userInputEntry.insert(calcInput.userInputEntry.index(\"insert\"), char)\n\n    def detectMathFunction(self, event, selection):\n        self.insertMathFunctionInEntry(self.selectionCommandTable[selection])\n\n    def insertMathFunctionInEntry(self, command):\n        entryMsg = calcInput.userInputEntry.get()\n\n        isDigitBool = 0\n        commaCounter = 0\n        for i in entryMsg:  # also add a comma if the user didn't\n            if i.isnumeric():\n                isDigitBool = 1\n\n            if isDigitBool:\n                if i == ',':\n                    commaCounter += 1\n\n        if commaCounter == 0:\n            prev = ''\n            for i in enumerate(entryMsg):\n                if i[1].isnumeric() or i[1] == '.':\n                    prev = i[1]\n                elif prev.isnumeric():\n                    entryMsg = entryMsg[:i[0]] + ',' + entryMsg[i[0]+1:]\n                else:\n                    prev = i[1]\n\n        entryMsg = command[:-1] + entryMsg + \")\"\n        calcInput.userInputEntry.delete(0, tkinter.END)\n        calcInput.userInputEntry.insert(0, entryMsg)\n\n    def mathFunctionsPopUp(self, main):\n        if calc.numberAndFuncLayoutFrame.winfo_ismapped():\n            pass\n        else:\n            calc.numberAndFuncLayoutFrame.grid()\n            main.geometry(\"%sx98+%s+%s\" % (screenWidth, screenWidth, screenHeight-40))\n\n        if self.optFrame.winfo_ismapped():\n            self.optFrame.pack_forget()\n        else:\n            self.optFrame.pack(side=\"right\")\n\n\nclass CalculatorHistoryMenu:\n\n    def createWindow(self, main):\n        self.window = tkinter.Toplevel(main)\n        self.window.config(bg=\"grey\")\n        self.window.geometry(\"%sx%s+%s+0\" % (screenWidth, screenHeight, screenWidth))\n        self.window.protocol(\"WM_DELETE_WINDOW\", 
self.onClose)\n\n        self.histText = tkinter.Text(self.window)\n        self.histText.config(bg=\"grey\", font=(\"Helvetica\", 15), height=screenHeight-(screenHeight-main.winfo_screenheight()))\n        self.histText.pack(side=\"top\", anchor=\"center\", fill=tkinter.BOTH)\n        self.histText.bind(\"<Key>\", lambda e: \"break\")  # so that the user can't edit the history text\n        self.writeHist()\n\n    def destroyWindow(self):\n        self.window.destroy()\n\n    def writeHist(self):\n        file = open(histFilename, \"r\")\n        lines = file.readlines()\n\n        for line in lines:\n            self.histText.insert(1.0, line)\n\n        file.close()\n\n    def insertNewLine(self, message):\n        self.histText.insert(1.0, \"%s\\n\" % (message))\n\n    def onClose(self):\n        self.window.destroy()\n        calcButtonMenu.histViewButton[\"text\"] = \"History\"\n\n\nglobal calc, calcButtonMenu, calcInput, calcNumberAndFuncLayout, calcHistoryMenu\ncalc = CalculatorMain(root)\ncalcInput = CalculatorInput(calc.inputFrame)\ncalcHistoryMenu = CalculatorHistoryMenu()\ncalcButtonMenu = CalculatorButtonMenu(calc.buttonMenuFrame, root)\ncalcNumberAndFuncLayout = CalculatorNumberAndFunctionLayout(calc.numberAndFuncLayoutFrame)\n\n\n# Event names below were reconstructed; the extraction stripped <...> sequences.\nkeyboard.add_hotkey(\"ctrl+space\", lambda: calc.showANDhide(root))\nroot.bind('<Return>', lambda event: calcInput.calculateUserInput())\ncalcInput.userInputEntry.bind(\"<Up>\", lambda event, key=\"up\": calcInput.traverseHistoryUpDown(event, key))\ncalcInput.userInputEntry.bind(\"<Down>\", lambda event, key=\"down\": calcInput.traverseHistoryUpDown(event, key))\ncalcInput.userInputEntry.bind(\"<Escape>\", lambda arg: calcInput.clearUserInput())\nroot.bind(\"<FocusIn>\", handle_focus)\n\n\nroot.mainloop()\n","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":17255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"479590331","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL).  
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"\n$Id$\n\"\"\"\nfrom zope.interface import Interface\nfrom zope.configuration.fields import GlobalObject, Tokens, \\\n PythonIdentifier, MessageID\nfrom zope.schema import TextLine, Id\nfrom zope.app.security.fields import Permission\n\nclass IBasicComponentInformation(Interface):\n\n component = GlobalObject(\n title=u\"Component to be used\",\n required=False\n )\n\n permission = Permission(\n title=u\"Permission\",\n required=False\n )\n\n factory = GlobalObject(\n title=u\"Factory\",\n required=False\n )\n\nclass IBasicViewInformation(Interface):\n \"\"\"\n This is the basic information for all views.\n \"\"\"\n \n for_ = Tokens(\n title=u\"Specifications of the objects to be viewed\",\n description=u\"\"\"This should be a list of interfaces or classes\n \"\"\",\n required=True,\n value_type=GlobalObject(missing_value=object())\n )\n\n permission = Permission(\n title=u\"Permission\",\n description=u\"The permission needed to use the view.\",\n required=False\n )\n\n class_ = GlobalObject(\n title=u\"Class\",\n description=u\"A class that provides attributes used by the view.\",\n required=False\n )\n\n layer = TextLine(\n title=u\"The layer the view is in.\",\n description=u\"\"\"\n A skin is composed of layers. It is common to put skin\n specific views in a layer named after the skin. If the 'layer'\n attribute is not supplied, it defaults to 'default'.\"\"\",\n required=False\n )\n\n allowed_interface = Tokens(\n title=u\"Interface that is also allowed if user has permission.\",\n description=u\"\"\"\n By default, 'permission' only applies to viewing the view and\n any possible sub views. By specifying this attribute, you can\n make the permission also apply to everything described in the\n supplied interface.\n\n Multiple interfaces can be provided, separated by\n whitespace.\"\"\",\n required=False,\n value_type=GlobalObject()\n )\n\n allowed_attributes = Tokens(\n title=u\"View attributes that are also allowed if user has permission.\",\n description=u\"\"\"\n By default, 'permission' only applies to viewing the view and\n any possible sub views. By specifying 'allowed_attributes',\n you can make the permission also apply to the extra attributes\n on the view object.\"\"\",\n required=False,\n value_type=PythonIdentifier()\n )\n\nclass IBasicResourceInformation(Interface):\n \"\"\"\n Basic information for resources\n \"\"\"\n\n name = TextLine(\n title=u\"The name of the resource.\",\n description=u\"The name shows up in URLs/paths. For example 'foo'.\",\n required=True,\n default=u'',\n )\n\n provides = GlobalObject(\n title=u\"The interface this component provides.\",\n description=u\"\"\"\n A view can provide an interface. 
This would be used for\n views that support other views.\"\"\",\n required=False,\n default=Interface,\n )\n\n type = GlobalObject(\n title=u\"Request type\",\n required=True\n )\n\nclass IInterfaceDirective(Interface):\n \"\"\"\n Define an interface\n \"\"\"\n \n interface = GlobalObject(\n title=u\"Interface\",\n required=True\n )\n\n type = GlobalObject(\n title=u\"Interface type\",\n required=False\n )\n\nclass IAdapterDirective(Interface):\n \"\"\"\n Register an adapter\n \"\"\"\n\n factory = Tokens(\n title=u\"Adapter factory/factories\",\n description=u\"\"\"A list of factories (usually just one) that create the\n adapter instance.\"\"\",\n required=True,\n value_type=GlobalObject()\n )\n\n provides = GlobalObject(\n title=u\"Interface the component provides\",\n description=u\"\"\"This attribute specifes the interface the adapter\n instance must provide.\"\"\",\n required=True\n )\n\n for_ = Tokens(\n title=u\"Specifications to be adapted\",\n description=u\"\"\"This should be a list of interfaces or classes\n \"\"\",\n required=True,\n value_type=GlobalObject(missing_value=object())\n )\n\n permission = Permission(\n title=u\"Permission\",\n description=u\"\"\"This adapter is only available, if the principal has\n this permission.\"\"\",\n required=False\n )\n\n name = TextLine(\n title=u\"Name\",\n description=u\"\"\"Adapters can have names. This attribute allows you to\n specify the name for this adapter.\"\"\",\n required=False\n )\n\nclass ISubscriberDirective(Interface):\n \"\"\"\n Register a subscriber\n \"\"\"\n\n factory = GlobalObject(\n title=u\"Subscriber factory\",\n description=u\"A factory used to create the subscriber instance.\",\n required=True\n )\n\n provides = GlobalObject(\n title=u\"Interface the component provides\",\n description=u\"\"\"This attribute specifes the interface the adapter\n instance must provide.\"\"\",\n required=False,\n )\n\n for_ = Tokens(\n title=u\"Interfaces or classes that this subscriber depends on\",\n description=u\"This should be a list of interfaces or classes\",\n required=True,\n value_type=GlobalObject(missing_value = object()),\n )\n\n permission = Permission(\n title=u\"Permission\",\n description=u\"\"\"This subscriber is only available, if the principal has\n this permission.\"\"\",\n required=False\n )\n\nclass IUtilityDirective(IBasicComponentInformation):\n \"\"\"\n Register a utility\n \"\"\"\n\n provides = GlobalObject(\n title=u\"Interface the component provides\",\n required=True\n )\n\n name = TextLine(\n title=u\"Name\",\n required=False\n )\n\nclass IFactoryDirective(Interface):\n \"\"\"\n Define a factory\n \"\"\"\n\n component = GlobalObject(\n title=u\"Component to be used\",\n required=True\n )\n \n id = TextLine(\n title=u\"ID\",\n required=False\n )\n\n title = MessageID(\n title=u\"Title\",\n description=u\"\"\"\n text suitable for use in the 'add content' menu of a\n management interface\"\"\",\n required=False\n )\n\n description = MessageID(\n title=u\"Description\",\n description=u\"Longer narrative description of what this factory does\",\n required=False\n )\n\n\nclass IViewDirective(IBasicViewInformation, IBasicResourceInformation):\n \"\"\"\n Register a view for a component\n \"\"\"\n\n factory = Tokens(\n title=u\"Factory\",\n required=False,\n value_type=GlobalObject()\n )\n\nclass IDefaultViewDirective(IBasicResourceInformation):\n \"\"\"The name of the view that should be the default.\n\n This name refers to view that should be the\n view used by default (if no view name is supplied\n 
explicitly).\n \"\"\"\n\n for_ = GlobalObject(\n title=u\"The interface this view is the default for.\",\n description=u\"\"\"\n The view is the default view for the supplied interface. If\n this is not supplied, the view applies to all objects (XXX\n this ought to change).\"\"\",\n required=False\n )\n\n\n\nclass IResourceDirective(IBasicComponentInformation,\n IBasicResourceInformation):\n \"\"\"\n Register a resource\n \"\"\"\n \n layer = TextLine(\n title=u\"The layer the resource is in.\",\n required=False\n )\n\n allowed_interface = Tokens(\n title=u\"Interface that is also allowed if user has permission.\",\n required=False,\n value_type=GlobalObject()\n )\n\n allowed_attributes = Tokens(\n title=u\"View attributes that are also allowed if user has permission.\",\n required=False,\n value_type=PythonIdentifier()\n )\n\nclass ILayerDirective(Interface):\n \"\"\"\n Register a layer\n \"\"\"\n\n name = TextLine(\n title=u\"Layer name\",\n description=u\"Layer name\",\n required=True\n )\n\nclass ISkinDirective(Interface):\n \"\"\"\n Register a skin\n \"\"\"\n\n name = TextLine(\n title=u\"Skin name\",\n description=u\"Skin name\",\n required=True\n )\n\n layers = Tokens(\n title=u\"The layers it consists of.\",\n required=True,\n value_type=TextLine()\n )\n\nclass IDefaultSkinDirective(Interface):\n \"\"\"\n Register a skin\n \"\"\"\n\n name = TextLine(\n title=u\"Default skin name\",\n description=u\"Default skin name\",\n required=True\n )\n\nclass IServiceTypeDirective(Interface):\n\n id = TextLine(\n title=u\"ID of the service type\",\n required=True\n )\n\n interface = GlobalObject(\n title=u\"Interface of the service type\",\n required=True\n )\n\nclass IServiceDirective(IBasicComponentInformation):\n \"\"\"\n Register a service\n \"\"\"\n\n serviceType = TextLine(\n title=u\"ID of service type\",\n required=True\n )\n\nclass IClassDirective(Interface):\n \"\"\"\n Make statements about a class\n \"\"\"\n\n class_ = GlobalObject(\n title=u\"Class\",\n required=True\n )\n\nclass IImplementsSubdirective(Interface):\n \"\"\"\n Declare that the class given by the content directive's class\n attribute implements a given interface\n \"\"\"\n\n interface = Tokens(\n title=u\"One or more interfaces\",\n required=True,\n value_type=GlobalObject()\n )\n\nclass IRequireSubdirective(Interface):\n \"\"\"\n Indicate that the a specified list of names or the names in a\n given Interface require a given permission for access.\n \"\"\"\n\n permission = Permission(\n title=u\"Permission\",\n description=u\"\"\"\n Specifies the permission by id that will be required to\n access or mutate the attributes and methods specified.\"\"\",\n required=False\n )\n\n attributes = Tokens(\n title=u\"Attributes and methods\",\n description=u\"\"\"\n This is a list of attributes and methods that can be accessed.\"\"\",\n required=False,\n value_type=PythonIdentifier()\n )\n \n set_attributes = Tokens(\n title=u\"Attributes that can be set\",\n description=u\"\"\"\n This is a list of attributes that can be modified/mutated.\"\"\",\n required=False,\n value_type=PythonIdentifier()\n )\n\n interface = Tokens(\n title=u\"Interfaces\",\n description=u\"\"\"\n The listed interfaces' methods and attributes can be accessed.\"\"\",\n required=False,\n value_type=GlobalObject()\n )\n\n set_schema = Tokens(\n title=u\"The attributes specified by the schema can be set\",\n description=u\"\"\"\n The listed schemas' properties can be modified/mutated.\"\"\",\n required=False,\n value_type=GlobalObject()\n )\n\n like_class = 
GlobalObject(\n title=u\"Configure like this class\",\n description=u\"\"\"\n This argument says that this content class should be configured in the\n same way the specified class' security is. If this argument is\n specifed, no other argument can be used.\"\"\",\n required=False\n )\n \nclass IAllowSubdirective(Interface):\n \"\"\"\n Declare a part of the class to be publicly viewable (that is,\n requires the zope.Public permission). Only one of the following\n two attributes may be used.\n \"\"\"\n\n attributes = Tokens(\n title=u\"Attributes\",\n required=False,\n value_type=PythonIdentifier()\n )\n\n interface = Tokens(\n title=u\"Interface\",\n required=False,\n value_type=GlobalObject()\n )\n\nclass IFactorySubdirective(Interface):\n \"\"\"\n Specify the factory used to create this content object\n \"\"\"\n\n id = TextLine(\n title=u\"ID\",\n description=u\"\"\"\n the identifier for this factory in the ZMI factory\n identification scheme. If not given, defaults to the literal\n string given as the content directive's 'class' attribute.\"\"\",\n required=False\n )\n\n title = MessageID(\n title=u\"Title\",\n description=u\"\"\"\n text suitable for use in the 'add content' menu of a\n management interface\"\"\",\n required=False\n )\n\n description = MessageID(\n title=u\"Description\",\n description=u\"Longer narrative description of what this factory does\",\n required=False\n )\n","sub_path":"Zope3/tags/ZopeInterface-3.0.0b1/src/zope/app/component/metadirectives.py","file_name":"metadirectives.py","file_ext":"py","file_size_in_byte":13163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"595326339","text":"#!/home/brandonmfong/SOURCES/Repo/DualPowerGeneration/MaxPowerTracker/py/bin/python3\n\n\"\"\"Simple FTDI EEPROM configurator.\n\"\"\"\n\n# Copyright (c) 2019, Emmanuel Blot \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the Neotion nor the names of its contributors may\n# be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL NEOTION BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\n# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom argparse import ArgumentParser, FileType\nfrom logging import Formatter, StreamHandler, DEBUG, ERROR\nfrom sys import modules, stderr\nfrom traceback import format_exc\nfrom pyftdi import FtdiLogger\nfrom pyftdi.eeprom import FtdiEeprom\nfrom pyftdi.misc import hexdump\n\ndef main():\n    \"\"\"Main routine\"\"\"\n    debug = False\n    try:\n        argparser = ArgumentParser(description=modules[__name__].__doc__)\n        argparser.add_argument('device', nargs='?', default='ftdi:///?',\n                               help='serial port device name')\n        argparser.add_argument('-x', '--hexdump', action='store_true',\n                               help='dump EEPROM content as ASCII')\n        argparser.add_argument('-o', '--output', type=FileType('wt'),\n                               help='output ini file to save EEPROM content')\n        argparser.add_argument('-s', '--serial-number',\n                               help='set serial number')\n        argparser.add_argument('-m', '--manufacturer',\n                               help='set manufacturer name')\n        argparser.add_argument('-p', '--product',\n                               help='set product name')\n        argparser.add_argument('-e', '--erase', action='store_true',\n                               help='erase the whole EEPROM content')\n        argparser.add_argument('-u', '--update', action='store_true',\n                               help='perform actual update, use w/ care')\n        argparser.add_argument('-v', '--verbose', action='count', default=0,\n                               help='increase verbosity')\n        argparser.add_argument('-d', '--debug', action='store_true',\n                               help='enable debug mode')\n        args = argparser.parse_args()\n        debug = args.debug\n\n        if not args.device:\n            argparser.error('Serial device not specified')\n\n        loglevel = max(DEBUG, ERROR - (10 * args.verbose))\n        loglevel = min(ERROR, loglevel)\n        if debug:\n            formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '\n                                  '%(message)s', '%H:%M:%S')\n        else:\n            formatter = Formatter('%(message)s')\n        FtdiLogger.set_formatter(formatter)\n        FtdiLogger.set_level(loglevel)\n        FtdiLogger.log.addHandler(StreamHandler(stderr))\n\n        eeprom = FtdiEeprom()\n        eeprom.open(args.device)\n        if args.erase:\n            eeprom.erase()\n        if args.serial_number:\n            eeprom.set_serial_number(args.serial_number)\n        if args.manufacturer:\n            eeprom.set_manufacturer_name(args.manufacturer)\n        if args.product:\n            eeprom.set_product_name(args.product)\n        if args.hexdump:\n            print(hexdump(eeprom.data))\n        if args.update:\n            eeprom.commit(False)\n        if args.verbose > 0:\n            eeprom.dump_config()\n        if args.output:\n            eeprom.save_config(args.output)\n\n    except (IOError, ValueError) as exc:\n        print('\\nError: %s' % exc, file=stderr)\n        if debug:\n            print(format_exc(chain=False), file=stderr)\n        exit(1)\n    except KeyboardInterrupt:\n        exit(2)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"MaxPowerTracker/py/bin/ftconf.py","file_name":"ftconf.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"614355138","text":"import numpy as np\nfrom numpy.linalg import solve\nimport pandas as pd\nimport time\n\nclass BCMP_lib:\n    \n    def __init__(self, N, R, K, mu, type_list):\n        self.N = N #number of nodes in the network (reduced by 1 when specified inside the program)\n        self.R = R #number of classes\n        self.K = K #number of customers in the network, a list like K = [K1, 
K2]. The total is sum(K)\n        alp, self.p = self.getProbArrival() #auto-generate the transition probabilities and compute the arrival rates\n        self.saveCSVi(self.p, './tpr/tprNR_'+str(N)+'_'+str(R)+'.csv') #save the transition probabilities as csv\n        self.alpha = alp.T #transpose to match the computation layout\n        self.mu = mu #service rates (N x R)\n        self.type_list = type_list #Type1 (FCFS), Type2 processor sharing (PS), Type3 infinite server (IS), Type4 last-come first-served preemptive-resume (LCFS-PR)\n        self.combi_list = []\n        self.combi_list = self.getCombiList2([], self.K, self.R, 0, self.combi_list) #combinations for the given K and R\n        NK = [k+1 for k in self.K]\n        NK.insert(0, self.N)\n        self.mp_set = np.zeros(NK) #stores the marginal distributions\n        self.exp = np.zeros((self.N, self.R)) #stores the mean number of customers in the system\n        self.tp = np.zeros((self.N, self.R)) #stores the throughputs\n        self.rho = [] #stores the utilizations (per node)\n        #self.m = m #number of servers at each node\n        #self.alpha = alpha\n        #TODO\n        #1. add the computation of arrival rates from the transition probability matrix (both loading a matrix and random generation that guarantees ergodicity)\n        #2. computations that take the number of servers into account (utilization and the FS part)\n    \n    def getBCMP(self):\n        for n in range(self.N): #P324 Table 8.3\n            for i, k in enumerate(self.combi_list):\n                g = self.getConvolution(n, k)\n                #print('G{0}({1}) = {2}'.format(n,k,g))\n                if n == self.N-1 and i == len(self.combi_list)-1: #the last element of combi_list (note: is it always safe to use the last list?)\n                    self.GK = g\n        \n        self.getMarginalProbabilitySet() #[(0,0),(0,1),(0,2)],[(1,0),(1,1),(1,2)]\n        \n        self.getEXP()\n        \n        self.getThroughput()\n        \n        self.getUsageRate()\n        \n        return self.mp_set, self.exp, self.tp, self.rho\n    \n    \n    def getMarginalProbabilitySet(self): #compute all marginal distributions\n        for n in range(self.N):\n            for k in self.combi_list:\n                nk = []\n                nk.append([n])\n                for i in k: #when indexing an ndarray with lists, wrap each index like [[n],[k1],[k2]] and pass them as a tuple\n                    nk.append([i])\n                mp = self.getMarginalProbability(n, k)\n                self.mp_set[tuple(nk)] = mp\n    \n    \n    #compute the marginal distribution (for node n)\n    #G(K) is what getConvolution yields with N=n-1 and Si=[K1,K2], i.e. GK, so obtain it first and then evaluate (8.23)\n    def getMarginalProbability(self, n, Si):\n        mp = self.FS(n, Si) * self.getGi(n, np.array(self.K)-np.array(Si)) / self.GK  # self.K/self.R used below so the class also works outside this script\n        return mp\n    \n    def getGi(self, n, Si): #P322 Eqs. (8.25)-(8.27); the argument n corresponds to the i of G_N^(i)\n        if sum(Si) == 0: #Eq. (8.27)\n            gi = 1\n        else:\n            gi = self.getConvolution(self.N-1, Si)\n            combi_list = []\n            combi_list = self.getCombiList2([], Si, self.R, 0, combi_list)\n            for k in combi_list:\n                if(sum(k) == 0): #exclude the j=0 case (8.26)\n                    continue\n                else:\n                    gi -= self.FS(n, k) * self.getGi(n, np.array(Si)-np.array(k))\n        return gi\n    \n    def getConvolution(self, n, Si): #P321 Eq. (8.21)\n        g = 0\n        combi_list = []\n        combi_list = self.getCombiList2([], Si, self.R, 0, combi_list)\n        if n == 0:\n            g = self.FS(n, Si)\n        elif n >= 1:\n            for k in combi_list:\n                g += self.getConvolution(n-1, k) * self.FS(n, np.array(Si)-np.array(k))\n        return g\n    \n    # P323 Table 8.2, computation of Fi(Si); P303 Eq. (7.82)\n    #Si is the per-class population at node i: (number of class-0 customers at node i, number of class-1 customers at node i)\n    #Si ranges over (0,0),(1,0),(0,1),(1,1),(0,2),(1,2) since K1=1, K2=2\n    def FS(self, n, Si): #Si is the state distribution, type_number = 1 (FCFS), 2 (PS), 3 (IS), 4 (LCFS-PR)\n        f = 1\n        if self.type_list[n] == 1:\n            print('FCFS') #not implemented yet\n        else:\n            for r in range(self.R): #type-3 is done after this loop\n                f *= 1 / self.fact(Si[r]) * (self.alpha[n,r] / self.mu[n,r])**Si[r]\n            if self.type_list[n] == 2 or self.type_list[n] == 4: #type-2 and type-4 multiply in the factorial factor\n                f *= self.fact(sum(Si))\n        return f\n    \n    def fact(self, n):\n        if n <= 1:\n            return 1\n        return n * self.fact(n-1)\n    \n    def getCombiList2(self, combi, K, R, idx, Klist):\n        if len(combi) == R:\n            Klist.append(combi.copy())\n            return Klist\n        for v in range(K[idx]+1):\n            combi.append(v)\n            Klist = self.getCombiList2(combi, K, R, idx+1, Klist)\n            combi.pop()\n        return Klist\n    \n    #compute the mean number of customers in the system\n    def getEXP(self):\n        for n in range(self.N):\n            for k in self.combi_list:\n                nk = []\n                nk.append([n])\n                for i in k: 
#when indexing an ndarray with lists, wrap each index like [[n],[k1],[k2]] and pass them as a tuple\n                    nk.append([i])\n                for r in range(self.R):\n                    self.exp[n,r] += k[r] * self.mp_set[tuple(nk)]\n        #return self.exp\n    \n    #compute the throughput\n    def getThroughput(self):\n        for n in range(self.N):\n            for r in range(self.R):\n                r1 = np.zeros(self.R, dtype = int)\n                r1[r] = 1\n                self.tp[n,r] = self.alpha[n,r] * self.getConvolution(self.N-1, np.array(self.K) - r1) / self.GK\n        #return self.tp\n    \n    def getUsageRate(self): #compute the utilization, P322 Eq. (8.29): lambda / (m * mu); here m = 1\n        self.rho = self.tp / self.mu\n        #return self.rho\n    \n    #generate one transition probability matrix per class and return the arrival rates of each\n    def getProbArrival(self):\n        pr = np.zeros((self.R*self.N, self.R*self.N))\n        alpha = np.zeros((self.R, self.N))\n        for r in range(self.R):\n            class_number = 0\n            while class_number != 1: #check for ergodicity\n                p = np.random.rand(self.N, self.N)\n                for i, val in enumerate(np.sum(p, axis=1)): #normalize; axis=1 takes row sums\n                    p[i] /= val\n                for i in range(self.N): #merge the transition probabilities\n                    for j in range(self.N):\n                        pr[r*self.N+i,r*self.N+j] = p[i,j]\n                equivalence, class_number = self.getEquivalence(0, 5, p) #0 is the threshold, 5 is the number of steps\n                if class_number == 1: #one communicating class (the chain is ergodic)\n                    break\n            alpha_r = self.getCloseTraffic(p)\n            for i, val in enumerate(alpha_r): #store the arrival rates in the array alpha\n                alpha[r,i] = val\n                #print('r = {0}, i = {1}, val = {2}'.format(r,i,val))\n        return alpha, pr\n    \n    def getCloseTraffic(self, p):\n        e = np.eye(len(p)-1) #reduce the dimension by one\n        pe = p[1:len(p), 1:len(p)].T - e #slice the rows and columns to shrink the matrix\n        lmd = p[0, 1:len(p)] #use row 0, columns 1.. as the right-hand side\n        slv = solve(pe, lmd * (-1))\n        alpha = np.insert(slv, 0, 1.0) #prepend alpha_1 = 1\n        return alpha\n    \n    #function that finds the equivalence classes\n    def getEquivalence(self, th, roop, p):\n        list_number = 0 #use the first empty list to begin with\n\n        #1. create empty lists (one per node) to register nodes in\n        equivalence = [[] for i in range(len(p))]\n        \n        #2. test whether states communicate and, if so, register them in a list\n        for ix in range(roop):\n            p = np.linalg.matrix_power(p.copy(), ix+1) #matrix power\n            for i in range(len(p)):\n                for j in range(i+1, len(p)):\n                    if(p[i][j] > th and p[j][i] > th): #the states communicate\n                        #3. choose the list in which to register the communicating pair\n                        find = 0 #already in an existing list?\n                        for k in range(len(p)):\n                            if i in equivalence[k]: #found in existing list k (searched by i)\n                                find = 1 #it was in an existing list\n                                if j not in equivalence[k]: #register j if it is not in the list\n                                    equivalence[k].append(j)\n                                break\n                            if j in equivalence[k]: #found in existing list k (searched by j)\n                                find = 1 #it was in an existing list\n                                if i not in equivalence[k]:\n                                    equivalence[k].append(i)\n                                break\n                        if(find == 0): #not in any existing list\n                            equivalence[list_number].append(i)\n                            if(i != j):\n                                equivalence[list_number].append(j)\n                            list_number += 1\n\n        #4. register the nodes that do not communicate with any other node\n        for i in range(len(p)):\n            find = 0\n            for j in range(len(p)):\n                if i in equivalence[j]:\n                    find = 1\n                    break\n            if find == 0:\n                equivalence[list_number].append(i)\n                list_number += 1\n
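# [Editor's note] Illustrative sketch, not part of the dataset record above; the routing
# matrix P is an assumed 3-node example. getCloseTraffic above solves the closed-network
# traffic equations alpha = alpha @ P by fixing alpha_1 = 1 and solving
# (P[1:,1:].T - I) x = -P[0,1:] for the remaining visit ratios. Standalone:
import numpy as np
from numpy.linalg import solve

P = np.array([[0.0, 0.6, 0.4],
              [0.5, 0.0, 0.5],
              [1.0, 0.0, 0.0]])               # rows sum to 1
pe = P[1:, 1:].T - np.eye(len(P) - 1)
alpha = np.insert(solve(pe, -P[0, 1:]), 0, 1.0)  # alpha[0] fixed to 1 -> [1.0, 0.6, 0.7]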
\n        #5. ergodicity check (the chain is ergodic when the number of classes is 1)\n        class_number = 0\n        for i in range(len(p)):\n            if len(equivalence[i]) > 0:\n                class_number += 1\n\n        return equivalence, class_number\n    \n    #save the data\n    def saveCSVi(self, df, fname):\n        pdf = pd.DataFrame(df) #convert to a pandas DataFrame\n        pdf.to_csv(fname, index=True) #index=False would omit the index\n    \nif __name__ == '__main__':\n    \n    N = 4 #given\n    R = 2 #given\n    K_total = 5 #given\n    K = [(K_total + i) // R for i in range(R)] #distribute the customers over the classes automatically\n    mu = np.full((N, R), 1) #generate identical service rates (the service rates need tuning)\n    type_list = np.full(N, 2) #use PS as the service type\n    #K1 = 1\n    #K2 = 2\n    #K = [K1, K2]\n    #mu = np.array([[1/1, 1/2],[1/4, 1/5],[1/8, 1/10],[1/12, 1/16]])\n    #type_list = [2, 4, 4, 3] #Node1: Type2 processor sharing (PS), Node2: Type4 LCFS preemptive-resume (LCFS-PR), Node3: Type4 LCFS-PR, Node4: Type3 infinite server (IS), everything else Type1 (FCFS)\n    #alpha = np.array([[1, 1],[0.4, 0.4],[0.4, 0.3],[0.2, 0.3]])\n    \n    #bcmp = BCMP_lib(N, R, K, mu, type_list, alpha)\n    start = time.time()\n    bcmp = BCMP_lib(N, R, K, mu, type_list) #auto-generate the transition probabilities for this setting and compute the arrival rates in the constructor\n    mp_set, exp, tp, rho = bcmp.getBCMP()\n    elapsed_time = time.time() - start\n    print(\"calculation_time:{0}\".format(elapsed_time) + \"[sec]\")\n    \n    print('marginal distributions')\n    print(mp_set)\n    print('mean number of customers in the system')\n    print(exp)\n    print('throughput')\n    print(tp)\n    print('utilization')\n    print(rho)","sub_path":"BCMP_lib.py","file_name":"BCMP_lib.py","file_ext":"py","file_size_in_byte":11922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"597713359","text":"\"\"\"Representations Extractor for ``transformers`` toolkit models.\n\nScript that given a file with input sentences and a ``transformers``\nmodel, extracts representations from all layers of the model. The script\nsupports aggregation over sub-words created due to the tokenization of\nthe provided model.\n\nAuthor: Fahim Dalvi\nLast Modified: 2 March, 2020\nLast Modified: 9 September, 2020\nLast Modified: 15 September, 2020\nLast Modified: 1 February, 2020\n\"\"\"\n\nimport argparse\nimport collections\nimport json\nimport sys\n\nimport numpy as np\nimport torch\nimport h5py\n\nfrom tqdm import tqdm\nfrom transformers import AutoTokenizer, AutoModel\n\n\ndef get_model_and_tokenizer(model_desc, device=\"cpu\", random_weights=False):\n    \"\"\"\n    Automatically get the appropriate ``transformers`` model and tokenizer based\n    on the model description\n\n    Parameters\n    ----------\n    model_desc : str\n        Model description; can either be a model name like ``bert-base-uncased``\n        or a path to a trained model\n\n    device : str, optional\n        Device to load the model on, cpu or gpu. Default is cpu.\n\n    random_weights : bool, optional\n        Whether the weights of the model should be randomized. Useful for analyses\n        where one needs an untrained model.\n\n    Returns\n    -------\n    model : transformers model\n        An instance of one of the transformers.modeling classes\n    tokenizer : transformers tokenizer\n        An instance of one of the transformers.tokenization classes\n    \"\"\"\n    model = AutoModel.from_pretrained(model_desc, output_hidden_states=True).to(device)\n    tokenizer = AutoTokenizer.from_pretrained(model_desc)\n\n    if random_weights:\n        print(\"Randomizing weights\")\n        model.init_weights()\n\n    return model, tokenizer\n\n\ndef aggregate_repr(state, start, end, aggregation):\n    \"\"\"\n    Function that aggregates activations/embeddings over a span of subword tokens.\n    This function will usually be called once per word. 
For example, if we had the sentence::\n\n This is an example\n\n which is tokenized by BPE into::\n\n this is an ex @@am @@ple\n\n The function should be called 4 times::\n\n aggregate_repr(state, 0, 0, aggregation)\n aggregate_repr(state, 1, 1, aggregation)\n aggregate_repr(state, 2, 2, aggregation)\n aggregate_repr(state, 3, 5, aggregation)\n\n Returns a zero vector if end is less than start, i.e. the request is to\n aggregate over an empty slice.\n\n Parameters\n ----------\n state : numpy.ndarray\n Matrix of size [ NUM_LAYERS x NUM_SUBWORD_TOKENS_IN_SENT x LAYER_DIM]\n start : int\n Index of the first subword of the word being processed\n end : int\n Index of the last subword of the word being processed\n aggregation : {'first', 'last', 'average'}\n Aggregation method for combining subword activations\n\n Returns\n -------\n word_vector : numpy.ndarray\n Matrix of size [NUM_LAYERS x LAYER_DIM]\n \"\"\"\n if end < start:\n sys.stderr.write(\"WARNING: An empty slice of tokens was encountered. \" +\n \"This probably implies a special unicode character or text \" +\n \"encoding issue in your original data that was dropped by the \" +\n \"transformer model's tokenizer.\\n\")\n return np.zeros((state.shape[0], state.shape[2]))\n if aggregation == \"first\":\n return state[:, start, :]\n elif aggregation == \"last\":\n return state[:, end, :]\n elif aggregation == \"average\":\n return np.average(state[:, start : end + 1, :], axis=1)\n\n\ndef extract_sentence_representations(\n sentence,\n model,\n tokenizer,\n device=\"cpu\",\n include_embeddings=True,\n aggregation=\"last\",\n tokenization_counts={}\n):\n \"\"\"\n Get representations for one sentence\n \"\"\"\n # this follows the HuggingFace API for transformers\n\n special_tokens = [\n x for x in tokenizer.all_special_tokens if x != tokenizer.unk_token\n ]\n special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)\n\n original_tokens = sentence.split(\" \")\n # Add a letter and space before each word since some tokenizers are space sensitive\n tmp_tokens = [\n \"a\" + \" \" + x if x_idx != 0 else x for x_idx, x in enumerate(original_tokens)\n ]\n assert len(original_tokens) == len(tmp_tokens)\n\n with torch.no_grad():\n # Get tokenization counts if not already available\n for token_idx, token in enumerate(tmp_tokens):\n tok_ids = [\n x for x in tokenizer.encode(token) if x not in special_tokens_ids\n ]\n if token_idx != 0:\n # Ignore the first token (added letter)\n tok_ids = tok_ids[1:]\n\n if token in tokenization_counts:\n assert tokenization_counts[token] == len(\n tok_ids\n ), \"Got different tokenization for already processed word\"\n else:\n tokenization_counts[token] = len(tok_ids)\n ids = tokenizer.encode(sentence, truncation=True)\n input_ids = torch.tensor([ids]).to(device)\n # Hugging Face format: tuple of torch.FloatTensor of shape (batch_size, sequence_length, hidden_size)\n # Tuple has 13 elements for base model: embedding outputs + hidden states at each layer\n all_hidden_states = model(input_ids)[-1]\n\n if include_embeddings:\n all_hidden_states = [\n hidden_states[0].cpu().numpy() for hidden_states in all_hidden_states\n ]\n else:\n all_hidden_states = [\n hidden_states[0].cpu().numpy()\n for hidden_states in all_hidden_states[1:]\n ]\n all_hidden_states = np.array(all_hidden_states)\n\n print('Sentence : \"%s\"' % (sentence))\n print(\"Original (%03d): %s\" % (len(original_tokens), original_tokens))\n print(\n \"Tokenized (%03d): %s\"\n % (\n len(tokenizer.convert_ids_to_tokens(ids)),\n 
tokenizer.convert_ids_to_tokens(ids),\n )\n )\n\n # Remove special tokens\n ids_without_special_tokens = [x for x in ids if x not in special_tokens_ids]\n idx_without_special_tokens = [\n t_i for t_i, x in enumerate(ids) if x not in special_tokens_ids\n ]\n filtered_ids = [ids[t_i] for t_i in idx_without_special_tokens]\n assert all_hidden_states.shape[1] == len(ids)\n all_hidden_states = all_hidden_states[:, idx_without_special_tokens, :]\n assert all_hidden_states.shape[1] == len(filtered_ids)\n print(\n \"Filtered (%03d): %s\"\n % (\n len(tokenizer.convert_ids_to_tokens(filtered_ids)),\n tokenizer.convert_ids_to_tokens(filtered_ids),\n )\n )\n segmented_tokens = tokenizer.convert_ids_to_tokens(filtered_ids)\n\n # Perform actual subword aggregation/detokenization\n counter = 0\n detokenized = []\n final_hidden_states = np.zeros(\n (all_hidden_states.shape[0], len(original_tokens), all_hidden_states.shape[2])\n )\n inputs_truncated = False\n\n for token_idx, token in enumerate(tmp_tokens):\n current_word_start_idx = counter\n current_word_end_idx = counter + tokenization_counts[token]\n\n # Check for truncated hidden states in the case where the\n # original word was actually tokenized\n if (tokenization_counts[token] != 0 and current_word_start_idx >= all_hidden_states.shape[1]) \\\n or current_word_end_idx > all_hidden_states.shape[1]:\n final_hidden_states = final_hidden_states[:, :len(detokenized), :]\n inputs_truncated = True\n break\n\n final_hidden_states[:, len(detokenized), :] = aggregate_repr(\n all_hidden_states,\n current_word_start_idx,\n current_word_end_idx - 1,\n aggregation,\n )\n detokenized.append(\n \"\".join(segmented_tokens[current_word_start_idx:current_word_end_idx])\n )\n counter += tokenization_counts[token]\n\n print(\"Detokenized (%03d): %s\" % (len(detokenized), detokenized))\n print(\"Counter: %d\" % (counter))\n\n if inputs_truncated:\n print(\"WARNING: Input truncated because of length, skipping check\")\n else:\n assert counter == len(ids_without_special_tokens)\n assert len(detokenized) == len(original_tokens)\n print(\"===================================================================\")\n\n return final_hidden_states, detokenized\n\n\ndef extract_representations(\n model_desc,\n input_corpus,\n output_file,\n device=\"cpu\",\n aggregation=\"last\",\n output_type=\"json\",\n random_weights=False,\n ignore_embeddings=False,\n):\n print(f\"Loading model: {model_desc}\")\n model, tokenizer = get_model_and_tokenizer(\n model_desc, device=device, random_weights=random_weights\n )\n\n print(\"Reading input corpus\")\n\n def corpus_generator(input_corpus_path):\n with open(input_corpus_path, \"r\") as fp:\n for line in fp:\n yield line.strip()\n return\n\n print(\"Preparing output file\")\n if output_type == \"hdf5\":\n if not output_file.endswith(\".hdf5\"):\n print(\n \"[WARNING] Output filename (%s) does not end with .hdf5, but output file type is hdf5.\"\n % (output_file)\n )\n output_file = h5py.File(output_file, \"w\")\n sentence_to_index = {}\n elif output_type == \"json\":\n if not output_file.endswith(\".json\"):\n print(\n \"[WARNING] Output filename (%s) does not end with .json, but output file type is json.\"\n % (output_file)\n )\n output_file = open(output_file, \"w\", encoding=\"utf-8\")\n\n print(\"Extracting representations from model\")\n tokenization_counts = {} # Cache for tokenizer rules\n for sentence_idx, sentence in enumerate(corpus_generator(input_corpus)):\n hidden_states, extracted_words = extract_sentence_representations(\n 
sentence,\n model,\n tokenizer,\n device=device,\n include_embeddings=(not ignore_embeddings),\n aggregation=aggregation,\n tokenization_counts=tokenization_counts\n )\n\n print(\"Hidden states: \", hidden_states.shape)\n print(\"# Extracted words: \", len(extracted_words))\n\n if output_type == \"hdf5\":\n output_file.create_dataset(\n str(sentence_idx),\n hidden_states.shape,\n dtype=\"float32\",\n data=hidden_states,\n )\n # TODO: Replace with better implementation with list of indices\n final_sentence = sentence\n counter = 1\n while final_sentence in sentence_to_index:\n counter += 1\n final_sentence = f\"{sentence} (Occurrence {counter})\"\n sentence = final_sentence\n sentence_to_index[sentence] = str(sentence_idx)\n elif output_type == \"json\":\n output_json = collections.OrderedDict()\n output_json[\"linex_index\"] = sentence_idx\n all_out_features = []\n\n for word_idx, extracted_word in enumerate(extracted_words):\n all_layers = []\n for layer_idx in range(hidden_states.shape[0]):\n layers = collections.OrderedDict()\n layers[\"index\"] = layer_idx\n layers[\"values\"] = [\n round(x.item(), 8)\n for x in hidden_states[layer_idx, word_idx, :]\n ]\n all_layers.append(layers)\n out_features = collections.OrderedDict()\n out_features[\"token\"] = extracted_word\n out_features[\"layers\"] = all_layers\n all_out_features.append(out_features)\n output_json[\"features\"] = all_out_features\n output_file.write(json.dumps(output_json) + \"\\n\")\n\n if output_type == \"hdf5\":\n sentence_index_dataset = output_file.create_dataset(\n \"sentence_to_index\", (1,), dtype=h5py.special_dtype(vlen=str)\n )\n sentence_index_dataset[0] = json.dumps(sentence_to_index)\n\n output_file.close()\n\n\nHDF5_SPECIAL_TOKENS = {\".\": \"__DOT__\", \"/\": \"__SLASH__\"}\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"model_desc\", help=\"Name of model\")\n parser.add_argument(\n \"input_corpus\", help=\"Text file path with one sentence per line\"\n )\n parser.add_argument(\n \"output_file\",\n help=\"Output file path where extracted representations will be stored\",\n )\n parser.add_argument(\n \"--aggregation\",\n help=\"first, last or average aggregation for word representation in the case of subword segmentation\",\n default=\"last\",\n )\n parser.add_argument(\n \"--output-type\",\n choices=[\"hdf5\", \"json\"],\n default=\"json\",\n help=\"Output format of the extracted representations\",\n )\n parser.add_argument(\"--disable_cuda\", action=\"store_true\")\n parser.add_argument(\"--ignore_embeddings\", action=\"store_true\")\n parser.add_argument(\n \"--random_weights\",\n action=\"store_true\",\n help=\"generate representations from randomly initialized model\",\n )\n args = parser.parse_args()\n\n assert args.aggregation in [\n \"average\",\n \"first\",\n \"last\",\n ], \"Invalid aggregation option, please specify first, average or last.\"\n\n if not args.disable_cuda and torch.cuda.is_available():\n device = torch.device(\"cuda\")\n else:\n device = torch.device(\"cpu\")\n\n extract_representations(\n args.model_desc,\n args.input_corpus,\n args.output_file,\n device=device,\n aggregation=args.aggregation,\n output_type=args.output_type,\n random_weights=args.random_weights,\n ignore_embeddings=args.ignore_embeddings,\n )\n\n\nif __name__ == \"__main__\":\n 
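# [Editor's note] Illustrative sketch, not part of the dataset record above; the shapes
# are assumptions chosen for the example. The extractor's aggregate_repr collapses a
# word's subword span with one of three reductions over a [layers x tokens x dim]
# activation tensor. A toy demonstration of the three options on random data:
import numpy as np

state = np.random.rand(13, 6, 768)               # 13 layers, 6 subword tokens, 768 dims
word_first = state[:, 3, :]                      # aggregation='first'   over span 3..5
word_last = state[:, 5, :]                       # aggregation='last'    over span 3..5
word_avg = np.average(state[:, 3:6, :], axis=1)  # aggregation='average' over span 3..5
assert word_avg.shape == (13, 768)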
main()\n","sub_path":"neurox/data/extraction/transformers_extractor.py","file_name":"transformers_extractor.py","file_ext":"py","file_size_in_byte":13974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"151305237","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport pdb\nfrom openerp import api, fields, models\n\n_logger = logging.getLogger(__name__)\n\n\nclass ContratosVentas(models.Model):\n _inherit = 'account.analytic.account'\n\n tipo = fields.Selection((('client', 'Cliente'), ('provider', 'Proveedor')), 'Tipo', default='client')\n\n @api.model\n def _prepare_invoice_data(self, contract):\n invoice_data = super(ContratosVentas, self)._prepare_invoice_data(contract)\n _logger.info('Invoice Data: {}'.format(invoice_data))\n if contract.tipo == 'provider':\n account_id = self.env['res.partner'].browse(invoice_data['partner_id']).property_account_payable.id\n invoice_data.update({'type': 'in_invoice', 'account_id': account_id})\n _logger.info('Invoice Data despues de actulizar el tipo: {}'.format(invoice_data))\n\n return invoice_data\n\n\n @api.v7\n def _prepare_invoice_lines(self, cr, uid, contract, fiscal_position_id, context=None):\n fpos_obj = self.pool.get('account.fiscal.position')\n fiscal_position = None\n if fiscal_position_id:\n fiscal_position = fpos_obj.browse(cr, uid, fiscal_position_id, context=context)\n invoice_lines = []\n for line in contract.recurring_invoice_line_ids:\n\n if contract.tipo == 'provider':\n res = line.product_id\n account_id = res.property_account_expense.id\n if not account_id:\n account_id = res.categ_id.property_account_expense_categ.id\n else:\n res = line.product_id\n account_id = res.property_account_income.id\n if not account_id:\n account_id = res.categ_id.property_account_income_categ.id\n\n account_id = fpos_obj.map_account(cr, uid, fiscal_position, account_id)\n\n taxes = res.taxes_id or False\n tax_id = fpos_obj.map_tax(cr, uid, fiscal_position, taxes)\n\n invoice_lines.append((0, 0, {\n 'name': line.name,\n 'account_id': account_id,\n 'account_analytic_id': contract.id,\n 'price_unit': line.price_unit or 0.0,\n 'quantity': line.quantity,\n 'uos_id': line.uom_id.id or False,\n 'product_id': line.product_id.id or False,\n 'invoice_line_tax_id': [(6, 0, tax_id)],\n }))\n return invoice_lines","sub_path":"addons-obs/contrato_ventas_ext/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"93278251","text":"from htk.utils import htk_setting\n\ndef help():\n event_handlers = htk_setting('HTK_SLACK_EVENT_HANDLERS')\n commands = ['`%s`' % command for command in sorted(event_handlers.keys())]\n usage_dict = {\n 'description': 'Displays available commands. 
Available commands are: %s' % ', '.join(commands),\n 'basic' : 'htk: command args',\n 'examples' : [\n 'htk: help help',\n ]\n }\n return usage_dict\n\ndef default():\n usage_dict = {\n 'description' : 'This is not a very useful command; it simply parrots back what you said (to test whether the Slack bot is functioning)',\n 'basic' : 'htk: default',\n 'examples' : [],\n }\n return usage_dict\n\ndef bible():\n usage_dict = {\n 'description' : 'Look up a Bible passage',\n 'basic' : 'htk: bible [esv|nasb] passage',\n 'examples' : [\n 'htk: bible esv John 3:16',\n 'htk: bible nasb 1 Cor 13:4-7',\n 'htk: bible Lamentations 3:22-23',\n 'htk: bible Psalm 119:11',\n ],\n }\n return usage_dict\n\ndef stock():\n usage_dict = {\n 'description' : 'Look up most recent stock quotes',\n 'basic': 'htk: stock SYMBOL[( |;|,)SYMBOLS]',\n 'examples' : [\n 'htk: stock AAPL AMZN GOOG LNKD YHOO',\n ],\n }\n return usage_dict\n\ndef weather():\n usage_dict = {\n 'description' : 'Look up weather',\n 'basic' : 'htk: weather LOCATION',\n 'examples' : [\n 'htk: weather 90210',\n 'htk: weather San Francisco, CA',\n 'htk: weather 1600 Pennsylvania Ave NW, Washington, DC 20500',\n ],\n }\n return usage_dict\n","sub_path":"lib/slack/event_handler_usages.py","file_name":"event_handler_usages.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"30323547","text":"#!/usr/bin/env python3\n\nname = 'AJ32'\ndf = None\ncoding = {'all_the_time': [1,2], 'not_at_all': [4,5], 'mid': [3]}\n\npartition = {'high': [['all_the_time', 'not_at_all', 'mid'], lambda x: high(x)],\n 'low': [['all_the_time', 'not_at_all', 'mid'], lambda x: low(x)]}\n\n\ndef high(*x):\n a, b, c = _organize(x)\n return a > b and a > c\n\ndef low(*x):\n a, b, c = _organize(x)\n return b > a and b > c\n\ndef _rename(col):\n return name + '_' + col\n\ndef _organize(row):\n x = row[0]\n a = x[_rename('all_the_time')]\n b = x[_rename('not_at_all')]\n c = x[_rename('mid')]\n return a, b, c\n\n# high: all_the_time > not_at_all AND all_the_time > mid\n# low: not_at_all > all_the_time AND not_at_all > mid\npartition_tract_nos = None\n","sub_path":"diseases/AJ32.py","file_name":"AJ32.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"214678423","text":"\"\"\"\nGiven two strings, determine if one is an anagram of the other.\nTwo words are anagrams of each other if they are made of the \nsame letters in a different order.\n\"\"\"\nimport re\nfrom collections import Counter\n\ndef is_word(str):\n\t\"\"\"\n\tDetermines whether a string can be considered a word.\n\tA word contains only uppercase and lowercase letters/\n\n \tArgs:\n \tstr: a string.\n\n \tReturns:\n \tTrue if str is a word, False otherwise.\n \t\"\"\"\n\tif not str:\n\t\t# string is empty/None\n\t\treturn False\n\tstr = str.strip()\n\tword_pattern = re.compile(\"^[a-zA-Z]+$\")\n\tif word_pattern.match(str) is None:\n\t\t# string contains characters other than letters\n\t\treturn False\n\treturn True\n\t\ndef are_anagrams(str1, str2):\n\t\"\"\"\n\tDetermines whether one word is an anagram of the other.\n\n \tArgs:\n \tstr1: a string, the first word.\n \tstr2: a string, the second word.\n\n \tReturns:\n \tTrue if str1 and str2 are anagrams, False otherwise.\n \t\"\"\"\n\n\tif (not is_word(str1)) or (not is_word(str2)):\n\t\t# one or both arguments are not words\n\t\treturn False\n\n\t# remove leading and trailing whitespace 
characters\n\tstr1 = str1.strip()\n\tstr2 = str2.strip()\n\n\n\tif len(str1) != len(str2):\n\t\t# if the strings are of different lengths they cannot be anagrams\n\t\treturn False\n\n\t\"\"\"\n\t# slower\n\tif sorted(str1) == sorted(str2):\n\t\t# if the strings are equal once they have been sorted then they are anagrams\n\t\treturn True\n\telse:\n\t\treturn False\n\t\"\"\"\n\treturn Counter(str1) == Counter(str2)","sub_path":"MilenaFilipovic/assignment-1/anagrams.py","file_name":"anagrams.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"127723135","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 5 07:32:50 2019\r\n\r\n@author: Mahnoor\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nimg1 = cv2.imread('final.jpeg')\r\nimg = cv2.resize(img1, (960, 540)) \r\n\r\nimgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nret, thresh = cv2.threshold(imgray, 127, 255, 0)\r\ncontours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\r\nprint(\"Number of Contours = \" + str(len(contours)) )\r\n\r\nareas = []\r\nfor c in contours:\r\n    areas.append(cv2.contourArea(c))\r\nprint(\"Area of contour(s): \",areas)\r\n \r\n# track the largest and second-largest contour areas without shadowing the max() builtin\r\nlargest = max(areas[0], areas[1]) \r\nsecondmax = min(areas[0], areas[1]) \r\n \r\nfor i in range(2,len(areas)): \r\n    if areas[i]>largest: \r\n        secondmax=largest\r\n        largest=areas[i] \r\n    else: \r\n        if areas[i]>secondmax: \r\n            secondmax=areas[i]\r\n \r\nprint(\"Second highest number is : \",str(secondmax)) \r\nindex = areas.index(secondmax)\r\nprint(index)\r\nprint(type(secondmax))\r\n\r\n\r\ncv2.drawContours(img, contours, index, (0, 0, 255), 1)\r\n\r\ncv2.imshow('Image', img)\r\ncv2.imshow('Image Gray', imgray)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"118896745","text":"import json\n# import time\nimport pandas as pd\nimport geopandas as gpd\nfrom django.contrib.gis.db import models\nfrom django.db.models import JSONField\nfrom django.core import serializers\nfrom django.utils import timezone\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer \n\nclass PandasModelMixin(models.Model):\n    class Meta:\n        abstract = True\n\n    @classmethod\n    def as_dataframe(cls, queryset=None, field_list=None):\n        # t1 = time.time()\n\n        if queryset is None:\n            queryset = cls.objects.all()\n        if field_list is None:\n            field_list = [_field.name for _field in cls._meta._get_fields(reverse=False)]\n\n        # build one row per object, one column per requested field\n        data = []\n        for obj in queryset:\n            data.append([obj.serializable_value(column) for column in field_list])\n\n        columns = field_list\n\n        df = pd.DataFrame(data, columns=columns)\n        # print(\"Execution time without serialization: %s\" % time.time()-t1)\n        return df\n\n    @classmethod\n    def as_dataframe_using_django_serializer(cls, queryset=None):\n        # t1 = time.time()\n\n        if queryset is None:\n            queryset = cls.objects.all()\n\n        if queryset.exists():\n            serialized_models = serializers.serialize(format='python', queryset=queryset)\n            serialized_objects = [s['fields'] for s in serialized_models]\n            data = [x.values() for x in serialized_objects]\n\n            columns = serialized_objects[0].keys()\n\n            df = pd.DataFrame(data, columns=columns)\n        else:\n            # empty queryset: return an empty frame instead of leaving df unbound\n            df = pd.DataFrame()\n        # print(\"Execution time using Django serializer: %s\" % time.time()-t1)\n        return df\n\n    @classmethod\n    def 
as_dataframe_using_drf_serializer(cls, queryset=None, drf_serializer=None, field_list=None, as_gdf=False):\n        from rest_framework import serializers\n\n\n        if queryset is None:\n            queryset = cls.objects.all()\n\n        if drf_serializer is None:\n            class CustomModelSerializer(serializers.ModelSerializer):\n                class Meta:\n                    model = cls\n                    fields = field_list or '__all__'\n\n            drf_serializer = CustomModelSerializer\n\n        serialized_objects = drf_serializer(queryset, many=True).data\n        data = [x.values() for x in serialized_objects]\n\n        columns = drf_serializer().get_fields().keys()\n\n        df = pd.DataFrame(data, columns=columns)\n        if as_gdf:\n            df = gpd.GeoDataFrame(df)\n\n        return df\n\n    @classmethod\n    def as_geojson_using_drfg_serializer(cls, queryset=None, drfg_serializer=None, field_list=None, geo_field=\"geom\"): #, id_field=None):\n        from rest_framework import serializers\n\n        if queryset is None:\n            queryset = cls.objects.all()\n\n        if drfg_serializer is None:\n            # a class body cannot read a name that it also assigns, so bind the\n            # parameter to a distinct name before using it inside Meta\n            geo_field_name = geo_field\n            class CustomModelSerializer(GeoFeatureModelSerializer):\n                class Meta:\n                    model = cls\n                    fields = field_list or '__all__'\n                    geo_field = geo_field_name\n\n            drfg_serializer = CustomModelSerializer\n\n        table_as_geojson_odicts = drfg_serializer(queryset, many=True).data\n        \n        # if id_field:\n        #     features_with_id = []\n        #     for f in table_as_geojson_odicts['features']:\n        #         f['id'] = f['properties'][id_field]\n        #         features_with_id.append(f)\n        #     table_as_geojson_odicts['features'] = features_with_id\n\n        return json.loads(json.dumps(table_as_geojson_odicts))\n\n    @classmethod\n    def as_dataframe_from_raw_query(cls, sql):\n        r = cls.objects.raw(sql)\n        cols = r.columns\n        # convert the response to a list of dictionaries\n        data = [{k: t.__dict__[k] for k in t.__dict__.keys() & set(cols)} for t in r]\n        # return as a dataframe\n        return pd.DataFrame(data, columns=cols)\n\n    # @classmethod\n    # def as geodataframe_using_drf_serializer\n\nclass TimestampedMixin(PandasModelMixin):\n    \"\"\"Abstract class, provides auto-populating \"created\" and \"modified\" \n    fields to any table model that inherits it.\n    \"\"\"\n    created = models.DateTimeField(editable=False, default=timezone.now)\n    modified = models.DateTimeField(default=timezone.now)\n    version = models.CharField(null=True, blank=True, max_length=255)\n\n    def save(self, *args, **kwargs):\n        ''' On save, update timestamps '''\n        if not self.id:\n            self.created = timezone.now()\n        self.modified = timezone.now()\n        return super().save(*args, **kwargs)\n\n    class Meta:\n        abstract = True\n\n\nclass MetricMixin(PandasModelMixin):\n\n    #code = models.SlugField(max_length=255)\n    code = models.CharField(max_length=255, null=True, blank=True)\n    label = models.CharField(max_length=1000, null=True, blank=True)\n    icon = models.CharField(max_length=1000, null=True, blank=True)\n    color = models.CharField(max_length=255, null=True, blank=True)\n    definition = models.TextField(null=True, blank=True)\n    source = models.TextField(null=True, blank=True)\n\n    def __str__(self):\n        return self.label\n    \n    class Meta:\n        abstract = True\n        ordering = ['code']\n\n\nclass DataPointMixin(MetricMixin):\n\n    def data_default():\n        return {}\n    \n    data = JSONField(default=data_default)\n    # expected top-level keys:\n    # [\n    #     metadata {dict},\n    #     properties {dict},\n    #     metrics {dict},\n    #     sum {float},\n    #     mean {float},\n    #     pct {float},\n    #     value {float}\n    # ]\n\n    class Meta:\n        abstract = True\n    
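\n# Usage sketch (editor's note): \"Site\" stands for any concrete model that inherits these mixins; the names below are illustrative, not part of this module.\n#   df = Site.as_dataframe(field_list=[\"id\", \"label\"])\n#   gdf = Site.as_dataframe_using_drf_serializer(as_gdf=True)\n#   geojson = Site.as_geojson_using_drfg_serializer(geo_field=\"geom\")\n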
","sub_path":"server/api/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"238213126","text":"##encoding=UTF8\n\n\"\"\"\n[EN]create a invert index dictionary from a regular index dictionary\n\n[CN]将一个正向索引的字典转化成反向索引的字典\n 正向索引字典的例子:\n 物品1 : {索引1, 索引2, ...}\n 物品2 : {索引1, 索引2, ...}\n \n 反向索引字典的例子:\n 索引1 : {物品1, 物品2, ...}\n 索引2 : {物品1, 物品2, ...}\n \nimport:\n from angora.DATA.invertindex import invertindex\n\"\"\"\n\nfrom __future__ import print_function\nfrom six import iteritems\n\ndef invertindex(pos_index):\n \"\"\"\n [Args]\n ------\n pos_index: normal index dictionary\n key: value = item_id: set{[index1, index2, ..., ]}\n \n [Returns]\n ---------\n inv_index:\n key: value = index: set{[item_id1, item_id2, ...,]}\n \"\"\"\n invert_index = dict()\n for item_id, indices in iteritems(pos_index):\n for index in indices:\n if index not in invert_index:\n invert_index[index] = set({item_id})\n else:\n invert_index[index].add(item_id)\n return invert_index\n\nif __name__ == \"__main__\":\n def test_inv_index():\n print(\"{:=^40}\".format(\"test_inv_index\"))\n pos_index = {\"let it go\": {\"mp3\", \"pop\", \"dance\"},\n \"can you feel the love tonight\": {\"acc\", \"pop\", \"movie\"},\n \"Just dance\": {\"pop\", \"dance\", \"club\"}}\n print(invertindex(pos_index))\n \n test_inv_index()","sub_path":"angora/DATA/invertindex.py","file_name":"invertindex.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"399287761","text":"import os\nimport sys\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import layers\nfrom keras import models\nfrom keras import optimizers\n\nimport matplotlib.pyplot as plt\n\nIMAGE_DIR = \"GemAngle\"\nSHAPE = \"Round\"\nTRAIN_DIR = \"Train\"\nVALIDATE_DIR = \"Validate\"\nTEST_DIR = \"Test\"\nPOSSIBLE_ANGLES = [\"0\", \"5\", \"10\", \"15\", \"20\", \"25\", \"30\", \"35\", \"40\"] # Octagon so 0 -> 45\n\nIMAGE_DIM_X = 254\nIMAGE_DIM_Y = 254\n\n\nif __name__ == '__main__':\n\n base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), IMAGE_DIR + \"\\\\\" + SHAPE)\n \n train_dir = os.path.join(base_dir, TRAIN_DIR)\n print(\"Training images files in {0}\".format(dir))\n\n\n val_dir = os.path.join(base_dir, VALIDATE_DIR)\n print(\"Validation images files in {0}\".format(dir))\n\n test_dir = os.path.join(base_dir, TEST_DIR)\n print(\"Test images files in {0}\".format(dir))\n\n train_datagen = ImageDataGenerator(rescale=1./255)\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n train_generator = train_datagen.flow_from_directory(\n train_dir,\n target_size=(IMAGE_DIM_X, IMAGE_DIM_Y),\n batch_size=20,\n class_mode='categorical')\n \n validation_generator = test_datagen.flow_from_directory(\n val_dir,\n target_size=(IMAGE_DIM_X, IMAGE_DIM_Y),\n batch_size=20,\n class_mode='categorical')\n \n for data_batch, labels_batch in train_generator:\n print(\"Data batch shape:\", data_batch.shape)\n print(\"Labels batch shape:\", labels_batch.shape)\n break\n\n #Build convnet\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3,3), activation='relu', input_shape = (IMAGE_DIM_X, IMAGE_DIM_Y, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3,3), 
activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3,3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3,3), activation='relu'))\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dense(len(POSSIBLE_ANGLES), activation='softmax'))\n\n print(model.summary())\n\n model.compile(loss='mean_squared_error', #mean_squared_error #categorical_crossentropy\n optimizer=optimizers.RMSprop(lr=2e-5),\n metrics=['acc']) \n\n print(\"Classes \" + str(train_generator.class_indices))\n time.sleep(10)\n history = model.fit_generator(\n train_generator,\n steps_per_epoch = 100,\n epochs = 150,\n validation_data = validation_generator,\n validation_steps = 50) \n\n \n model_json = model.to_json()\n with open(\"round_angle_model.json\", \"w\") as json_file:\n json_file.write(model_json) \n model.save_weights(\"cnn_round_angles.h5\")\n print(\"Saved model to disk\")\n \n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(1, len(acc) + 1)\n plt.plot(epochs, acc, 'bo', label = \"Train acc\")\n plt.plot(epochs, val_acc, 'b', label = \"Validation acc\")\n plt.title(\"Training and validation acc\")\n plt.legend()\n\n plt.figure()\n\n plt.plot(epochs, loss, 'bo', label = \"Train loss\")\n plt.plot(epochs, val_loss, 'b', label = \"Validation loss\")\n plt.title(\"Training and validation loss\")\n plt.legend()\n\n plt.show()\n\n","sub_path":"gem_angle_train_round.py","file_name":"gem_angle_train_round.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"614723758","text":"import json\n\nfrom flask_cors import cross_origin\nfrom flask_socketio import emit, SocketIO\n\nfrom Utilities.Tocken import verify\n\nmySocket = SocketIO(cors_allowed_origins=\"*\")\n\n@mySocket.on('connect')\ndef on_connect():\n print('socket connected')\n emit('test', 'AAAAA')\n\n@mySocket.on('message')\ndef handle_message(data):\n authDetails = json.loads(data)\n\n if authDetails[\"type\"] != \"authorization\":\n print('Nu e bine!!')\n else:\n verify(authDetails['payload']['token'])","sub_path":"api/v1/sockets.py","file_name":"sockets.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"621427951","text":"from flask import session\nfrom apps.article.models import Article_type\nfrom apps.user.models import User\n\ndef user_type():\n # 获取文章分类\n types = Article_type.query.all()\n # 登录用户\n user = None\n user_id = session.get('uid', None)\n if user_id:\n user = User.query.get(user_id)\n return user, types\n\n","sub_path":"apps/user/util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"33135813","text":"import smtplib, json, os, sys\n\n#credentials\nimport sib\n\ndefault = {\n\t'sender': 'My SMTP mailer ',\n\t'recipient': '' # email address to receive message when no recipient is specified\n}\n\ndef sendMessage(param):\n\ttry:\n\t\tsubject = param['s']\n\texcept:\n\t\treturn {'status': False, 'description': 'missing subject [s]'}\n\ttry:\n\t\tmessage = param['m']\n\texcept:\n\t\treturn {'status': False, 'description': 'missing body [m]'}\n\n\t# create and send email\n\tmailserver = 
smtplib.SMTP('smtp-relay.sendinblue.com', 587)\n\tmailserver.login(sib.user, sib.pw)\n\n\t# defaults\n\tsender = default['sender']\n\trecipient = default['recipient']\n\tif 'f' in param:\n\t\tsender = param['f']\n\tif 't' in param:\n\t\trecipient = param['t']\n\tif recipient == '':\n\t\treturn {'status': False, 'description': 'no recipient named, set parameter [t]'}\n\n\temail = 'From: %s\\nTo: %s' % (sender, recipient)\n\temail += '\\nSubject: '+ param['s']\n\temail += \"\\n\\n\" + param['m']\n\tmailserver.sendmail(sender, recipient, email)\n\treturn {'status': True, 'description': 'sent message'}\n\n# file colled directly with JSONic string as argument\nif __name__ == '__main__':\n\ttry:\n\t\targ = sys.argv[1]\n\texcept:\n\t\tprint( \"No parameter provided!\\nquitting\" )\n\t\tsys.exit()\n\ttry:\n\t\tparam = json.loads( arg )\n\texcept:\n\t\tprint( \"Valid JSON not found\\nquitting\" )\n\t\tsys.exit()\n\n\tsendMessage( param )\n","sub_path":"mailer.py","file_name":"mailer.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"9371974","text":"'''https://www.acmicpc.net/problem/1504\n문제:\n - 방향성 없는 그래프 \n - 노드 2 이상 8백 이하\n - 링크 0 이상 20만 이하\n - 임의로 주어진 두 정점은 반드시 통과\n - 한번 이동했던 정점/간선 다시 이동 가능\n - 반드시 최단 경로로 이동\n'''\nimport sys\nimport heapq\nINF = int(1e+9)\nn, e = map(int, input().split())\ngraph = [[]*(n+1) for _ in range(n+1)]\n\nfor _ in range(e):\n n1, n2, w = map(int, input().split())\n graph[n1].append((w, n2))\n graph[n2].append((w, n1))\n\nm1, m2 = map(int, input().split())\n\ncase1 = 0 # 1 -> m1 -> m2 -> N\ncase2 = 0 # 1 -> m2 -> m1 -> N\n\nfor idx, start in enumerate([(0, 1), (0, m1), (0, m2)]):\n distances = [INF] * (n+1)\n prior_q = []\n heapq.heappush(prior_q, start)\n distances[start[1]] = 0\n\n while prior_q:\n cur_dist, cur_loc = heapq.heappop(prior_q)\n if distances[cur_loc] < cur_dist:\n continue\n \n for dist, loc in graph[cur_loc]:\n cumul_dist = cur_dist + dist\n if cumul_dist < distances[loc]:\n distances[loc] = cumul_dist\n heapq.heappush(prior_q, (cumul_dist, loc))\n if idx == 0:\n if case1 < INF:\n case1 += distances[m1]\n if case2 < INF:\n case2 += distances[m2]\n\n elif idx == 1:\n if case1 < INF:\n case1 += distances[m2]\n if case2 < INF:\n case2 += distances[n]\n\n elif idx == 2:\n if case1 < INF:\n case1 += distances[n]\n if case2 < INF:\n case2 += distances[m1]\n\n\nshortest = min(case1, case2)\nif shortest < INF:\n print(shortest)\nelse:\n print(-1)\n\n\n\n\n","sub_path":"shortest-path/1504_특정한최단경로.py","file_name":"1504_특정한최단경로.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"585992547","text":"import scrapy\n\n\nclass IndiangoSpider(scrapy.Spider):\n name = 'IndiaNGO'\n allowed_domains = ['https://www.indiangoslist.com/ngo-address/achukuru-welfare-society-in-itanagar-arunachal-pradesh_AR-2009-0015817']\n start_urls = ['https://www.indiangoslist.com/ngo-address/achukuru-welfare-society-in-itanagar-arunachal-pradesh_AR-2009-0015817']\n\n def parse(self, response):\n ngo_left = response.css(\".ngo_left_head::text\").extract()\n ngo_right = response.css(\".ngo_right_head::text\").extract()\n span = response.xpath(\"//*[@class='ngo_right_head']//text()\").extract()\n print(ngo_right)\n print(span)\n count_1 = 0\n count_2 = 0\n for i in range(len(span)):\n if span[i] == ' ':\n count_1 += 1\n elif span[i] == '\\n':\n count_2 += 1\n for _ in range(count_1):\n span.remove(' 
')\n for _ in range(count_2):\n span.remove('\\n')\n print(span)\n span = span[0:len(ngo_left)]\n span[len(span)-4] = span[len(span)-4] + span[len(span)-3]\n span = span[0:len(span)-3] + span[len(span)-2:]\n print(span)\n for item in zip(ngo_left,span):\n\n scraped = {\n 'name' : item[0],\n 'description' : item[1]\n }\n yield scraped\n pass","sub_path":"HarryScrapy/ourfirstscraper/ourfirstscraper/spiders/IndiaNGO.py","file_name":"IndiaNGO.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"572224101","text":"from urllib.error import HTTPError\nimport bs4\nimport urllib.request\nimport datetime\n\ndef podajczas(tabelaminut):\n koniec = False\n for minutaRozkladowa in tabelaminut[hourtable]:\n\n if minutaRozkladowa == -1:\n for godzinyRozkladowe in range(24):\n if koniec:\n break\n for minutyRozkladowe in tabelaminut[godzinyRozkladowe]:\n if minutyRozkladowe != -1:\n czasGodzina = abs((godzinyRozkladowe + 4) % 24)\n print('Najszybszy autobus masz o: ' + str(czasGodzina) + ':' + str(minutyRozkladowe))\n if tabelaminut[godzinyRozkladowe][-1] == tabelaminut[godzinyRozkladowe][0]:\n print('Nastepny o: ' + str(czasGodzina + 1) + ':' + str(\n tabelaminut[godzinyRozkladowe + 1][0]))\n koniec = True\n break\n else:\n indeks = tabelaminut[godzinyRozkladowe].index(minutyRozkladowe)\n print('Nastepny o: ' + str(czasGodzina) + ':' + str(\n tabelaminut[godzinyRozkladowe][indeks + 1]))\n koniec = True\n break\n\n\n\n elif minutaRozkladowa > minuta:\n indeks = (tabelaminut[hourtable].index(minutaRozkladowa))\n print('Najszybszy autobus masz o: ' + str(godzina) + ':' + str(tabelaminut[hourtable][indeks]))\n if tabelaminut[hourtable][-1] == tabelaminut[hourtable][indeks]:\n print('Nastepny o: ' + str(godzina + 1) + ':' + str(tabelaminut[hourtable + 1][0]))\n else:\n print('Nastepny o: ' + str(godzina) + ':' + str(tabelaminut[hourtable][indeks + 1]))\n break\n\n\n\n elif minutaRozkladowa < minuta and minutaRozkladowa == tabelaminut[hourtable][-1]:\n if tabelaminut[hourtable + 1][0] == -1:\n for godzinyRozkladowe in range(24):\n if koniec:\n break\n for minutyRozkladowe in tabelaminut[godzinyRozkladowe]:\n if minutyRozkladowe != -1:\n czasGodzina = abs((godzinyRozkladowe % 24) + 4)\n print('Najszybszy autobus masz o: ' + str(czasGodzina) + ':' + str(minutyRozkladowe))\n if tabelaminut[godzinyRozkladowe][-1] == tabelaminut[godzinyRozkladowe][0]:\n print('Nastepny o: ' + str(czasGodzina + 1) + ':' + str(\n tabelaminut[godzinyRozkladowe + 1][0]))\n koniec = True\n break\n else:\n indeks = tabelaminut[godzinyRozkladowe].index(minutyRozkladowe + 1)\n print('Nastepny o: ' + str(czasGodzina) + ':' + str(\n tabelaminut[godzinyRozkladowe][indeks]))\n koniec = True\n break\n print('Najszybszy autobus masz o: ' + str(godzina + 1) + ':' + str(tabelaminut[hourtable + 1][0]))\n if tabelaminut[hourtable + 1][-1] == tabelaminut[hourtable + 1][0]:\n print('Nastepny o: ' + str(godzina + 2) + ':' + str(tabelaminut[hourtable + 2][0]))\n else:\n print('Nastepny o: ' + str(godzina + 1) + ':' + str(tabelaminut[hourtable + 1][1]))\n break\n\n\nprint('''\n _____ _____ _______ __ __ _______ _____ __ __ ______ \n| __ \\ /\\ | __ \\|__ __|\\ \\ / / |__ __||_ _|| \\/ || ____|\n| |__) |/ \\ | |__) | | | \\ \\_/ / | | | | | \\ / || |__ \n| ___// /\\ \\ | _ / | | \\ / | | | | | |\\/| || __| \n| | / ____ \\ | | \\ \\ | | | | | | _| |_ | | | || |____ \n|_| /_/ \\_\\|_| \\_\\ |_| |_| |_| |_____||_| |_||______|\n ''')\n# łączenie się z 
mpk\nprint('[*] Lacze z \\'http://www.mpk.poznan.pl/component/transport\\'')\nprint()\nprint('[*] Pobieram liste autobusow i tramwajow')\nprint()\nurl = 'http://www.mpk.poznan.pl/component/transport'\ntry:\n html = urllib.request.urlopen(url)\n page = html.read()\n html.close()\n htmlpage = bs4.BeautifulSoup(page, 'html.parser')\n tabela = htmlpage.find('div', {'id': 'MIMMPK'})\n tabelatramwaj = tabela.find_all('div', {'class': 'box_trams'})\n tramwajedzienne = tabelatramwaj[0].text.splitlines()\n tramwajenocne = tabelatramwaj[1].text.splitlines()\n tramwajedzienne = list(filter(None, tramwajedzienne))\n tramwajenocne = list(filter(None, tramwajenocne))\n\n tabelabusy = tabela.find_all('div', {'class': 'box_buses'})\n busydzienne = tabelabusy[0].text.splitlines()\n busynocne = tabelabusy[1].text.splitlines()\n busydzienne = list(filter(None, busydzienne))\n busynocne = list(filter(None, busynocne))\n\n choice = input('''[?] Pokaz mi wszystkie autobusy/tramwaje (1)\n lub \n Wpisz numer autobusu/tramwaju (2): ''')\n print()\n if choice == '1':\n x = 0\n print('Autobusy Dzienne')\n for autobus in busydzienne:\n print(str(x) + '. ' + autobus)\n x += 1\n x = 0\n print('Autobusy Nocne')\n for autobus in busynocne:\n print(str(x) + '. ' + autobus)\n x += 1\n x = 0\n print('Tramwaje Dzienne')\n for tramwaj in tramwajedzienne:\n print(str(x) + '. ' + tramwaj)\n x += 1\n x = 0\n print('Tramwaje Nocne')\n for tramwaj in tramwajenocne:\n print(str(x) + '. ' + tramwaj)\n x += 1\n input('Wcisnij enter by zakonczyc proram')\n exit(0)\n else:\n Wybranalinia = input('[?] Wpisz numer tramwaju/autobusu: ')\n print()\n\n url = url + '/' + Wybranalinia\n\n print('[*] Lacze z \\'http://www.mpk.poznan.pl/component/transport/' + Wybranalinia + '\\'')\n print()\n\n html = urllib.request.urlopen(url)\n page = html.read()\n html.close()\n htmlpage = bs4.BeautifulSoup(page, 'html.parser')\n tabelalewo = htmlpage.find('div', {'id': 'box_timetable_left'})\n tabelaprawo = htmlpage.find('div', {'id': 'box_timetable_right'})\n\n print('[*] Parsuje przystanki')\n print()\n if tabelaprawo is None:\n print('[!] Jedna linia ' + tabelalewo.h4.text)\n wybor = 1\n else:\n wybor = input('[?] ' + tabelalewo.h4.text + ' (1) lub ' + tabelaprawo.h4.text + ' (2) : ')\n print()\n if wybor == '1' or tabelaprawo is None:\n print('[*] Wybierz przystanek')\n print()\n tabela = tabelalewo.find_all('ul')\n tabela = tabela[1]\n tabela = tabela.find_all('li')\n numer = 0\n for data in tabela:\n print(str(numer) + '. ' + data.text.replace('\\n', ''))\n numer += 1\n print()\n wybor2 = int(input('[?] Numer przystanku: '))\n print()\n link = (tabela[wybor2].a['href'])\n else:\n print('[*] Wybierz przystanek')\n print()\n tabela = tabelaprawo.find_all('ul')\n tabela = tabela[1]\n tabela = tabela.find_all('li')\n numer = 0\n for data in tabela:\n print(str(numer) + '. ' + data.text.replace('\\n', ''))\n numer += 1\n print()\n wybor2 = int(input('[?] 
Numer przystanku: '))\n print()\n link = (tabela[wybor2].a['href'])\n url = 'http://www.mpk.poznan.pl' + link\n\n print('[*] Lacze z \\'http://www.mpk.poznan.pl' + link + '\\'')\n print()\n html = urllib.request.urlopen(url)\n page = html.read()\n html.close()\n print('[*] Parsuje html-a')\n print()\n page_soup = bs4.BeautifulSoup(page, \"html.parser\")\n\n # znajduje dane\n print('[*] Szukam danych')\n print()\n\n # rozklad jazdy\n tabelka = page_soup.findAll(\"tr\", {\"class\": \"MpkTimetableRow\"})\n\n godziny = []\n minutyRobocze = []\n minutySobotnie = []\n minutyOdswietne = []\n\n for rzad in tabelka:\n\n # biore minuty i godziny do tabelki\n x = rzad.findAll(\"td\", {\"class\": \"MpkMinutes\"})\n y = rzad.findAll(\"td\", {\"class\": \"MpkHours\"})\n\n # laduje godzine, przetwarzam i wrzucam do zmiennej\n godzina = y[0].text\n godzina = godzina.strip()\n godzina = (int(godzina))\n godziny.append(godzina)\n\n # laduje minuty, przetwarzam i wrzucam do zmiennej\n minR = x[0].text # robocze\n minS = x[1].text # sobotnie\n minSw = x[2].text # swiateczne\n\n minR = minR.strip()\n minS = minS.strip()\n minSw = minSw.strip()\n\n minR = minR.replace('N', '')\n minR = minR.replace('G', '')\n minR = minR.replace('p', '')\n minR = minR.replace('P', '')\n minR = minR.replace('F', '')\n\n minS = minS.replace('N', '')\n minS = minS.replace('G', '')\n minS = minS.replace('p', '')\n minS = minS.replace('P', '')\n minS = minS.replace('F', '')\n\n minSw = minSw.replace('N', '')\n minSw = minSw.replace('G', '')\n minSw = minSw.replace('p', '')\n minSw = minSw.replace('P', '')\n minSw = minSw.replace('F', '')\n\n # Jesli tabelka nie jest pusta to ma parsowac\n if not minR == '':\n minR = [int(s) for s in minR.split(' ')]\n\n # Jesli pusta to ma wypelnic -1\n else:\n minR = [-1]\n\n if not minS == '':\n minS = [int(s) for s in minS.split(' ')]\n else:\n minS = [-1]\n\n if not minSw == '':\n minSw = [int(s) for s in minSw.split(' ')]\n else:\n minSw = [-1]\n\n minutyRobocze.append(minR)\n minutySobotnie.append(minS)\n minutyOdswietne.append(minSw)\n print(' [+] Udalo sie pozyskac dane')\n print()\n\n print('====================================================================')\n dzienTygodnia = datetime.date.isoweekday(datetime.date.today())\n dzien = datetime.datetime.today()\n godzina = dzien.hour\n minuta = dzien.minute\n if minuta < 10:\n minutaString = str(0)+str(minuta)\n else:\n minutaString = str(minuta)\n print('Jest ' + str(dzien.hour) + ':' + minutaString + ' ' + str(dzien.date()))\n hourtable = abs((dzien.hour - 4) % 24)\n\n if dzienTygodnia == 6:\n print(\"Dzisiaj jest sobota\")\n podajczas(minutySobotnie)\n\n elif dzienTygodnia == 7:\n print(\"Dzisiaj jest niedziela\")\n podajczas(minutyOdswietne)\n\n else:\n print(\"Dzisiaj jest dzien roboczy\")\n podajczas(minutyRobocze)\n\n print()\n input(\"Wcisnij enter by zakonczyc program\")\n\nexcept HTTPError as e:\n print(' [-]Nie udalo sie polaczyc: ' + str(e))\n exit(1)\nexcept Exception as e:\n print(' [-]Cos sie nie powiodlo: ' + str(e))\n input('wcisnij enter')\n exit(1)\n","sub_path":"PartyTime.py","file_name":"PartyTime.py","file_ext":"py","file_size_in_byte":10993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"252397430","text":"\n\nfrom xai.brain.wordbase.nouns._polyhedron import _POLYHEDRON\n\n#calss header\nclass _POLYHEDRA(_POLYHEDRON, ):\n\tdef __init__(self,): \n\t\t_POLYHEDRON.__init__(self)\n\t\tself.name = \"POLYHEDRA\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = 
\"polyhedron\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_polyhedra.py","file_name":"_polyhedra.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"230255500","text":"import heapq\nn = int(input())\nmheap = list()\nfor i in range(n):\n cmnd = str(input())\n if cmnd[0] == 'I' :\n cmnd = cmnd[7:]\n heapq.heappush(mheap,(10**10 - int(cmnd)) )\n else:\n print(10**10 - heapq.heappop(mheap))\n","sub_path":"data/heap-max.py","file_name":"heap-max.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"546183392","text":"#\n# Copyright (c) 2023 Airbyte, Inc., all rights reserved.\n#\n\nimport logging\nfrom unittest.mock import Mock\n\nimport pytest\nfrom airbyte_cdk.models import SyncMode\nfrom google.ads.googleads.errors import GoogleAdsException\nfrom google.ads.googleads.v11.errors.types.errors import ErrorCode, GoogleAdsError, GoogleAdsFailure\nfrom google.ads.googleads.v11.errors.types.request_error import RequestErrorEnum\nfrom google.api_core.exceptions import DataLoss, InternalServerError, ResourceExhausted, TooManyRequests\nfrom grpc import RpcError\nfrom source_google_ads.google_ads import GoogleAds\nfrom source_google_ads.streams import ClickView, cyclic_sieve\n\nfrom .common import MockGoogleAdsClient as MockGoogleAdsClient\n\n\n@pytest.fixture\ndef mock_ads_client(mocker, config):\n \"\"\"Mock google ads library method, so it returns mocked Client\"\"\"\n mocker.patch(\"source_google_ads.google_ads.GoogleAdsClient.load_from_dict\", return_value=MockGoogleAdsClient(config))\n\n\n# EXPIRED_PAGE_TOKEN exception will be raised when page token has expired.\nexception = GoogleAdsException(\n error=RpcError(),\n failure=GoogleAdsFailure(errors=[GoogleAdsError(error_code=ErrorCode(request_error=RequestErrorEnum.RequestError.EXPIRED_PAGE_TOKEN))]),\n call=RpcError(),\n request_id=\"test\",\n)\n\n\ndef mock_response_1():\n yield [\n {\"segments.date\": \"2021-01-01\", \"click_view.gclid\": \"1\"},\n {\"segments.date\": \"2021-01-02\", \"click_view.gclid\": \"2\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"3\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"},\n ]\n raise exception\n\n\ndef mock_response_2():\n yield [\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"3\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"5\"},\n {\"segments.date\": \"2021-01-04\", \"click_view.gclid\": \"6\"},\n {\"segments.date\": \"2021-01-05\", \"click_view.gclid\": \"7\"},\n ]\n\n\nclass MockGoogleAds(GoogleAds):\n count = 0\n\n def parse_single_result(self, schema, result):\n return result\n\n def send_request(self, query: str, customer_id: str):\n self.count += 1\n if self.count == 1:\n return mock_response_1()\n else:\n return mock_response_2()\n\n\ndef test_page_token_expired_retry_succeeds(mock_ads_client, config, customers):\n \"\"\"\n Page token expired while reading records on date 2021-01-03\n The latest read record is {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"}\n It should retry reading starting from 2021-01-03, already read records will be reread again from that date.\n It shouldn't read records on 2021-01-01, 2021-01-02\n \"\"\"\n customer_id = next(iter(customers)).id\n stream_slice = {\"customer_id\": customer_id, \"start_date\": 
\"2021-01-01\", \"end_date\": \"2021-01-15\"}\n\n google_api = MockGoogleAds(credentials=config[\"credentials\"])\n incremental_stream_config = dict(\n api=google_api,\n conversion_window_days=config[\"conversion_window_days\"],\n start_date=config[\"start_date\"],\n customers=customers,\n end_date=\"2021-04-04\",\n )\n stream = ClickView(**incremental_stream_config)\n stream.get_query = Mock()\n stream.get_query.return_value = \"query\"\n\n result = list(stream.read_records(sync_mode=SyncMode.incremental, cursor_field=[\"segments.date\"], stream_slice=stream_slice))\n assert len(result) == 9\n assert stream.get_query.call_count == 2\n stream.get_query.assert_called_with({\"customer_id\": customer_id, \"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-15\"})\n\n\ndef mock_response_fails_1():\n yield [\n {\"segments.date\": \"2021-01-01\", \"click_view.gclid\": \"1\"},\n {\"segments.date\": \"2021-01-02\", \"click_view.gclid\": \"2\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"3\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"},\n ]\n\n raise exception\n\n\ndef mock_response_fails_2():\n yield [\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"3\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"5\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"6\"},\n ]\n\n raise exception\n\n\nclass MockGoogleAdsFails(MockGoogleAds):\n def send_request(self, query: str, customer_id: str):\n self.count += 1\n if self.count == 1:\n return mock_response_fails_1()\n else:\n return mock_response_fails_2()\n\n\ndef test_page_token_expired_retry_fails(mock_ads_client, config, customers):\n \"\"\"\n Page token has expired while reading records within date \"2021-01-03\", it should raise error,\n because Google Ads API doesn't allow filter by datetime.\n \"\"\"\n customer_id = next(iter(customers)).id\n stream_slice = {\"customer_id\": customer_id, \"start_date\": \"2021-01-01\", \"end_date\": \"2021-01-15\"}\n\n google_api = MockGoogleAdsFails(credentials=config[\"credentials\"])\n incremental_stream_config = dict(\n api=google_api,\n conversion_window_days=config[\"conversion_window_days\"],\n start_date=config[\"start_date\"],\n end_date=\"2021-04-04\",\n customers=customers,\n )\n stream = ClickView(**incremental_stream_config)\n stream.get_query = Mock()\n stream.get_query.return_value = \"query\"\n\n with pytest.raises(GoogleAdsException):\n list(stream.read_records(sync_mode=SyncMode.incremental, cursor_field=[\"segments.date\"], stream_slice=stream_slice))\n\n stream.get_query.assert_called_with({\"customer_id\": customer_id, \"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-15\"})\n assert stream.get_query.call_count == 2\n\n\ndef mock_response_fails_one_date():\n yield [\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"3\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"5\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"6\"},\n ]\n\n raise exception\n\n\nclass MockGoogleAdsFailsOneDate(MockGoogleAds):\n def send_request(self, query: str, customer_id: str):\n return mock_response_fails_one_date()\n\n\ndef test_page_token_expired_it_should_fail_date_range_1_day(mock_ads_client, config, customers):\n \"\"\"\n Page token has expired while reading records within date \"2021-01-03\",\n it should raise error, because Google Ads API 
doesn't allow filter by datetime.\n Minimum date range is 1 day.\n \"\"\"\n customer_id = next(iter(customers)).id\n stream_slice = {\"customer_id\": customer_id, \"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-04\"}\n\n google_api = MockGoogleAdsFailsOneDate(credentials=config[\"credentials\"])\n incremental_stream_config = dict(\n api=google_api,\n conversion_window_days=config[\"conversion_window_days\"],\n start_date=config[\"start_date\"],\n end_date=\"2021-04-04\",\n customers=customers,\n )\n stream = ClickView(**incremental_stream_config)\n stream.get_query = Mock()\n stream.get_query.return_value = \"query\"\n\n with pytest.raises(GoogleAdsException):\n list(stream.read_records(sync_mode=SyncMode.incremental, cursor_field=[\"segments.date\"], stream_slice=stream_slice))\n\n stream.get_query.assert_called_with({\"customer_id\": customer_id, \"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-04\"})\n assert stream.get_query.call_count == 1\n\n\n@pytest.mark.parametrize(\"error_cls\", (ResourceExhausted, TooManyRequests, InternalServerError, DataLoss))\ndef test_retry_transient_errors(mocker, config, customers, error_cls):\n mocker.patch(\"time.sleep\")\n credentials = config[\"credentials\"]\n credentials.update(use_proto_plus=True)\n api = GoogleAds(credentials=credentials)\n mocked_search = mocker.patch.object(api.ga_service, \"search\", side_effect=error_cls(\"Error message\"))\n incremental_stream_config = dict(\n api=api,\n conversion_window_days=config[\"conversion_window_days\"],\n start_date=config[\"start_date\"],\n end_date=\"2021-04-04\",\n customers=customers,\n )\n stream = ClickView(**incremental_stream_config)\n customer_id = next(iter(customers)).id\n stream_slice = {\"customer_id\": customer_id, \"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-04\"}\n records = []\n with pytest.raises(error_cls):\n records = list(stream.read_records(sync_mode=SyncMode.incremental, cursor_field=[\"segments.date\"], stream_slice=stream_slice))\n assert mocked_search.call_count == 5\n assert records == []\n\n\ndef test_cyclic_sieve(caplog):\n original_logger = logging.getLogger(\"test\")\n sieve = cyclic_sieve(original_logger, fraction=10)\n for _ in range(20):\n sieve.info(\"Ground Control to Major Tom\")\n sieve.info(\"Your circuit's dead, there's something wrong\")\n sieve.info(\"Can you hear me, Major Tom?\")\n sieve.bump()\n assert len(caplog.records) == 6 # 20 * 3 / 10\n","sub_path":"dts/airbyte/airbyte-integrations/connectors/source-google-ads/unit_tests/test_streams.py","file_name":"test_streams.py","file_ext":"py","file_size_in_byte":8952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"81800671","text":"import pandas as pd \r\nimport numpy as np \r\nfrom collections import OrderedDict\r\n\r\ncorr = pd.read_csv(\"C:/Users/pratyush/Downloads/corr1.csv\",index_col = 0)\r\nstart = 0\r\nleve = 0\r\ncurr = -1\r\ncurr_lev_sorted = []\r\ncompleted_pars = {}\r\n\r\ndef saturate(thres):\r\n\tif thres<=2.5:\r\n\t\treturn False\r\n\telse:\r\n\t\treturn True\r\n\r\n# run each time the student has taken a assignment..\r\ndef get_path(theta1,levels,parents,threshold=2.5):\r\n\tglobal start,leve,curr,curr_lev_sorted,completed_pars\r\n\ttheta = {}\r\n\tinv_th = {}\r\n\tfor i in theta1:\r\n\t\ttheta[i[\"t_id\"]] = i[\"theta\"]\r\n\t\tinv_th[i[\"theta\"][-1]] = i[\"t_id\"]\r\n\tsort_t = sorted(list(inv_th.keys()))\r\n\r\n\t# if starts.. 
at level 0..\r\n\tif leve==0 and start==0:\r\n\t\tstart = 1\r\n\t\tthet0 = dict([(theta[i][-1],i) for i in levels[0]])\r\n\t\tcurr_lev_sorted = sorted(list(thet0.keys()))\r\n\t\tcurr_lev_sorted = [thet0[i] for i in curr_lev_sorted]\r\n\t\tcurr = curr_lev_sorted[-1]\r\n\t\tcurr_lev_sorted = curr_lev_sorted[:-1]\r\n\r\n\telif leve>=0 and start==1:\r\n\t\tif saturate(theta[curr][-1]) and len(curr_lev_sorted)==0:\r\n\t\t\tcompleted_pars[curr] = (theta[curr][-1]-theta[curr][0])/theta[curr][0]\r\n\t\t\tleve += 1\r\n\t\t\tthet1 = dict([(theta[i][-1],i) for i in levels[leve]])\r\n\t\t\tthet2 = dict([(i,theta[i][-1]) for i in levels[leve]])\r\n\t\t\tthet2 = {k: v for k, v in sorted(thet2.items(), key=lambda item: item[1],reverse = True)}\r\n\t\t\tpars = {}\r\n\t\t\tfor i in levels[leve]:\r\n\t\t\t\tfor j in parents[i]:\r\n\t\t\t\t\tif i in pars:\r\n\t\t\t\t\t\tpars[i] += completed_pars[j]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpars[i] = completed_pars[j]\r\n\r\n\t\t\tpars = {k: v for k, v in sorted(pars.items(), key=lambda item: item[1],reverse = True)}\r\n\t\t\t\r\n\t\t\t# walk the tags in descending order of accumulated parent improvement;\r\n\t\t\t# tags whose parent scores tie are ordered by current theta (thet2 is already sorted descending)\r\n\t\t\tsort = []\r\n\t\t\ttie_group = []\r\n\t\t\tprev = None\r\n\t\t\tfor i, j in pars.items():\r\n\t\t\t\tif j == prev:\r\n\t\t\t\t\ttie_group.append(i)\r\n\t\t\t\telse:\r\n\t\t\t\t\tif len(tie_group) > 1:\r\n\t\t\t\t\t\tsort.extend([k for k in thet2 if k in tie_group])\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsort.extend(tie_group)\r\n\t\t\t\t\ttie_group = [i]\r\n\t\t\t\t\tprev = j\r\n\t\t\tif len(tie_group) > 1:\r\n\t\t\t\tsort.extend([k for k in thet2 if k in tie_group])\r\n\t\t\telse:\r\n\t\t\t\tsort.extend(tie_group)\r\n\r\n\t\t\tcurr_lev_sorted = sort\r\n\t\t\tcurr = curr_lev_sorted[-1]\r\n\t\t\tcurr_lev_sorted = curr_lev_sorted[:-1]\r\n\t\t\t\r\n\r\n\t\telif saturate(theta[curr][-1]) and len(curr_lev_sorted)!=0:\r\n\t\t\tcompleted_pars[curr] = (theta[curr][-1]-theta[curr][0])/theta[curr][0]\r\n\t\t\tcurr = curr_lev_sorted[-1]\r\n\t\t\tcurr_lev_sorted = curr_lev_sorted[:-1]\r\n\t\telse:\r\n\t\t\tprint(\"continue...\")\r\n\r\n\r\n# run only once\r\ndef pathfinder():\r\n\t# make the graph..\r\n\tcorr.columns = [int(i) for i in corr.columns]\r\n\tcorr.index = [int(i) for i in corr.index]\r\n\ttags = list(corr.columns)\r\n\tqueue = []\r\n\tlevels = {}\r\n\tparents = {}\r\n\r\n\t# parents immediate..\r\n\tfor i in corr.columns:\r\n\t\tfor j,k in zip(corr.index,corr[i]):\r\n\t\t\tif k==1:\r\n\t\t\t\tif i in parents:\r\n\t\t\t\t\tparents[i].append(j)\r\n\t\t\t\telse:\r\n\t\t\t\t\tparents[i]=[j]\r\n\r\n\t# init the levels..\r\n\tfor i in tags:\r\n\t\tlevels[i] = 0\r\n\t\r\n\t# find the level zero nodes..\r\n\tqueue.extend(list(corr.columns[(corr == 0).all()]))\r\n\twhile len(queue)>0:\r\n\t\tnode = queue[0]\r\n\t\tqueue = queue[1:]\r\n\t\tlev = levels[node]\r\n\r\n\t\tdf = corr==1\r\n\t\tdf = df.loc[node]\r\n\t\tdf = list(df[df].index)\r\n\t\tfor i in df:\r\n\t\t\tlevels[i] = lev + 1\r\n\t\t\tqueue.append(i)\r\n\r\n\treturn levels,parents\r\n\r\ninp0 = [{\"t_id\":1,\"theta\":[1.2,1.3,1.2]},{\"t_id\":2,\"theta\":[1.3,1.2,1.5]}\r\n\t\t\t,{\"t_id\":3,\"theta\":[0.6,1.1,1.2]},{\"t_id\":4,\"theta\":[1.4,1.8,2.1]}\r\n\t\t\t,{\"t_id\":5,\"theta\":[1.7,1.8,2.0]},{\"t_id\":6,\"theta\":[2.1,2.3,2.5]}\r\n\t\t\t,{\"t_id\":7,\"theta\":[1.5,1.7,2.3]}]\r\n\r\ninp1 = [{\"t_id\":1,\"theta\":[1.2,1.3,1.2]},{\"t_id\":2,\"theta\":[1.3,1.2,1.5]}\r\n\t\t\t,{\"t_id\":3,\"theta\":[0.6,1.1,1.2]},{\"t_id\":4,\"theta\":[1.4,1.8,2.1]}\r\n\t\t\t,{\"t_id\":5,\"theta\":[1.7,1.8,2.0]},{\"t_id\":6,\"theta\":[2.1,2.3,2.5]}\r\n\t\t\t,{\"t_id\":7,\"theta\":[1.5,1.7,2.3,2.4,2.4,2.4]}]\r\n\r\ninp2 = 
[{\"t_id\":1,\"theta\":[1.2,1.3,1.2]},{\"t_id\":2,\"theta\":[1.3,1.2,1.5]}\r\n\t\t\t,{\"t_id\":3,\"theta\":[0.6,1.1,1.2]},{\"t_id\":4,\"theta\":[1.4,1.8,2.1]}\r\n\t\t\t,{\"t_id\":5,\"theta\":[1.7,1.8,2.0]},{\"t_id\":6,\"theta\":[2.1,2.3,2.5]}\r\n\t\t\t,{\"t_id\":7,\"theta\":[1.5,1.7,2.3,2.4,2.4,2.4,2.6,2.8,2.7]}]\r\n\r\ninp3 = [{\"t_id\":1,\"theta\":[1.2,1.3,1.2,2.0,2.4,2.6]},{\"t_id\":2,\"theta\":[1.3,1.2,1.5]}\r\n\t\t\t,{\"t_id\":3,\"theta\":[0.6,1.1,1.2]},{\"t_id\":4,\"theta\":[1.4,1.8,2.1]}\r\n\t\t\t,{\"t_id\":5,\"theta\":[1.7,1.8,2.0]},{\"t_id\":6,\"theta\":[2.1,2.3,2.5]}\r\n\t\t\t,{\"t_id\":7,\"theta\":[1.5,1.7,2.3,2.4,2.4,2.4,2.6,2.8,2.7]}]\r\n\r\n# inp1 = [{\"t_id\":1,\"theta\":[1.2,1.3,1.2,2.0,2.4,2.6]},{\"t_id\":2,\"theta\":[1.3,1.2,1.5]}\r\n# \t\t\t,{\"t_id\":3,\"theta\":[0.6,1.1,1.2]},{\"t_id\":4,\"theta\":[1.4,1.8,2.1]}\r\n# \t\t\t,{\"t_id\":5,\"theta\":[1.7,1.8,2.0]},{\"t_id\":6,\"theta\":[2.1,2.3,2.5]}\r\n# \t\t\t,{\"t_id\":7,\"theta\":[1.5,1.7,2.3,2.6,2.8,2.7]}]\r\n\r\nlevels1,parents = pathfinder()\r\nlevels = {}\r\nfor i in levels1:\r\n\tif levels1[i] in levels:\r\n\t\tlevels[levels1[i]].append(i)\r\n\telse:\r\n\t\tlevels[levels1[i]] = [i]\r\n\r\nprint(levels)\r\n#first run...\r\nget_path(inp0,levels,parents)\r\nprint(curr)\r\nprint(leve)\r\nprint(curr_lev_sorted)\r\n\r\nget_path(inp1,levels,parents)\r\nprint(curr)\r\nprint(leve)\r\nprint(curr_lev_sorted)\r\n\r\nget_path(inp2,levels,parents)\r\nprint(curr)\r\nprint(leve)\r\nprint(curr_lev_sorted)\r\n\r\nget_path(inp3,levels,parents)\r\nprint(curr)\r\nprint(leve)\r\nprint(curr_lev_sorted)\r\n\t\t","sub_path":"prep/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"38954718","text":"from pulp import *\nfrom modules.lb_extract import LBExtract\nfrom modules.lb_transform import LBTransform\n\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport my_config as mc\n\nstart_date = '2019/01/01'\nend_date = '2019/12/31'\n\next = LBExtract(start_date, end_date, False)\ntr = LBTransform(start_date, end_date)\n#ext.mock_flag = True\n#ext.set_mock_path()\n\npd.set_option('display.max_columns', 200)\npd.set_option('display.max_rows', 200)\n\ntemp_df = ext.get_raceuma_table_base()\n#temp_df = tr.normalize_raceuma_df(raceuma_base_df)\n\ndf = temp_df[[\"競走コード\", \"馬番\", \"デフォルト得点\", \"確定着順\", \"単勝配当\", \"複勝配当\", \"得点V3\"]]\n\ndict_path = mc.return_base_path(False)\nintermediate_folder = dict_path + 'intermediate/'\nwith open(intermediate_folder + 'lb_v1_lb_v1/raceuma_ens/export_data.pkl', 'rb') as f:\n lb_v1_df = pickle.load(f)\nwith open(intermediate_folder + 'lb_v2_lb_v2/raceuma_ens/export_data.pkl', 'rb') as f:\n lb_v2_df = pickle.load(f)\nwith open(intermediate_folder + 'lb_v3_lb_v3/raceuma_ens/export_data.pkl', 'rb') as f:\n lb_v3_df = pickle.load(f)\n\nmy_df = pd.merge(lb_v1_df, lb_v2_df , on=[\"RACE_KEY\", \"UMABAN\", \"target\"]).rename(columns={\"predict_std_x\": \"偏差v1\", \"predict_std_y\":\"偏差v2\"})\nmy_df = pd.merge(my_df, lb_v3_df , on=[\"RACE_KEY\", \"UMABAN\", \"target\"]).rename(columns={\"predict_std\": \"偏差v3\", \"RACE_KEY\": \"競走コード\", \"UMABAN\": \"馬番\"})\nwin_df = my_df[my_df[\"target\"] == \"WIN_FLAG\"]\njiku_df = my_df[my_df[\"target\"] == \"JIKU_FLAG\"]\nana_df = my_df[my_df[\"target\"] == \"ANA_FLAG\"]\nwin_df.loc[:, \"勝ち偏差\"] = win_df[\"偏差v1\"] * 0.50 + win_df[\"偏差v2\"] * 0.30 + win_df[\"偏差v3\"] * 0.20\njiku_df.loc[:, \"軸偏差\"] = jiku_df[\"偏差v1\"] * 0.50 + jiku_df[\"偏差v2\"] * 0.25 
+ jiku_df[\"偏差v3\"] * 0.25\nana_df.loc[:, \"穴偏差\"] = ana_df[\"偏差v1\"] * 0.45 + ana_df[\"偏差v2\"] * 0.10 + ana_df[\"偏差v3\"] * 0.45\n\nmy_score_df = pd.merge(win_df[[\"競走コード\", \"馬番\", \"勝ち偏差\"]], jiku_df[[\"競走コード\", \"馬番\", \"軸偏差\"]], on=[\"競走コード\", \"馬番\"])\nmy_score_df = pd.merge(my_score_df, ana_df[[\"競走コード\", \"馬番\", \"穴偏差\"]], on=[\"競走コード\", \"馬番\"])\n\ndf = pd.merge(df, my_score_df, on=[\"競走コード\", \"馬番\"])\ndf.loc[:, \"勝\"] = df[\"確定着順\"].apply(lambda x: 1 if x == 1 else 0)\ndf.loc[:, \"連\"] = df[\"確定着順\"].apply(lambda x: 1 if x in (1, 2) else 0)\ndf.loc[:, \"複\"] = df[\"確定着順\"].apply(lambda x: 1 if x in (1, 2, 3) else 0)\nprint(df.head())\nprint(\"------ check ------\")\nprint(\"df\", df.shape)\n\niter_range = 5\nscore_rate = range(0, 1, iter_range)\nv3_rate = range(0, 1, iter_range)\nwin_rate = range(20, 101, iter_range)\njiku_rate = range(20, 101, iter_range)\nana_rate = range(20, 101, iter_range)\n\ns1_list = []\nv3_list = []\nwin_list = []\njiku_list = []\nana_list = []\n\ncnt_list = []\nav_win_list = []\nav_ren_list = []\nav_fuku_list = []\ntan_ret_list = []\nfuku_ret_list = []\n\n#df = df.head(200)\ntotal_count = len(df)\n\nfor s1 in score_rate:\n print(s1)\n for v3 in v3_rate:\n for win in win_rate:\n for jiku in jiku_rate:\n for ana in ana_rate:\n if s1 + v3 + win + jiku + ana == 100:\n print(\"s1:\" + str(s1) + \" win:\" + str(win) + \" jiku:\" + str(jiku) + \" ana:\" + str(ana))\n temp_df = df\n temp_df.loc[:, \"最適得点\"] = df[\"デフォルト得点\"] * s1/100 + df[\"得点V3\"] * v3/100 + df[\"勝ち偏差\"] * win/100 + df[\"軸偏差\"] * jiku/100 + df[\"穴偏差\"] * ana/100\n target_df = temp_df[temp_df[\"最適得点\"] >= 55]\n cnt_list.append(len(target_df))\n s1_list.append(s1)\n v3_list.append(v3)\n win_list.append(win)\n jiku_list.append(jiku)\n ana_list.append(ana)\n av_win_list.append(round(target_df[\"勝\"].mean() * 100, 2))\n av_ren_list.append(round(target_df[\"連\"].mean() * 100, 2))\n av_fuku_list.append(round(target_df[\"複\"].mean() * 100, 2))\n tan_ret_list.append(round(target_df[\"単勝配当\"].mean(), 2))\n fuku_ret_list.append(round(target_df[\"複勝配当\"].mean(), 2))\n\n\nscore_df = pd.DataFrame(\n data={'score_rate': s1_list, 'win_rate': win_list, 'jiku_rate': jiku_list, 'ana_rate': ana_list,\n 'count': cnt_list, 'v3_rate': v3_list,\n 'av_win': av_win_list, 'av_ren': av_ren_list, 'av_fuku': av_fuku_list, 'tan_return': tan_ret_list , 'fuku_return': fuku_ret_list},\n columns=['score_rate', 'v3_rate', 'win_rate', 'jiku_rate', 'ana_rate', 'count', 'av_win', 'av_ren', 'av_fuku', 'tan_return', 'fuku_return']\n)\nscore_df.loc[:,'tan_return_rank'] = score_df['tan_return'].rank(ascending=False)\nscore_df.loc[:,'fuku_return_rank'] = score_df['tan_return'].rank(ascending=False)\nscore_df.loc[:,'av_win_rank'] = score_df['av_win'].rank(ascending=False)\nscore_df.loc[:,'av_ren_rank'] = score_df['av_ren'].rank(ascending=False)\nscore_df.loc[:,'av_fuku_rank'] = score_df['av_fuku'].rank(ascending=False)\nscore_df.loc[:,'total_rank'] = score_df['tan_return_rank'] + score_df['fuku_return_rank'] \\\n + score_df['av_win_rank'] + score_df['av_ren_rank'] + score_df['av_fuku_rank']\n\nprint(\"----------- tan_return -----------------\")\nprint(score_df.sort_values('tan_return', ascending=False).head())\nprint(\"----------- fuku_return -----------------\")\nprint(score_df.sort_values('fuku_return', ascending=False).head())\nprint(\"----------- av_win -----------------\")\nprint(score_df.sort_values('av_win', ascending=False).head())\nprint(\"----------- av_ren -----------------\")\nprint(score_df.sort_values('av_ren', 
ascending=False).head())\nprint(\"----------- av_fuku -----------------\")\nprint(score_df.sort_values('av_fuku', ascending=False).head())\n\nprint(\"----------- total_rank -----------------\")\ndump_df = score_df.sort_values('total_rank').head(10)\nprint(dump_df)\nprint(score_df.describe())\n\nwith open(dict_path + 'temp_analysis/output/find_parameter.pkl', 'wb') as f:\n pickle.dump(dump_df, f)\n","sub_path":"temp_analysis/find_parameter.py","file_name":"find_parameter.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"36699713","text":"#!/usr/bin/env python\n\nimport uuid\nimport qrcode\nimport json\nimport locale\nimport time\nimport os\nimport re\nimport pandas as pd\nimport matplotlib as mpl\nmpl.use('Agg')\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib import rcParams\nfrom matplotlib import colors\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n\nfrom workflow_sw import workflow_software\nfrom workflow_logger import log_progress, log_info, log_version, log_error\nfrom workflow_exec import run_command, run_command_file_handle\n\n__modname__ = \"qc_report_generation_v2.py\"\n\nclass germline_qc_report(object):\n\n def __init__(self, sample_name, output_dir, pipeline, pipeline_name,\n instrument, specimen, reagent_kit, cycle, run_name,\n log_file, workflow_dir, script_dir, final_bam):\n self._sample_name = sample_name\n self._output_dir = output_dir\n self._pipeline = pipeline\n self._pipeline_name = pipeline_name\n\n self._instrument = str(instrument).replace(\"_\", \" \")\n self._specimen = str(specimen).replace(\"_\", \" \")\n self._reagent_kit = str(reagent_kit).replace(\"_\", \" \")\n self._cycle = str(cycle).replace(\"_\", \" \")\n self._run_name = str(run_name).replace(\"_\", \" \")\n\n self._xml_file = \"%s/data/stat/%s.xml\"%(self._output_dir, self._sample_name)\n self._pdf_file = \"%s/data/stat/%s.pdf\"%(self._output_dir, self._sample_name)\n self._log_file = log_file\n self._final_bam = final_bam\n\n self._default_coverage_heatmap = \"%s/modules/images/logo-yellow.png\"%(workflow_dir)\n self._default_control_file = \"%s/assay_reference/control_ampstat.txt\"%(script_dir)\n self._stat_json = \"%s/data/stat/%s.stat.json\"%(self._output_dir, self._sample_name)\n\n self._sw = workflow_software(\"germline\")\n\n pattern=r'^[Cc][Oo][Nn][Tt][Rr][Oo][Ll]'\n if re.match(pattern,sample_name):\n self._is_control=\"yes\"\n else:\n self._is_control=\"no\"\n\n def indent(self, elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n self.indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n def main_page(self):\n main_page = Element(\"main_page\")\n\n ### sample name\n SubElement(main_page, \"sample_name\").text = self._sample_name\n\n ### report ID\n report_id = str(uuid.uuid4()) # Convert UUID format to a Python string.\n report_id = report_id.upper() # Make all characters uppercase.\n report_id = report_id.replace(\"-\",\"\") # Remove the UUID '-'.\n self._report_id = report_id[0:6]\n SubElement(main_page, \"report_id\").text = self._report_id\n\n ### qrcode\n qr = qrcode.QRCode(version=1, 
error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=4)\n validation_url=\"http://www.ngenebio.com\"\n qr.add_data(validation_url)\n qr.make(fit=True)\n qr_img = qr.make_image()\n output = \"%s/data/stat/%s_validation_qr.png\"%(self._output_dir, self._sample_name)\n qr_img.save(output, kind='png')\n SubElement(main_page, \"qrcode_path\").text = output\n\n return main_page\n\n def header_contents(self):\n header_contents = Element(\"header_contents\")\n SubElement(header_contents, \"content\").text = \"%s (%s) Quality Report %s (%s)\"%(self._pipeline_name, self._pipeline, self._sample_name, self._report_id)\n return header_contents\n\n def analysis_information(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Analysis Information\"\n\n sn = Element(\"analysis_information\")\n SubElement(sn, \"name\").text = \"Sample Name\"\n SubElement(sn, \"value\").text = self._sample_name\n qc_contents.append(sn)\n\n at = Element(\"analysis_information\")\n SubElement(at, \"name\").text = \"Analysis Type\"\n if self._specimen == 'FFPE':\n SubElement(at, \"value\").text = \"Somatic Variation\"\n else:\n SubElement(at, \"value\").text = \"Germline Variation\"\n qc_contents.append(at)\n\n assay = Element(\"analysis_information\")\n SubElement(assay, \"name\").text = \"Assay Type\"\n SubElement(assay, \"value\").text = \"%s (%s)\" % (self._pipeline_name, self._pipeline)\n qc_contents.append(assay)\n\n pv = Element(\"analysis_information\")\n SubElement(pv, \"name\").text = \"Pipeline Version\"\n SubElement(pv, \"value\").text = \"v1.0\"\n qc_contents.append(pv)\n\n rd = Element(\"analysis_information\")\n SubElement(rd, \"name\").text = \"Report Date\"\n locale.setlocale(locale.LC_ALL, \"\")\n now = time.localtime()\n format_date = \"%04d-%02d-%02d %02d:%02d:%02d\"%(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)\n SubElement(rd, \"value\").text = format_date\n qc_contents.append(rd)\n\n return qc_contents\n\n def sequencing_information(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Sequencing Information\"\n \n it = Element(\"sequencing_information\")\n SubElement(it, \"name\").text = \"Instrument\"\n SubElement(it, \"value\").text = self._instrument\n qc_contents.append(it)\n \n sm = Element(\"sequencing_information\")\n SubElement(sm, \"name\").text = \"Specimen\"\n SubElement(sm, \"value\").text = self._specimen\n qc_contents.append(sm)\n \n #rk = Element(\"sequencing_information\")\n #SubElement(rk, \"name\").text = \"Reagent Kit\"\n #SubElement(rk, \"value\").text = self._reagent_kit\n #qc_contents.append(rk)\n \n #cc = Element(\"sequencing_information\")\n #SubElement(cc, \"name\").text = \"Cycle\"\n #SubElement(cc, \"value\").text = self._cycle\n #qc_contents.append(cc)\n \n rn = Element(\"sequencing_information\")\n SubElement(rn, \"name\").text = \"Run Name\"\n SubElement(rn, \"value\").text = self._run_name\n qc_contents.append(rn)\n \n return qc_contents\n \n def raw_fastq_format(self, js):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Raw FASTQ Format\"\n \n ft = Element(\"raw_fastq_format\")\n SubElement(ft, \"name\").text = \"FASTQ File Type\"\n SubElement(ft, \"value\").text = js['fastqc']['raw_fastqc']['summary']['File type']\n qc_contents.append(ft)\n \n ec = Element(\"raw_fastq_format\")\n SubElement(ec, \"name\").text = \"Quality Encoding\"\n SubElement(ec, \"value\").text = 
js['fastqc']['raw_fastqc']['summary']['Encoding']\n qc_contents.append(ec)\n \n return qc_contents\n\n def data_summary(self):\n data_summary_file = \"%s/data/stat/%s.panel.txt\"%(self._output_dir, self._sample_name)\n f = open(data_summary_file, \"r\")\n lines = f.readlines()\n \n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Data Summary\"\n ds = Element(\"data_summary\")\n \n for line in lines:\n if line.startswith(\"No\"):\n continue\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n\n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[0]\n SubElement(row, \"col\").text = sp[1].replace(\">\", \">\")\n SubElement(row, \"col\").text = sp[2]\n ds.append(row)\n\n qc_contents.append(ds)\n f.close()\n return qc_contents\n\n def raw_read_quality(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Raw Read Quality\"\n \n rrq = Element(\"raw_read_quality\")\n bsq = Element(\"base_sequence_quality\")\n SubElement(bsq, \"r1\").text = \"%s/data/basecall/fastqc/%s_normalize.1_fastqc/Images/per_base_quality.png\"%(self._output_dir, self._sample_name)\n SubElement(bsq, \"r2\").text = \"%s/data/basecall/fastqc/%s_normalize.2_fastqc/Images/per_base_quality.png\"%(self._output_dir, self._sample_name)\n rrq.append(bsq)\n \n sqs = Element(\"base_sequence_quality_score\")\n SubElement(sqs, \"r1\").text = \"%s/data/basecall/fastqc/%s_normalize.1_fastqc/Images/per_sequence_quality.png\"%(self._output_dir, self._sample_name)\n SubElement(sqs, \"r2\").text = \"%s/data/basecall/fastqc/%s_normalize.2_fastqc/Images/per_sequence_quality.png\"%(self._output_dir, self._sample_name)\n rrq.append(sqs)\n \n sld = Element(\"sequence_length_distribution\")\n SubElement(sld, \"r1\").text = \"%s/data/basecall/fastqc/%s_normalize.1_fastqc/Images/sequence_length_distribution.png\"%(self._output_dir, self._sample_name)\n SubElement(sld, \"r2\").text = \"%s/data/basecall/fastqc/%s_normalize.2_fastqc/Images/sequence_length_distribution.png\"%(self._output_dir, self._sample_name)\n rrq.append(sld)\n \n qc_contents.append(rrq)\n return qc_contents\n\n def alignment(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Alignment\"\n \n align = Element(\"alignment\")\n alignment_stat_plot = Element(\"alignment_stat_plot\")\n SubElement(alignment_stat_plot, \"img\").text = \"%s/data/stat/%s.alignment.jpg\"%(self._output_dir, self._sample_name)\n align.append(alignment_stat_plot)\n \n mapping_quality_stat_plot = Element(\"mapping_quality_stat_plot\")\n SubElement(mapping_quality_stat_plot, \"img\").text = \"%s/data/stat/%s.mapqual.jpg\"%(self._output_dir, self._sample_name)\n align.append(mapping_quality_stat_plot)\n \n qc_contents.append(align)\n return qc_contents\n\n def mapping_statistics(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Mapping Statistics\"\n \n map_stat = Element(\"mapping_statistics\")\n SubElement(map_stat, \"sample_name\").text = self._sample_name\n read_stat_plot = Element(\"read_stat_plot\")\n SubElement(read_stat_plot, \"img\").text = \"%s/data/stat/%s.mapstat.png\"%(self._output_dir, self._sample_name)\n map_stat.append(read_stat_plot)\n\n qc_contents.append(map_stat)\n return qc_contents\n\n def softclip(self):\n softclip_file = \"%s/data/stat/%s_softclip.txt\"%(self._output_dir, self._sample_name)\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Soft Clipping 
Statistics\"\n\n softclip = Element(\"softclip\")\n softclip_png = Element(\"softclip_png\")\n softclip_plot = \"%s/data/stat/%s_softclip.png\"%(self._output_dir, self._sample_name)\n SubElement(softclip_png, \"img\").text = softclip_plot\n softclip.append(softclip_png)\n\n softclip_data = Element(\"softclip_data\")\n\n try:\n f = open(softclip_file, \"r\")\n lines = f.readlines()\n total_count = 0\n front_clip = 0\n end_clip = 0\n both_clip = 0\n\n for line in lines:\n if line.startswith(\"Amplicon\"):\n continue\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n \n total_count += int(sp[1])\n front_clip += int(sp[3])\n end_clip += int(sp[4])\n both_clip += int(sp[5])\n\n row = Element(\"row\")\n for _str in sp:\n SubElement(row, \"col\").text = _str\n softclip_data.append(row)\n\n SubElement(softclip, \"total_count\").text = str(total_count)\n SubElement(softclip, \"front_clip\").text = str(front_clip)\n SubElement(softclip, \"end_clip\").text = str(end_clip)\n SubElement(softclip, \"both_clip\").text = str(both_clip)\n f.close()\n except Exception as ex_str:\n print(ex_str)\n\n softclip.append(softclip_data)\n\n cmd = [\"Rscript\",\n self._sw.workflow_software[\"r_softclip\"],\n softclip_file,\n softclip_plot]\n run_command(__modname__, \" \".join(cmd), self._log_file)\n \n warn_file = \"%s/data/stat/%s_warn.bed\"%(self._output_dir, self._sample_name)\n \n try:\n warn_file_size = os.path.getsize(warn_file)\n \n if (warn_file_size != 0):\n SubElement(softclip, \"is_softclipped\").text = \"true\"\n warn_data = Element(\"warn_data\")\n\n f = open(warn_file, \"r\")\n lines = f.readlines()\n \n for line in lines:\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[3]\n SubElement(row, \"col\").text = str(round(float(sp[5]), 2))\n SubElement(row, \"col\").text = sp[0]\n SubElement(row, \"col\").text = str(int(sp[1]) + 100)\n SubElement(row, \"col\").text = str(int(sp[2]) - 100)\n warn_data.append(row)\n softclip.append(warn_data)\n f.close()\n else:\n SubElement(softclip, \"is_softclipped\").text = \"false\"\n except Exception as ex_str:\n print(ex_str)\n\n qc_contents.append(softclip)\n return qc_contents\n\n def coverage_and_depth(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Coverage and Depth\"\n\n cov_plot1 = \"%s/data/stat/%s_BRCA1_coverage.png\"%(self._output_dir, self._sample_name)\n cov_plot2 = \"%s/data/stat/%s_BRCA2_coverage.png\"%(self._output_dir, self._sample_name)\n\n cmd1 = [\"Rscript\",\n self._sw.workflow_software[\"r_coverage_and_depth\"],\n self._sw.workflow_software[\"ngb_txdb\"],\n self._final_bam,\n \"41196000\",\n \"41277450\",\n \"chr17\",\n cov_plot1]\n run_command(__modname__, \" \".join(cmd1), self._log_file)\n\n cmd2 = [\"Rscript\",\n self._sw.workflow_software[\"r_coverage_and_depth\"],\n self._sw.workflow_software[\"ngb_txdb\"],\n self._final_bam,\n \"32889500\",\n \"32974000\",\n \"chr13\",\n cov_plot2]\n run_command(__modname__, \" \".join(cmd2), self._log_file)\n \n cov_and_depth = Element(\"coverage_and_depth\")\n brca1_coverage_plot = Element(\"brca1_coverage_plot\")\n SubElement(brca1_coverage_plot, \"img\").text = cov_plot1\n cov_and_depth.append(brca1_coverage_plot)\n brca2_coverage_plot = Element(\"brca2_coverage_plot\")\n SubElement(brca2_coverage_plot, \"img\").text = cov_plot2\n cov_and_depth.append(brca2_coverage_plot)\n\n qc_contents.append(cov_and_depth)\n return qc_contents\n\n def amplicon_coverage(self):\n 
qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Amplicon Coverage\"\n\n if self._pipeline == '447':\n amp_cov = Element(\"amplicon_coverage_brca_plus\")\n else:\n amp_cov = Element(\"amplicon_coverage\")\n amp_stat_file = \"%s/data/stat/%s_picard_ampstat.txt\"%(self._output_dir, self._sample_name)\n brca1 = Element(\"brca1\")\n brca2 = Element(\"brca2\")\n\n try:\n f = open(amp_stat_file, \"r\")\n lines = f.readlines()\n\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n \n if (sp[0].startswith(\"name\")):\n continue\n else:\n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[0]\n SubElement(row, \"col\").text = str(round(float(sp[1]), 2))\n SubElement(row, \"col\").text = sp[2]\n SubElement(row, \"col\").text = sp[3]\n SubElement(row, \"col\").text = sp[4]\n if (sp[0].startswith(\"BRCA1\")):\n brca1.append(row)\n elif (sp[0].startswith(\"BRCA2\")):\n brca2.append(row)\n f.close()\n except Exception as ex_str:\n print(ex_str)\n\n amp_cov.append(brca1)\n amp_cov.append(brca2)\n\n # has control\n control_file = \"%s/data/stat/control_ampstat.txt\"%(self._output_dir)\n if os.path.exists(control_file):\n has_control = \"yes\"\n else:\n has_control = \"no\"\n SubElement(amp_cov, \"has_control\").text = has_control\n\n # mapped amplicon coverage (1) plot\n amp_cov_plot = \"%s/data/stat/%s_mapped_amp_cov.png\"%(self._output_dir, self._sample_name)\n\n #if self._specimen == 'FFPE':\n if self._pipeline == '447':\n is_control = 'yes'\n else:\n is_control = self._is_control\n\n cmd = [\"Rscript\",\n self._sw.workflow_software[\"r_mapped_amplicon_coverage\"],\n is_control,\n amp_stat_file,\n self._default_control_file,\n amp_cov_plot]\n run_command(__modname__, \" \".join(cmd), self._log_file)\n\n mapped_amplicon_coverage = Element(\"mapped_amplicon_coverage\")\n SubElement(mapped_amplicon_coverage, \"img\").text = amp_cov_plot\n amp_cov.append(mapped_amplicon_coverage)\n\n # mapped amplicon coverage (2) heatmap\n if self._pipeline == '445':\n mapped_amplicon_coverage_heatmap = Element(\"mapped_amplicon_coverage_heatmap\")\n SubElement(mapped_amplicon_coverage_heatmap, \"img\").text = self.generate_coverage_heatmap()\n amp_cov.append(mapped_amplicon_coverage_heatmap)\n\n qc_contents.append(amp_cov)\n return qc_contents\n\n def generate_coverage_heatmap(self):\n amp_picard_file = \"%s/data/stat/%s_picard_ampstat.txt\"%(self._output_dir, self._sample_name)\n\n df_control = pd.read_table(self._default_control_file)\n df_control = df_control.set_index('Amplicon')\n control = df_control[['Count']].apply(lambda x: (x - x.min()) / (x.max() - x.min()))\n control.columns = ['control_coverage']\n\n df_sample = pd.read_table(amp_picard_file)\n df_sample.columns = ['Amplicon', 'coverage', 'chrom', 'start', 'end']\n df_sample = df_sample.set_index('Amplicon')\n sample = df_sample[['coverage']].apply(lambda x: (x - x.min()) / (x.max() - x.min()))\n\n merged_cov = pd.merge(control, sample, left_index=True, right_index=True, how='inner')\n merged_cov['diff'] = merged_cov['control_coverage'] - merged_cov['coverage']\n\n plt.figure(figsize=(26, 38))\n plt.rcParams['font.size'] = 20\n plt.rcParams['axes.labelsize'] = 30\n plt.rcParams['axes.labelweight'] = 'bold'\n plt.rcParams['xtick.labelsize'] = 20\n plt.rcParams['ytick.labelsize'] = 20\n plt.rcParams['legend.fontsize'] = 20\n plt.rcParams['figure.titlesize'] = 24\n\n if self._is_control == 'yes':\n sns.heatmap(sample, square=False, annot=True, annot_kws={\"size\": 14}, 
cmap='Reds')\n else:\n sns.heatmap(merged_cov, square=False, annot=True, annot_kws={\"size\": 14}, cmap='Reds')\n \n hfont = {'fontname':'Droid Sans'}\n plt.xticks(fontsize=20,**hfont)\n plt.yticks(fontsize=20,**hfont)\n plt.ylabel(\"Amplicon Regions\",**hfont)\n plt.xlabel(\"Coverages\",**hfont)\n\n heat_map = '%s/data/stat/%s_heatmap.png'%(self._output_dir, self._sample_name)\n plt.savefig(heat_map)\n return heat_map\n\n def warning(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Warning\"\n warning = Element(\"warning\")\n warn_file = \"%s/data/stat/%s_warn.bed\"%(self._output_dir, self._sample_name)\n \n try:\n warn_file_size = os.path.getsize(warn_file)\n \n if (warn_file_size != 0):\n SubElement(warning, \"is_softclipped\").text = \"true\"\n warn_data = Element(\"warn_data\")\n f = open(warn_file, \"r\")\n lines = f.readlines()\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[3]\n SubElement(row, \"col\").text = str(round(float(sp[5]), 2))\n SubElement(row, \"col\").text = sp[0]\n SubElement(row, \"col\").text = str(int(sp[1]) + 100)\n SubElement(row, \"col\").text = str(int(sp[2]) - 100)\n warn_data.append(row)\n warning.append(warn_data)\n f.close()\n else:\n SubElement(warning, \"is_softclipped\").text = \"false\"\n except Exception as ex_str:\n print(ex_str)\n\n primer_del_file = \"%s/data/stat/%s.primer.del.bed\"%(self._output_dir, self._sample_name)\n\n try:\n primer_del_file_size = os.path.getsize(primer_del_file)\n \n if (primer_del_file_size != 0):\n SubElement(warning, \"is_primer_del\").text = \"true\"\n primer_del_data = Element(\"primer_del_data\")\n f = open(primer_del_file, \"r\")\n lines = f.readlines()\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[3]\n SubElement(row, \"col\").text = sp[0]\n SubElement(row, \"col\").text = sp[1]\n SubElement(row, \"col\").text = sp[2]\n primer_del_data.append(row)\n warning.append(primer_del_data)\n f.close()\n else:\n SubElement(warning, \"is_primer_del\").text = \"false\"\n except Exception as ex_str:\n print(ex_str) \n \n qc_contents.append(warning)\n return qc_contents\n\n def variants(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Variants\"\n\n if self._specimen == 'FFPE':\n variants = Element(\"variants_FFPE\")\n else:\n variants = Element(\"variants\")\n json_file = \"%s/data/variant/%s.json\"%(self._output_dir, self._sample_name)\n variant_count = 0\n variants_data = Element(\"variants_data\")\n\n with open(json_file) as data_file:\n _file = data_file.readlines()\n\n for line in _file:\n _data = json.loads(line)\n data = _data.get(_data.keys()[0])\n exac_format = str(data.get(\"variant_information\").get(\"exac_format\"))\n sp1 = exac_format.split(\"-\")\n chrom = sp1[0]\n pos = sp1[1]\n ref_allele = sp1[2]\n alt_allele = sp1[3]\n allele_fraction = str(round(float(data.get(\"allele\").get(\"allele_fraction\")), 2))\n zygosity = str(data.get(\"allele\").get(\"zygosity\"))\n type_of_allele = str(data.get(\"allele\").get(\"type_of_allele\"))\n\n row = Element(\"row\")\n SubElement(row, \"col\").text = chrom\n SubElement(row, \"col\").text = pos\n SubElement(row, \"col_\").text = ref_allele\n SubElement(row, \"col_\").text = alt_allele\n SubElement(row, \"col\").text = allele_fraction\n\n if self._specimen == 'FFPE':\n pass\n else:\n 
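# Added note: non-FFPE (germline) reports additionally include the zygosity and allele-type columns.\n 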
SubElement(row, \"col\").text = zygosity\n SubElement(row, \"col\").text = type_of_allele\n variants_data.append(row)\n variant_count += 1\n\n SubElement(variants_data, \"total_variant\").text = str(variant_count)\n variants.append(variants_data)\n qc_contents.append(variants)\n return qc_contents\n\n def software_reference_list(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Software Reference Database List\"\n software = Element(\"software\")\n software_data = Element(\"software_data\")\n software_list = {}\n f = open(self._log_file, \"r\")\n for line in f:\n if line.find(\"[VERSION] >\") != -1:\n line = line.replace(\"\\n\", \"\")\n sp1 = line.split(\"[VERSION] >\")\n sp2 = sp1[1].strip().split(\",\")\n software_list[str(sp2[1])] = sp1[1].strip()\n f.close()\n for i in software_list:\n sp = software_list[i].split(\",\")\n \n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[1]\n SubElement(row, \"col\").text = sp[2]\n SubElement(row, \"col\").text = sp[3]\n SubElement(row, \"col\").text = sp[5]\n SubElement(row, \"col_\").text = sp[4]\n software_data.append(row)\n \n software.append(software_data)\n qc_contents.append(software)\n return qc_contents\n\n def run(self):\n\n log_progress(__modname__, \"Germline QC report generation start\", f=self._log_file)\n\n ### stat json\n f = open(self._stat_json)\n js = json.loads(f.read())\n f.close()\n\n ### root element\n qc_report = Element(\"qc_report\")\n\n ### main page\n log_progress(__modname__, \"Create main page\", f=self._log_file)\n main_page = self.main_page()\n qc_report.append(main_page)\n\n ### header contents\n log_progress(__modname__, \"Insert header contents\", f=self._log_file)\n header_contents = self.header_contents()\n qc_report.append(header_contents)\n\n ### contents\n log_progress(__modname__, \"Insert analysis information\", f=self._log_file)\n analysis_information = self.analysis_information()\n qc_report.append(analysis_information)\n\n log_progress(__modname__, \"Insert sequencing information\", f=self._log_file)\n sequencing_information = self.sequencing_information()\n qc_report.append(sequencing_information)\n\n log_progress(__modname__, \"Insert raw fastq format\", f=self._log_file)\n raw_fastq_format = self.raw_fastq_format(js)\n qc_report.append(raw_fastq_format)\n\n log_progress(__modname__, \"Insert data summary\", f=self._log_file)\n data_summary = self.data_summary()\n qc_report.append(data_summary)\n\n log_progress(__modname__, \"Insert raw read quality\", f=self._log_file)\n raw_read_quality = self.raw_read_quality()\n qc_report.append(raw_read_quality)\n\n log_progress(__modname__, \"Insert alignment\", f=self._log_file)\n alignment = self.alignment()\n qc_report.append(alignment)\n\n log_progress(__modname__, \"Insert mapping_statistics\", f=self._log_file)\n mapping_statistics = self.mapping_statistics()\n qc_report.append(mapping_statistics)\n\n log_progress(__modname__, \"Insert softclip\", f=self._log_file)\n softclip = self.softclip()\n qc_report.append(softclip)\n\n log_progress(__modname__, \"Insert coverage and depth\", f=self._log_file)\n coverage_and_depth = self.coverage_and_depth()\n qc_report.append(coverage_and_depth)\n\n log_progress(__modname__, \"Insert amplicon coverage\", f=self._log_file)\n amplicon_coverage = self.amplicon_coverage()\n qc_report.append(amplicon_coverage)\n\n log_progress(__modname__, \"Insert warnings\", f=self._log_file)\n warning = self.warning()\n qc_report.append(warning)\n\n log_progress(__modname__, 
\"Insert variants\", f=self._log_file)\n variants = self.variants()\n qc_report.append(variants)\n\n log_progress(__modname__, \"Insert softwares\", f=self._log_file)\n software_reference_list = self.software_reference_list()\n qc_report.append(software_reference_list)\n\n ### generate xml file\n log_progress(__modname__, \"Generate XML file for data sources\", f=self._log_file)\n self.indent(qc_report)\n #dump(qc_report)\n ElementTree(qc_report).write(self._xml_file)\n\n ### Generate pdf file\n log_progress(__modname__, \"Generate final PDF file\", f=self._log_file)\n cmd = [self._sw.workflow_software[\"fop\"],\n \"-c\", self._sw.workflow_software[\"fop_config\"],\n \"-xml\", self._xml_file,\n \"-xsl\", self._sw.workflow_software[\"qc_report_template\"],\n \"-pdf\", self._pdf_file]\n run_command(__modname__, \" \".join(cmd), self._log_file)\n \n log_progress(__modname__, \"Germline QC report generation finished\", f=self._log_file)\n","sub_path":"pipelines/DNA_Germline/pipelines/tmp/qc_report_generation_v2.py","file_name":"qc_report_generation_v2.py","file_ext":"py","file_size_in_byte":29381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"519489392","text":"# -*- coding: utf-8 -*-\nfrom PyQt5.QtWidgets import QApplication, QVBoxLayout, QMainWindow, QTableWidgetItem\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass Layout(QVBoxLayout):\n def __init__(self, root):\n super().__init__()\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n # self.toolbar = NavigationToolbar(self.canvas, root)\n # self.addWidget(self.toolbar)\n self.addWidget(self.canvas)\n\n def plot_dnt_current(self, graph_data): # graph_data в формате [[\"Имя1, ед.изм.\", [data]], [\"Имя2, ед.изм.\", [data]]...]\n try:\n # отрисуем график\n self.figure.clear()\n # create an axis\n axes = self.figure.add_subplot(111)\n axes_twinx = axes.twinx()\n # plot data\n time = graph_data[0][1]\n current = graph_data[3][1]\n current_pos_list = []\n current_neg_list = []\n for var in current:\n current_pos = var if var > 1E-12 else 1E-12\n current_neg = -var if var < -1E-12 else 1E-12\n current_pos_list.append(current_pos)\n current_neg_list.append(current_neg)\n axes.plot(time, current_pos_list, line_type_from_index(0), label=u\"Ток +, А\")\n axes.plot(time, current_neg_list, line_type_from_index(1), label=u\"Ток -, А\")\n # подсчет статистических данных\n current_pos_mean = float(np.mean(current_pos_list))\n current_neg_mean = float(np.mean(current_neg_list))\n current_pos_std = float(np.std(current_pos_list))\n current_neg_std = float(np.std(current_neg_list))\n #\n data_text = \" mean_pos=%.3E; std_pos=%.2E;\\n mean_neg=%.3E; std_neg=%.2E;\" \\\n % (current_pos_mean, current_pos_std, current_neg_mean, current_neg_std)\n self.figure.text(0.01, 0.95, data_text)\n #\n axes.set_title(\"График показаний ДНТ\")\n axes.set_xlabel(\"Время, с\")\n axes.set_ylim(bottom=1E-12)\n axes.set_yscale(\"log\")\n axes.legend(loc=2)\n axes.grid()\n # refresh canvas\n self.canvas.draw()\n except Exception as error:\n print(\"plot_dnt_current \" + error)\n pass\n\n def plot_osc_dnt(self, graph_data, osc_data_type=0):\n try:\n # отрисуем график\n self.figure.clear()\n # create an axis\n axes = self.figure.add_subplot(111)\n # plot data\n time = graph_data[0][1]\n read_flag = 0\n for num, var in 
enumerate(graph_data[1:]):\n if var[1]:\n read_flag = 1\n axes.plot(time, var[1], line_type_from_index(num), label=var[0])\n if read_flag:\n axes.set_title(\"Осциллограмма ДНТ\")\n axes.set_xlabel(\"Время, с\")\n axes.legend(loc=0)\n axes.grid()\n # refresh canvas\n self.canvas.draw()\n except Exception as error:\n print(error)\n\n\ndef line_type_from_index(n):\n color_line = [\"r\", \"b\", \"g\", \"c\", \"m\", \"y\", \"k\"]\n style_line = [\"-\", \"--\", \"-.\", \":\"]\n try:\n color = color_line[n % len(color_line)]\n style = style_line[n // len(color_line)]\n return style + color\n except IndexError:\n return \"-r\"\n","sub_path":"dnt_graph.py","file_name":"dnt_graph.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"584449959","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 19 09:19:22 2020\n\n@author: Shivadhar SIngh\n\"\"\"\n\n\ndef word_frequency():\n from urllib.request import urlopen\n fileobj = urlopen(\"https://cs.anu.edu.au/courses/comp1730/labs/data/wordlist.txt\")\n d = dict()\n for byteseq in fileobj:\n line = byteseq.decode()\n # process line of text\n d[line] = len(line.strip())\n fileobj.close()\n ls = list(d.values())\n ls.sort()\n return ls[-1:-10:-1]\n ","sub_path":"word_frequency.py","file_name":"word_frequency.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"193132728","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport torchvision\nfrom torch.autograd import Variable\nimport itertools\nfrom sklearn.metrics import mean_squared_error, accuracy_score, hamming_loss, roc_curve, auc, f1_score\n\n\ndef to_var(x, requires_grad=True):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x, requires_grad=requires_grad)\n\n\ndef auc_roc(Pr, Tr):\n fpr, tpr, _ = roc_curve(Tr, Pr, pos_label=1.0)\n return auc(fpr, tpr), fpr, tpr\n\n\nclass MetaModule(nn.Module):\n # adopted from: Adrien Ecoffet https://github.com/AdrienLE\n def params(self):\n for name, param in self.named_params(self):\n yield param\n\n def named_leaves(self):\n return []\n\n def named_submodules(self):\n return []\n\n def named_params(self, curr_module=None, memo=None, prefix=''):\n if memo is None:\n memo = set()\n\n if hasattr(curr_module, 'named_leaves'):\n for name, p in curr_module.named_leaves():\n if p is not None and p not in memo:\n memo.add(p)\n yield prefix + ('.' if prefix else '') + name, p\n else:\n for name, p in curr_module._parameters.items():\n if p is not None and p not in memo:\n memo.add(p)\n yield prefix + ('.' if prefix else '') + name, p\n\n for mname, module in curr_module.named_children():\n submodule_prefix = prefix + ('.' 
if prefix else '') + mname\n for name, p in self.named_params(module, memo, submodule_prefix):\n yield name, p\n\n def update_params(self, lr_inner, first_order=False, source_params=None, detach=False):\n if source_params is not None:\n for tgt, src in zip(self.named_params(self), source_params):\n name_t, param_t = tgt\n # name_s, param_s = src\n # grad = param_s.grad\n # name_s, param_s = src\n grad = src\n if first_order:\n grad = to_var(grad.detach().data)\n tmp = param_t - lr_inner * grad\n self.set_param(self, name_t, tmp)\n else:\n\n for name, param in self.named_params(self):\n if not detach:\n grad = param.grad\n if first_order:\n grad = to_var(grad.detach().data)\n tmp = param - lr_inner * grad\n self.set_param(self, name, tmp)\n else:\n param = param.detach_()\n self.set_param(self, name, param)\n\n def set_param(self,curr_mod, name, param):\n if '.' in name:\n n = name.split('.')\n module_name = n[0]\n rest = '.'.join(n[1:])\n for name, mod in curr_mod.named_children():\n if module_name == name:\n self.set_param(mod, rest, param)\n break\n else:\n setattr(curr_mod, name, param)\n\n def detach_params(self):\n for name, param in self.named_params(self):\n self.set_param(self, name, param.detach())\n\n def copy(self, other, same_var=False):\n for name, param in other.named_params():\n if not same_var:\n param = to_var(param.data.clone(), requires_grad=True)\n self.set_param(name, param)\n\n\nclass MetaLinear(MetaModule):\n def __init__(self, *args, **kwargs):\n super().__init__()\n ignore = nn.Linear(*args, **kwargs)\n\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\n\n def forward(self, x):\n return F.linear(x, self.weight, self.bias)\n\n def named_leaves(self):\n return [('weight', self.weight), ('bias', self.bias)]\n\n\nclass MetaConv2d(MetaModule):\n def __init__(self, *args, **kwargs):\n super().__init__()\n ignore = nn.Conv2d(*args, **kwargs)\n\n self.stride = ignore.stride\n self.padding = ignore.padding\n self.dilation = ignore.dilation\n self.groups = ignore.groups\n\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\n\n if ignore.bias is not None:\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\n else:\n self.register_buffer('bias', None)\n\n def forward(self, x):\n return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n\n def named_leaves(self):\n return [('weight', self.weight), ('bias', self.bias)]\n\n\nclass MetaConvTranspose2d(MetaModule):\n def __init__(self, *args, **kwargs):\n super().__init__()\n ignore = nn.ConvTranspose2d(*args, **kwargs)\n\n self.stride = ignore.stride\n self.padding = ignore.padding\n self.dilation = ignore.dilation\n self.groups = ignore.groups\n\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\n\n if ignore.bias is not None:\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\n else:\n self.register_buffer('bias', None)\n\n def forward(self, x, output_size=None):\n output_padding = self._output_padding(x, output_size)\n return F.conv_transpose2d(x, self.weight, self.bias, self.stride, self.padding,\n output_padding, self.groups, self.dilation)\n\n def named_leaves(self):\n return [('weight', self.weight), ('bias', self.bias)]\n\n\nclass MetaBatchNorm2d(MetaModule):\n def __init__(self, *args, **kwargs):\n super().__init__()\n ignore = nn.BatchNorm2d(*args, **kwargs)\n\n 
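# Added note: copy hyperparameters from a throwaway nn.BatchNorm2d, then register the learnable tensors as buffers so update_params()/set_param() can swap them in-place during meta-updates.\n 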
self.num_features = ignore.num_features\n self.eps = ignore.eps\n self.momentum = ignore.momentum\n self.affine = ignore.affine\n self.track_running_stats = ignore.track_running_stats\n\n if self.affine:\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\n\n if self.track_running_stats:\n self.register_buffer('running_mean', torch.zeros(self.num_features))\n self.register_buffer('running_var', torch.ones(self.num_features))\n else:\n self.register_parameter('running_mean', None)\n self.register_parameter('running_var', None)\n\n\n def forward(self, x):\n return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,\n self.training or not self.track_running_stats, self.momentum, self.eps)\n\n def named_leaves(self):\n return [('weight', self.weight), ('bias', self.bias)]\n\n\nclass BasicBlock(MetaModule):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = MetaConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = MetaBatchNorm2d(planes)\n self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = MetaBatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n MetaConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n MetaBatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(MetaModule):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = MetaConv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = MetaBatchNorm2d(planes)\n self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = MetaBatchNorm2d(planes)\n self.conv3 = MetaConv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = MetaBatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n MetaConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n MetaBatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(MetaModule):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = MetaConv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = MetaBatchNorm2d(64)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, num_blocks[0])\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = MetaLinear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride = 1):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, 
planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.maxpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n # print('size befor avg pooling: ', out.size())\n out = F.avg_pool2d(out, out.size(2))\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out.squeeze()\n\n\ndef ResNet18(num_classes=10):\n return ResNet(BasicBlock, [2,2,2,2], num_classes=num_classes)\n\ndef ResNet34(num_classes=10):\n return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)\n\n\nclass PreActBlock(MetaModule):\n '''Pre-activation version of the BasicBlock.'''\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(PreActBlock, self).__init__()\n self.bn1 = MetaBatchNorm2d(in_planes)\n self.conv1 = MetaConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = MetaBatchNorm2d(planes)\n self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n MetaConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(x))\n shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x\n out = self.conv1(out)\n out = self.conv2(F.relu(self.bn2(out)))\n out += shortcut\n return out\n\n\nclass PreActBottleneck(MetaModule):\n '''Pre-activation version of the original Bottleneck module.'''\n expansion = 4\n def __init__(self, in_planes, planes, stride=1):\n super(PreActBottleneck, self).__init__()\n self.bn1 = MetaBatchNorm2d(in_planes)\n self.conv1 = MetaConv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn2 = MetaBatchNorm2d(planes)\n self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn3 = MetaBatchNorm2d(planes)\n self.conv3 = MetaConv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n MetaConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(x))\n shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x\n out = self.conv1(out)\n out = self.conv2(F.relu(self.bn2(out)))\n out = self.conv3(F.relu(self.bn3(out)))\n out += shortcut\n return out\n\n\nclass PreActResNet(MetaModule):\n def __init__(self, block, num_blocks, num_classes=10):\n super(PreActResNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = MetaConv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layer1 = self._make_layer(block, 64, num_blocks[0])\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = MetaLinear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride = 1):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.maxpool(out)\n out = 
self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n # print('size before avg pooling: ', out.size())\n out = F.avg_pool2d(out, out.size(2))\n out2 = out.view(out.size(0), -1)\n out = self.linear(out2)\n return out.squeeze()\n\n\ndef PreActResNet18(num_classes=10):\n return PreActResNet(PreActBlock, [2,2,2,2], num_classes)\n\ndef PreActResNet34(num_classes=10):\n return PreActResNet(PreActBlock, [3,4,6,3], num_classes)\n\ndef test_2():\n net = PreActResNet34(1)\n y = net((torch.randn(2,3,224,224)))\n print(y.size())\n\n# test_2()\n\n\ndef test():\n net = ResNet18(num_classes=10)\n y = net(torch.randn(16, 1,32,32))\n print(y.size())\n\n#test()\n\n\ndef noise_matrix(x = 0.8, p0 = 0.02, p1 = 0.4):\n t00 = (1 - p0)*x\n t01 = p1*(1 - x)\n t0 = t00 + t01\n t00 = t00/t0\n t01 = t01/t0\n\n t10 = p0*x\n t11 = (1 - p1)*(1 - x)\n t1 = t10 + t11\n t10 = t10/t1\n t11 = t11/t1\n T = np.array([[t00, t01], [t10, t11]])\n if torch.cuda.is_available():\n return torch.from_numpy(T).type(torch.FloatTensor).cuda()\n return torch.from_numpy(T).type(torch.FloatTensor)\n\n\ndef get_mean_and_std_batch(dataset, bs = 4096):\n import sys # fix: sys is used below for stdout flushing but was never imported at module scope\n pop_mean = []\n pop_std0 = []\n pop_std1 = []\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=bs, shuffle=True, num_workers=8)\n for i, data in enumerate(dataloader, 0):\n # shape (batch_size, 3, height, width)\n print('{}/{}'.format(i, len(dataloader)))\n sys.stdout.flush()\n numpy_image, _ = data\n numpy_image = numpy_image.numpy()\n\n # shape (3,)\n batch_mean = np.mean(numpy_image, axis=(0, 2, 3))\n batch_std0 = np.std(numpy_image, axis=(0, 2, 3))\n batch_std1 = np.std(numpy_image, axis=(0, 2, 3), ddof=1)\n print(batch_mean, batch_std0, batch_std1)\n\n pop_mean.append(batch_mean)\n pop_std0.append(batch_std0)\n pop_std1.append(batch_std1)\n\n # shape (num_iterations, 3) -> (mean across 0th axis) -> shape (3,)\n pop_mean = np.array(pop_mean).mean(axis=0)\n pop_std0 = np.array(pop_std0).mean(axis=0)\n pop_std1 = np.array(pop_std1).mean(axis=0)\n print('mean/std0/std1:', pop_mean, pop_std0, pop_std1)\n return pop_mean, pop_std0, pop_std1\n\n\ndef get_mean_and_std(dataset):\n '''Compute the mean and std value of dataset.'''\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print('==> Computing mean and std..')\n for ind, (inputs, targets) in enumerate(dataloader):\n if ind % 100 == 0:\n print('Processing {}/{}'.format(ind, len(dataloader)))\n\n for i in range(3):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n\n print('mean/std: ', mean, std)\n return mean, std\n","sub_path":"training_codes/model_paad.py","file_name":"model_paad.py","file_ext":"py","file_size_in_byte":16769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"639193451","text":"# python argtest.py --fin datain --fout dataout\n\n\nimport sys\n\ndef genSortKey(col,up):\n def key(x):\n if up == '+':\n return x[col]\n elif up == '-':\n return -x[col]\n return key\n\ndef main():\n FIN=\"\"\n FOUT=\"\"\n COL=\"\"\n DIR=\"\"\n nargs=len(sys.argv)\n skip=False\n for i in range(1,nargs):\n if not skip:\n arg=sys.argv[i]\n print(\"INFO: processing\",arg)\n if arg == \"--fin\":\n if i != nargs-1:\n FIN=sys.argv[i+1]\n skip=True\n elif arg == \"--fout\":\n if i != nargs-1:\n FOUT=sys.argv[i+1]\n skip=True\n elif arg == \"--col\":\n if i != nargs-1:\n 
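# Added note: each flag's value is the next argv entry; 'skip' makes the loop pass over that entry on the following iteration.\n 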
COL=sys.argv[i+1]\n skip=True\n elif arg == \"--dir\":\n if i != nargs-1:\n DIR=sys.argv[i+1]\n skip=True\n else:\n print(\"ERR: unknown arg:\",arg)\n else:\n skip=False\n\n print(\"INFO: FIN\",FIN)\n print(\"INFO: FOUT\",FOUT)\n print(\"INFO: COL\",COL)\n print(\"INFO: DIR\",DIR)\n accum = []\n try:\n f=open(FIN,'r')\n except:\n print(\"ERR: file\",FIN,\"does not exist or cannot be opened\")\n return False\n try:\n g=open(FOUT,'w')\n except:\n print(\"ERR: file\",FOUT,\"could not be created\")\n try:\n COL = int(COL)\n except:\n print(\"ERR: input\",COL,\"is a non-integer\")\n return False\n if ((DIR != \"+\") and (DIR != \"-\")):\n print(\"ERR: dir\",DIR,\"is invalid\")\n return False\n sortKey = genSortKey(COL,DIR)\n\n #try:\n # f=open(FIN,'r')\n #except:\n # print(\"ERR: file\",FIN,\"does not exist or cannot be opened\")\n # return False\n lines = f.readlines()\n try:\n for line in lines:\n j = line.split('\\n')[0]\n k = j.split(',')\n r = []\n for i in k:\n r += [float(i)]\n accum += [r]\n except:\n print(\"ERR: non-numeric values in\",FIN)\n return False\n #try:\n # g = open(FOUT,'w')\n #except:\n # print(\"ERR: file\",FOUT,\"could not be created\")\n # return False\n for i in accum:\n if COL > len(i)-1:\n print(\"ERR: --col\",COL,\"out of range\")\n g.write(\"\")\n return False\n \n sortedList = sorted(accum,key=sortKey)\n csv = []\n for row in range(0,len(sortedList),1):\n csv += [\"\"]\n for i in sortedList[row]:\n csv[row] += str(i) + \",\"\n csv[row]=csv[row][0:len(csv[row])-1]\n csv[row] += \"\\n\"\n for i in csv:\n g.write(str(i))\n return True\n\nmain()\n","sub_path":"Python/Bubble Sort/sortCSV.py","file_name":"sortCSV.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"42313584","text":"#!C:\\Users\\60067527\\Anaconda3\\envs\\py36\n#-*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nimport os, io\nimport logging\nimport re\n\nimport tensorflow as tf\n\nfrom six import b\nimport numpy as np\n\n\n\nSCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))\nDEFAULT_LABEL_FILE = os.path.join(SCRIPT_PATH,\n #'../labels/bank_labelsSW.txt')\n '../labels/bank_labels.txt')\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef generate(annotations_path, output_path, log_step=5000,\n force_uppercase=True, save_filename=False):\n\n logging.info('Building a dataset from %s.', annotations_path)\n logging.info('Output file: %s', output_path)\n\n writer = tf.python_io.TFRecordWriter(output_path)\n longest_label = ''\n idx = 0\n\n\t\t\n with open(annotations_path, 'r', encoding='utf-8') as annotations:\n word=[]\t \n \n for idx, line in enumerate(annotations):\n line = line.rstrip('\\n')\n\n # Split the line on the first whitespace character and allow empty values for the label\n # NOTE: this does not allow whitespace in image paths\n line_match = re.match(r'(\\S+)\\s(.*)', line)\n #print('line ', line)\t\t\t\n if line_match is None:\n logging.error('missing filename or label, ignoring line %i: %s', idx+1, line)\n continue\n (img_path, label) = line_match.groups()\n #print(img_path, label)\t\t\t\n\n with open(img_path, 'rb') as img_file:\n img = img_file.read()\n\n# if force_uppercase:\n# label = label.upper()\n \n try:\n word= convert_lex(label)\n \n except IOError:\n pass # ignore error images\t\t\n\n if len(label) > 
len(longest_label):\n longest_label = label\n\t\t\t\t\n ''' \n feature = {}\n feature['image'] = _bytes_feature(img)\n feature['label'] = _bytes_feature(b(label))\n '''\n label = word\n label=''.join(map(str,label))\n\t\t\t\n feature = {}\n feature['image'] = _bytes_feature(img)\n feature['label'] = _bytes_feature(b(label))\n \n \t\t\t\n if save_filename:\n feature['comment'] = _bytes_feature(b(img_path))\n\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n\n writer.write(example.SerializeToString())\n\n if idx % log_step == 0:\n logging.info('Processed %s pairs.', idx+1)\n\t\t\t\t\n\n\t\t\t\t\n if idx:\t\t\t\n logging.info('Dataset is ready: %i pairs.', idx+1)\n logging.info('Longest label (%i): %s', len(longest_label), longest_label)\n\n writer.close()\n\ndef convert_lex( lex):\n\n #if sys.version_info >= (3,):\n # lex = lex.decode('utf-8')\n #lex = lex.decode('iso-8859-1')\n \n #assert len(lex) < self.bucket_specs[-1][1]\n\t\t#return np.array(\n # [self.GO_ID] + [self.CHARMAP.index(char) for char in lex] + [self.EOS_ID],\n # dtype=np.int32)\n\n GO_ID = 1\n EOS_ID = 2\n CHR_BR = 3\n\t\n label_file = DEFAULT_LABEL_FILE\n with io.open(label_file, 'r', encoding='utf-8') as f:\n labels = f.read().splitlines()\n #print(labels)\n \t\n l_id=[] \n k=3\n s=\"\"\t\n n=0\n for i, l in enumerate(labels):\n n=i+k\n s=str(n)\t\t\t\t\n #print('i l k n s ', i , l, k, n, s)\n while ('1' in s) or('2' in s) or ('3' in s):\t\t\t\t\n k+=1\n n=i+k\n s=str(n)\t\t\t\t\t\t\t\t\t\t\t\n #print('while i l n k s: ', i, l , k, n, s)\n l_id.append(n)\t\n #print('i l k n s l_id', i , l, k, n, s, l_id)\t\t\n\n label_list=list(zip( (j for j in range(0,i+1)),l_id, labels)) \n #print('label_list, ' , label_list)\n\t\n lex_new=[] \n j=0\n for c in lex:\n #print('c ord(c) lex', c, ord(c), lex)\n for j, l_id, label in label_list:\t\t\n #for i, l in enumerate(labels):\n #print('c j l_id label', c, j , l_id, label)\t\n if c == label:\t\t\t\n lex_new.append(l_id)\n lex_new.append(3)\t\t\t\t \n \n \t\n return lex_new\n ''' \n return np.array(\n #[i for i in lex_new],\n [i for i in lex_new],\t \n #[GO_ID] + [i for i in lex_new] + [EOS_ID],\t \n #[GO_ID] + [EOS_ID],\t \t \n dtype=np.int32)\n\t''' ","sub_path":"aocr36/util/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"159180461","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom statistics import *\nimport warnings\nwarnings.filterwarnings('ignore')\nd=pd.read_csv('E:\\\\cartrue.csv')\n\n\n# In[2]:\n\n\nd1 = d.drop(['Unnamed: 0', 'Description','ExteriorColor','InteriorColor','FuelType','CabType','BedLength','City'], axis = 1)\n\n\n# In[3]:\n\n\nd1['Price']=d1['Price'].str.replace(\"$\",\"\")\nd1['Price']=d1['Price'].str.replace(\",\",\"\")\nd1['Miles']=d1['Miles'].str.replace(\",\",\"\")\n\n\n# In[4]:\n\n\naccident =[]\nowner=[]\nusetype=[]\nfor q in range(0,9993):\n con = d['Condition'][q].split(\",\")\n if len(con)==5:\n can1 = con[0].replace(\"'\",\"\")\n acc = can1.replace(\"[\",\"\")\n accident.append(acc)\n own = con[2].replace(\" '\",\"\")\n owner.append(own)\n can2 = con[4].replace(\" '\",\"\")\n can3 = can2.replace(\"'\",\"\")\n ust = can3.replace(\"]\",\"\")\n usetype.append(ust)\n else:\n can1 = con[0].replace(\"'\",\"\")\n acc = can1.replace(\"[\",\"\")\n accident.append(acc)\n own = None\n owner.append(own)\n can2 = con[2].replace(\" '\",\"\")\n can3 = 
can2.replace(\"'\",\"\")\n ust = can3.replace(\"]\",\"\")\n usetype.append(ust)\n\n\nd1['Accidents']=accident\nd1['NoOfOwners']=owner\nd1['UseType']=usetype\n\n\n# In[5]:\n\n\nd1 = d1.drop(['Condition'], axis = 1)\nd2=d1\n\n\n# In[6]:\n\n\nfor v in range(0,9993):\n if d1['MPG'][v]=='6.2L V-8 Gas' or d1['MPG'][v]=='1.5L Inline-4 Plug-In Hybrid':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='4.3L V-6 Gas' or d1['MPG'][v]=='6.7L V-8 Diesel Turbocharged':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='6.0L V-8 Gas' or d1['MPG'][v]=='2.0L Inline-4 Hybrid Turbocharged':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='2.0L Inline-4 Plug-In Hybrid' or d1['MPG'][v]=='6.4L V-8 Gas':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='L - Hydrogen' or d1['MPG'][v]=='6.6L V-8 Diesel Turbocharged':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='2.0L Inline-4 Plug-In Hybrid Turbocharged' or d1['MPG'][v]=='6.7L V-6 Diesel Turbocharged':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='2.1L Inline-4 Diesel Turbocharged' or d1['MPG'][v]=='1.4L Inline-4 Plug-In Hybrid':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='5.7L V-8 Gas' or d1['MPG'][v]=='3.0L V-6 Plug-In Hybrid Turbocharged':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='3.0L V-6 Diesel Turbocharged' or d1['MPG'][v]=='6.8L V-10 Gas':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='7.2L V-8 Gas':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n\n\n# In[7]:\n\n\nmpg= d2['MPG'].str.split(\"/\", expand=True)\nd2['MPG_cty']=mpg[0]\nd2['MPG_hwy']=mpg[1]\nd2 = d2.drop(['MPG'], axis = 1)\nd2['MPG_cty']=d1['MPG_cty'].str.replace(\" cty\",\"\")\nd2['MPG_hwy']=d1['MPG_hwy'].str.replace(\" hwy\",\"\")\n\n\n# In[8]:\n\n\neng = d2['Engine'].str.split(\"L\", expand=True)\nde = eng[1]\ngas = de.str.split(\"Gas\", expand=True)\nfor e in range(0,9993):\n if eng[0][e]=='':\n eng[0][e]='0.5'\n\nd2['Engine_L']=eng[0]\nd2['Engine_Gas']=gas[0]\nd2=d2.drop(['Engine'], axis = 1)\n\n\n# In[9]:\n\n\nfor t in range(0,9993):\n if d1['Transmission'][t]=='Crew Cab' or d1['Transmission'][t]=='Standard':\n d2['Transmission'][t]=d['FuelType'][t]\n elif d1['Transmission'][t]=='Extended Cab' or d1['Transmission'][t]=='Regular Cab':\n d2['Transmission'][t]=d['FuelType'][t]\n\n\n# In[10]:\n\n\ntype(d2['MPG_cty'][0])\n\n\n# In[11]:\n\n\nd2['CarBrand']=d2['CarBrand'].str.lower()\n#d2['City']=d2['City'].str.lower()\nd2['State']=d2['State'].str.lower()\nd2['ExteColor']=d2['ExteColor'].str.lower()\nd2['InterColor']=d2['InterColor'].str.lower()\nd2['style']=d2['style'].str.lower()\nd2['Transmission']=d2['Transmission'].str.lower()\nd2['UseType']=d2['UseType'].str.lower()\nd2['Engine_Gas']=d2['Engine_Gas'].str.lower()\nd2['Model']=d2['Model'].str.lower()\nd3=d2\n\n\n# In[12]:\n\n\nd2.replace([None],np.nan,inplace=True)\n\n\n# In[13]:\n\n\nTransmission_map = {'automatic':1,\n 'manual':0,\n}\n\nNoOfOwners_map = {'1 Owner':8,\n '2 Owners':7,\n '3 Owners':6,\n '4 Owners':5,\n '5 Owners':4,\n '6 
Owners':3,\n '7 Owners':2,\n '8 Owners':1,\n '9 Owners':0,\n}\n\nd3['Transmission']=d3.Transmission.map(Transmission_map)\nd3['NoOfOwners']=d3.NoOfOwners.map(NoOfOwners_map)\n\n\n# In[14]:\n\n\n# KNN_imputation\nfrom sklearn.impute import KNNImputer\nimputer = KNNImputer(n_neighbors=3)\nd3_imputed = imputer.fit_transform(d3[['Transmission', 'NoOfOwners', 'MPG_cty', 'MPG_hwy']])\nd3_imputed\n\n\n# In[15]:\n\n\nd3_imputed.tolist()\nTran_imp=[]\nNoOf_imp=[]\nMPG_imp=[]\nMPGimp=[]\nfor i in range(0,9993):\n Tran_imp.append(round(d3_imputed[i][0]))\n NoOf_imp.append(round(d3_imputed[i][1]))\n MPG_imp.append(round(d3_imputed[i][2]))\n MPGimp.append(round(d3_imputed[i][3]))\nd3['Transmission']=Tran_imp\nd3['NoOfOwners']=NoOf_imp\nd3['MPG_cty']=MPG_imp\nd3['MPG_hwy']=MPGimp\n\nd3['fueleconomy'] =round((0.55 * d3['MPG_cty']) + (0.45 * d3['MPG_hwy']),2)\nd3 = d3.drop(['MPG_cty','MPG_hwy'], axis = 1)\n\n\n# import seaborn as sns\n# import matplotlib.pyplot as plt\n# sns.boxplot(d3['Miles'])\n# plt.title('Boxplot')\n# plt.show()\n# \n# import seaborn as sns\n# import matplotlib.pyplot as plt\n# sns.boxplot(d3['fueleconomy'])\n# plt.title('Boxplot')\n# plt.show()\n\n# In[16]:\n\n\nd3['Price']=d3['Price'].astype('int64')\nd3['Miles']=d3['Miles'].astype('int64')\nd3['Year']=d3['Year'].astype('string')\nd3['NoOfOwners']=d3['NoOfOwners'].astype('string')\n#d3['MPG_cty']=d3['MPG_cty'].astype('int')\n#d3['MPG_hwy']=d3['MPG_hwy'].astype('int')\nd3['Engine_L']=d3['Engine_L'].astype('string')\nd3['Transmission']=d3['Transmission'].astype('string')\n\nd3['Year']=d3['Year'].astype('object')\nd3['Engine_L']=d3['Engine_L'].astype('object')\nd3['NoOfOwners']=d3['NoOfOwners'].astype('object')\nd3['Transmission']=d3['Transmission'].astype('object')\n#d3['MPG_cty']=d3['MPG_cty'].astype('object')\n#d3['MPG_hwy']=d3['MPG_hwy'].astype('object')\n#d3.isnull().sum()\n\n\n# In[17]:\n\n\nd3.columns\n\n\n# In[ ]:\n\n\n\n\n\n# from sklearn.preprocessing import MinMaxScaler\n# from sklearn.preprocessing import StandardScaler\n# \n# scaler = MinMaxScaler()\n# num_vars = ['Miles']\n# d3[num_vars] = scaler.fit_transform(d3[num_vars])\n\n# In[18]:\n\n\ntype(d3.fueleconomy[0])\n\n\n# In[19]:\n\n\nfrom sklearn.model_selection import train_test_split\nnp.random.seed(0)\ndf_train, df_test = train_test_split(d3, train_size = 0.8, test_size = 0.2, random_state = 100)\n\n\n# In[20]:\n\n\ny_train = df_train.pop('Price')\nX_train = df_train\ny_test = df_test.pop('Price')\nX_test = df_test\n\n\n# In[21]:\n\n\ndf_train.shape\n\n\n# import seaborn as sns\n# import matplotlib.pyplot as plt\n# sns.boxplot(X_train['Miles'])\n# plt.title('Boxplot')\n# plt.show()\n# \n# \n# sns.boxplot(X_train['fueleconomy'])\n# plt.title('Boxplot')\n# plt.show()\n\n# In[22]:\n\n\nfrom feature_engine.outliers import Winsorizer\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfor i in X_train:\n if X_train[i].dtype==\"object\":\n continue\n else:\n windsoriser = Winsorizer(capping_method='gaussian',tail='both',fold=1.5,variables=i)\n X_train[i]= windsoriser.fit_transform(X_train[[i]])\n\n # we can inspect the minimum caps and maximum caps\n windsoriser.right_tail_caps_,windsoriser.left_tail_caps_\n\n # lets see boxplot\n sns.boxplot(X_train[i])\n plt.title('Boxplot')\n plt.show()\n\n\n# In[23]:\n\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import FunctionTransformer\n\nscaler = FunctionTransformer(np.log2, validate = True)\nnum_vars = ['Miles']\nX_train[num_vars] = 
scaler.fit_transform(X_train[num_vars])\nX_test[num_vars] = scaler.fit_transform(X_test[num_vars])\n\n\n# In[24]:\n\n\nX_train.Miles[0]\n\n\n# In[42]:\n\n\ncatego=['CarBrand', 'Model','Year','State','ExteColor','InterColor','style', 'DriveType', 'Accidents', 'UseType', 'Engine_Gas']\nfrom catboost import CatBoostRegressor\n\ncatboo = CatBoostRegressor(iterations=99,\n random_state = 2021, od_type = 'Iter',\n eval_metric=\"R2\",learning_rate=0.085,depth=16,l2_leaf_reg=5,bagging_temperature=1\n ,border_count=255,grow_policy='Lossguide',max_leaves=500)\ncatboo.fit(X_train, y_train,cat_features=catego,eval_set=(X_test, y_test),plot=True)\n\n\n# In[43]:\n\n\nfrom sklearn.metrics import r2_score\nx_pred = catboo.predict(X_train)\nr2_score(y_train,x_pred)\n\n\n# In[44]:\n\n\ny_pred = catboo.predict(X_test)\nr2_score(y_test,y_pred)\n\n\n# In[45]:\n\n\nparams = {'depth':[3,1,2,6,4,5,7,8,9,10],\n 'iterations':[250,100,500],\n 'learning_rate':[0.03,0.001,0.01,0.1,0.2,0.3], \n 'l2_leaf_reg':[3,1,5,10,100],\n 'border_count':[32,5,10,20,50],\n 'thread_count':[4]}\n\n\n# grid_cat = GridSearchCV(estimator = catboo, param_grid = params, scoring=\"neg_mean_squared_error\", cv = 3, verbose = 2)\n# grid_cat.fit(X_train, y_train, cat_features=catego,eval_set=(X_test, y_test),plot=True)\n\n# grid_cat.best_estimator_\n\n# In[46]:\n\n\nimport pickle\nfilename = 'prediction'\npickle.dump(catboo,open(filename,'wb'))\n\n\n# In[47]:\n\n\n(0.55*20)+(0.45*26)\n\n\n# In[51]:\n\n\nout = catboo.predict(np.array([['toyota', 'highlander', '2019', 10.3000, 'tx', 'blue',\n 'black', 'suv', 'FWD', 1, 'No accidents',\n 8, 'personal use', '3.0', 'inline-4', 22.7]]))\n\n\n# In[52]:\n\n\nout[0]\n\n\n# In[33]:\n\n\npd.DataFrame()\nd3['Miles'] = scaler.fit_transform(d3[['Miles']])\n\n\n# In[34]:\n\n\nd3['Miles'][0]\n\n\n# In[35]:\n\n\nplt.scatter(y_pred,y_test,color=\"blue\")\nplt.plot(x_pred,y_train,color=\"red\")\nx_pred = x_pred.reshape(-1,1)\n\n\n# In[ ]:\n\n\n\n\n\n# In[36]:\n\n\ntype('Miles')\n\n\n# In[37]:\n\n\nmodel = pickle.load(open('pricepred','rb'))\n\n\n# In[ ]:\n\n\nout1 = model.predict(np.array([['volvo', 'xc60', '2018', 40670, 'tx', 'white',\n 'black', 'suv', 'AWD', '1', 'No accidents',\n '7', 'personal use', '2.0', 'inline-4', '23.700000']]))\n\n\n# In[ ]:\n\n\nout1\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"car price prediction.py","file_name":"car price prediction.py","file_ext":"py","file_size_in_byte":10674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"114640442","text":"import random\nimport os\nimport sys\nimport time\n\nclass Parameters:# Static parameters class\n purple = '\\033[95m'\n blue = '\\033[94m'\n cyan = '\\033[96m'\n green = '\\033[92m'\n yellow = '\\033[93m'\n red = '\\033[91m'\n white = '\\033[0m'\n bold = '\\033[1m'\n reset = '\\u001b[0m'\n underline = '\\033[4m'\n abc = {0:\"A\",1:\"B\",2:\"C\"}\n abc_to_digit = {\"A\": 0, \"B\":1, \"C\":2}\n abc_list = [\"A\",\"B\",\"C\"]\n\nclass Global: # Global calass with global functions\n loading_time = 0.01\n a_i_time_sleep = 2\n\n end_count = 0\n\n def clear_screen(self):\n os.system(\"cls || clear\")\n \n def check_input(self,text):\n while True:\n user_input = input(text)\n if user_input.lower() == \"quit\":\n quit()\n break\n return user_input\n \n def print_board(self,board):\n print(\" 1 2 3\")\n for i in range(len(board)):\n if i < 2:\n print(Parameters.abc[i] + \" \" + board[i][0].symbol + \" | \" + board[i][1].symbol + \" | \" + board[i][2].symbol + \"\\n ---+---+---\")\n else:\n 
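# Added note: the last board row is printed without a trailing row separator.\n 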
print(Parameters.abc[i] + \" \" + board[i][0].symbol + \" | \" + board[i][1].symbol + \" | \" + board[i][2].symbol + \"\\n\")\n\n def loading_game(self):\n procent = 0\n print(\"Loading...\")\n while procent <= 100:\n sys.stdout.write(u\"\\u001b[1000D\" + str(procent) + \"%\")\n sys.stdout.flush()\n time.sleep(self.loading_time)\n procent += 1\n print(\"\\nCompleted\")\n time.sleep(1)\n os.system(\"cls || clear\")\n \n def change_color(self,text,color):\n return color + text + Parameters.reset\n\nclass Inteligence(Global): # Parent inteligence class for Human and AI\n symbol = \".\"\n name = \"\"\n\n def finish(self,winner,tie,board):# Method is calling when someone win or medked tie\n self.clear_screen()\n self.print_board(board)\n if winner:\n print(self.name + \" is s Winner!\")\n elif tie:\n print(\"Tie\")\n Global.end_count = 0\n\n def get_file_path(self,file_name):# Method which get file path\n file_dir = os.path.dirname(os.path.abspath(__file__))\n my_file = os.path.join(file_dir, file_name)\n return my_file\n \n def mark(self, board , row, col):# Mark a game board\n if board[row][col] == Inteligence:\n board[row][col] = self\n Global.end_count += 1\n return board\n \n def check_finish(self,board,enemy):# Check the type game-end\n winner = self.has_won(board,enemy)\n tie = False\n if Global.end_count == 9:\n tie = True\n return winner,tie\n \n def has_won(self,board,player):# Сheck for possible winnings\n check_list1 = []\n check_list2 = []\n board_len = len(board)\n for i in range(board_len):\n for j in range(board_len):\n check_list1.append(board[i][j])\n check_list2.append(board[j][i])\n if (Inteligence not in check_list1 and player not in check_list1) or (Inteligence not in check_list2 and player not in check_list2):\n return True\n check_list1,check_list2 = [],[]\n for i in range(board_len):\n for j in range(board_len):\n if i == j:\n check_list1.append(board[i][j])\n if Inteligence not in check_list1 and player not in check_list1:\n return True\n check_list1 = []\n count = 2\n for i in range(board_len):\n check_list1.append(board[i][count])\n count -= 1\n if Inteligence not in check_list1 and player not in check_list1:\n return True\n return False\n\n\nclass Human(Inteligence): # Human class\n def __init__(self, name):\n self.name = name\n pass\n def get_move(self):# Get human move\n col = None\n row = None\n while col == None or row == None or col > 3:\n while True:\n row = input(\"Player \" + self.name + \" choose row (A-C): \" ).upper()\n if row not in Parameters.abc_list:\n continue\n break\n while True:\n try:\n col = int(input(\"Player \" + self.name + \" choose column (1-3): \"))\n if col > 3:\n continue\n else:\n break\n except ValueError:\n print(\"Please write a digit!\")\n return Parameters.abc_to_digit[row], col-1\n\n def move(self, enemy, board):# Human move\n self.clear_screen()\n self.print_board(board)\n row,col = self.get_move()\n board = self.mark(board,row,col)\n winner,tie = self.check_finish(board,enemy)\n return board,winner,tie\n\nclass Artificial_Intelligence(Inteligence): # AI class\n\n lines_file_name = \"\"\n\n def __init__(self):\n self.name = \"Compukter\"\n self.lines_file_name = self.get_file_path(\"Phrases.txt\")\n\n def check_two_cell(self, board, player):# Return True and coordinates of sell if the player has marked two out of three cells in the same row\n check_list1,check_list2 = [],[]\n board_len = len(board)\n for i in range(board_len):\n for j in range(board_len):\n check_list1.append(board[i][j])\n check,col,row = 
self.enemy_looking_two_cell(board,player,check_list1,\"first_simple\",i)\n if check:\n return check,row,col\n check_list2.append(board[j][i])\n check,row,col = self.enemy_looking_two_cell(board,player,check_list2,\"first_simple\",i)\n if check:\n return check,row,col\n check_list1,check_list2 = [],[]\n for i in range(board_len):\n for j in range(board_len):\n if i == j:\n check_list1.append(board[i][j])\n check,row,col = self.enemy_looking_two_cell(board,player,check_list1,\"second_simple\")\n if check:\n return check,row,col\n check_list1 = []\n count = 3\n for i in range(board_len):\n count -= 1\n check_list1.append(board[i][count])\n check,row,col = self.enemy_looking_two_cell(board,player,check_list1,\"third_simple\")\n if check:\n return check,row,col\n return False,0,0\n\n def enemy_looking_two_cell(self, board, player, check_list, mode, col = None):# Help method for \"check_two_cell\"\n if check_list.count(player) == 2 and Inteligence in check_list:\n if mode == \"first_simple\":\n return True,check_list.index(Inteligence),col\n elif mode == \"second_simple\":\n return True,check_list.index(Inteligence),check_list.index(Inteligence)\n elif mode == \"third_simple\":\n col = check_list.index(Inteligence)\n if col == 0:\n col = 2\n elif col == 2:\n col = 0\n return True,check_list.index(Inteligence),col\n return False,None,None\n \n def get_ai_move(self, board, enemy):# Get AI move\n row, col = 0, 0\n check, row, col = self.check_two_cell(board, self)\n if check:\n return row, col\n check, row, col = self.check_two_cell(board,enemy)\n if check:\n return row, col\n check, row, col = self.check_coreners(enemy,board)\n if check:\n return row, col\n while True:\n row = Parameters.abc_to_digit[Parameters.abc[random.randint(0,2)]]\n col = random.randint(0,2)\n if board[row][col] != Inteligence:\n continue\n else:\n return row, col\n \n def check_coreners(self, enemy, board):\n row, col = 0, 0\n right_list = []\n left_list = []\n for i in range(len(board)):\n right_list.append(board[0][i])\n left_list.append(board[i][0])\n if i == 2:\n for j in range(1,3):\n right_list.append(board[j][i])\n left_list.append(board[i][j])\n if enemy not in left_list or enemy not in right_list:\n if board[0][0] == Inteligence:\n return True, 0,0\n elif board[2][2] == Inteligence:\n return True, 2,2\n else:\n if board[0][0] == Inteligence:\n return True, 0,0\n elif board[2][2] == Inteligence:\n return True, 2,2\n if enemy not in left_list and Inteligence in left_list:\n if board[2][0] == Inteligence:\n return True, 2,0\n elif enemy not in right_list and Inteligence in right_list:\n if board[0][2] == Inteligence:\n return True, 0, 2\n\n return False, row, col\n\n \n def move(self,enemy,board):# AI move\n self.clear_screen()\n print(self.name + \": \" + self.get_random_line())\n self.print_board(board)\n time.sleep(self.a_i_time_sleep)\n row,col = self.get_ai_move(board,enemy)\n board = self.mark(board,row,col)\n winner,tie = self.check_finish(board,enemy)\n return board,winner,tie\n\n\n def get_random_line(self):# Get random line from file\n with open(self.lines_file_name) as file_word:\n return random.choice(list(file_word))\n\n","sub_path":"Clases.py","file_name":"Clases.py","file_ext":"py","file_size_in_byte":9510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"68654362","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef kernel(x, y, sigma=1):\n k = np.exp(-(np.linalg.norm((x-y), ord=2)**2)/(2*sigma**2))\n return k\n\ndef logReg(x, 
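Clases.py defines the players but ships no game driver. A minimal sketch of a main loop, assuming (as `mark()` implies) that empty cells hold the `Inteligence` class itself:

```python
# Hypothetical driver for Clases.py; not part of the original module.
# Assumes Phrases.txt exists next to Clases.py (the AI reads its banter from it).
from Clases import Human, Artificial_Intelligence, Inteligence

def play():
    # Empty cells hold the Inteligence class itself, which is what mark() compares against
    board = [[Inteligence] * 3 for _ in range(3)]
    human = Human("Player 1")
    human.symbol = "X"
    ai = Artificial_Intelligence()
    ai.symbol = "O"
    current, other = human, ai
    while True:
        board, winner, tie = current.move(other, board)
        if winner or tie:
            current.finish(winner, tie, board)
            break
        current, other = other, current  # alternate turns

if __name__ == "__main__":
    play()
```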
theta):\n # g = 1/(1 + np.exp(-kernel(x, theta)))\n g = 1/(1 + np.exp(-np.matmul(a=x, b=theta)))\n # g = np.matmul(a=x, b=theta)\n return g\n\ndef reshapeData(x, deg=0):\n m = len(x)\n x = np.append(arr=np.ones([m, 1]), values=x, axis=1)\n for i in range(deg-1):\n x = np.append(arr=x, values=np.reshape(np.power(x[::, 1], i+2), newshape=[m, 1]), axis=1)\n x = np.append(arr=x, values=np.reshape(np.power(x[::, 2], i+2), newshape=[m, 1]), axis=1)\n x = np.append(arr=x, values=np.reshape(x[::, 1]*x[::, 0], newshape=[m, 1]), axis=1)\n for j in range(deg-1):\n x = np.append(arr=x, values=np.reshape(np.power(x[::, 1], i+2)*np.power(x[::, 0], j+3), newshape=[m, 1]), axis=1)\n x = np.append(arr=x, values=np.reshape(np.power(x[::, 0], i+2)*np.power(x[::, 1], j+3), newshape=[m, 1]), axis=1)\n return x\n\ndef gradDesc(x, y, n_iter=1000, alpha=0.01, regParam=1):\n m = len(x)\n x = reshapeData(x)\n theta = [np.ones([int(np.size(x)/np.size(y))])*0]\n regParam = np.ones([int(np.size(x)/np.size(y))])*regParam\n for i in range(n_iter):\n theta.append(theta[i] - alpha/m*(np.matmul(a=x.T, b=(logReg(x, theta[i]) - y)) - regParam))\n return theta[-1]\n\ndef classify(x, theta):\n x = reshapeData(x)\n pred = np.round(logReg(x, theta), 0)\n return pred\n\ndef getScore(x, theta):\n x = reshapeData(x)\n score = logReg(x, theta)\n return score\n\n\nif __name__ == '__main__':\n n0 = 10\n n1 = 10\n ntest = 100\n dim = 2\n x0 = np.random.randn(n0, dim) + 1 * np.ones([n0, dim])\n x1 = np.random.randn(n1, dim) - 1 * np.ones([n1, dim])\n x = np.concatenate([x0, x1])\n y = np.concatenate([np.zeros(n0), np.ones(n1)])\n # y[np.sqrt(x[::, 0]**2 + x[::, 1]**2)<1] = 0\n # y[np.sqrt(x[::, 0]**2 + x[::, 1]**2)>=1] = 1\n test = np.random.randn(ntest, dim)\n\n theta = gradDesc(x, y)\n pred = classify(x=test, theta=theta)\n \"\"\"\n from sklearn.linear_model import LogisticRegression\n model = LogisticRegression(C=1)\n model.fit(x, y)\n pred = model.predict(test)\n \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(x[y==0, 0], x[y==0, 1], c='r', zorder=2, marker='o', alpha=0.3)\n ax.scatter(x[y==1, 0], x[y==1, 1], c='b', zorder=2, marker='o', alpha=0.3)\n ax.scatter(test[pred==0, 0], test[pred==0, 1], c='r', zorder=2, marker='x')\n ax.scatter(test[pred==1, 0], test[pred==1, 1], c='b', zorder=2, marker='x')\n\n xlim = (-5, 5)\n ylim = (-5, 5)\n xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 100),\n np.linspace(ylim[0], ylim[1], 100))\n Z = getScore(np.c_[xx.ravel(), yy.ravel()], theta)\n Z = Z.reshape(xx.shape)\n ax.contour(xx, yy, Z, [0.5], colors='k')\n # Z = model._predict_proba_lr(np.c_[xx.ravel(), yy.ravel()])\n # Z = Z[::, 1].reshape(xx.shape)\n # ax.contour(xx, yy, Z, [0.5], colors='k', alpha=0.10)\n plt.contourf(xx, yy, Z, cmap='jet', alpha=0.7)\n plt.colorbar()\n\n plt.show()\n","sub_path":"logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"451256104","text":"#!/usr/bin/env python3\n\nimport datetime\nimport unittest\nimport urllib.request\n\nimport hw4 as t\n\n\nTEST_LOG = 'ftp://shannon.usu.edu.ru/python/hw4/test.log'\n\n\nclass Test(unittest.TestCase):\n def setUp(self):\n with urllib.request.urlopen(TEST_LOG) as f:\n self.data = f.read().decode('utf-8').split('\\n')\n\n self.stat = t.make_stat()\n\n def test(self):\n for line in filter(lambda s: 'OPTION' not in s, self.data):\n self.stat.add_line(line)\n\n self.assertDictEqual(self.stat.results(), 
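A note on `gradDesc` above: the penalty enters the update as a constant vector (`- regParam`), while the gradient of the usual L2 penalty is proportional to `theta` itself. A sketch of the conventional regularized update, reusing `reshapeData` and `logReg` from this file; whether the original constant subtraction was intentional is unknown:

```python
# Sketch: conventional L2-regularized logistic regression update,
# assuming reshapeData() and logReg() as defined above.
import numpy as np

def grad_desc_l2(x, y, n_iter=1000, alpha=0.01, lam=1.0):
    m = len(x)
    x = reshapeData(x)
    theta = np.zeros(int(np.size(x) / np.size(y)))
    for _ in range(n_iter):
        grad = np.matmul(x.T, logReg(x, theta) - y) / m
        reg = lam / m * theta
        reg[0] = 0.0  # leave the intercept unpenalized
        theta = theta - alpha * (grad + reg)
    return theta
```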
TEST)\n\n\nTEST = {\n 'FastestPage': '/img/r.png',\n 'MostActiveClient': '192.168.12.155',\n 'MostActiveClientByDay': {datetime.date(2012, 7, 8): '192.168.12.155'},\n 'MostPopularBrowser': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; '\n 'Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR '\n '3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; '\n 'Tablet PC 2.0; .NET4.0C; .NET4.0E; InfoPath.3; '\n 'MS-RTC LM 8)',\n 'MostPopularPage': '/img/ao.gif',\n 'SlowestAveragePage': '/call_centr.php',\n 'SlowestPage': '/menu-top.php'}\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"518681325","text":"import report\n\ndictionary=open(\"/home/breeze/Downloads/2019S2-COMP90049_proj1-data/dict.txt\")\ncandidates=open(\"/home/breeze/Downloads/2019S2-COMP90049_proj1-data/report_3_repeat_character\")\nblends=open(\"/home/breeze/Downloads/2019S2-COMP90049_proj1-data/blends.txt\")\ncandidatesList=[]\nwordList=[]\nblendList=[]\nresultList=[]\nblendword = []\nfor token in candidates.readlines():\n token = token.rsplit()[0]\n candidatesList.append(token)\nfor word in dictionary.readlines():\n word = word.rsplit()[0]\n wordList.append(word)\n\nfor blend in blends.readlines():\n x=blend.split('\\t')\n blendword.append(x[0])\n x[2]=x[2].rsplit()[0]\n blendList.append(x)\n\njoResult=open(\"/home/breeze/Downloads/2019S2-COMP90049_proj1-data/3GramTotal.txt\",\"w\")\njaroResult = []\nfor token in candidatesList:\n\n maxDistance = 99\n maxWord =\"\"\n similar = []\n for word in wordList:\n dis = report.NGramDistance(token, word,3)\n if dis 0:\n # print(command)\n # print(res)\n mostRecent = sorted(res, key=itemgetter(1),reverse=True)[0]\n # print(mostRecent)\n return mostRecent\n else:\n return None\n # panFrame = pd.read_sql_query(command, self.conn)\n #return lanes open, lane time, lane delay\n\n pass\n\n\n\n\n\n\n","sub_path":"sqlLiteActions.py","file_name":"sqlLiteActions.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"391333261","text":"#!/usr/bin/env python3\n\"\"\"\nfuzzable.py\n\n Binary Ninja helper plugin for fuzzable target discovery.\n\"\"\"\nimport os\n\nimport binaryninja\nimport binaryninja.log as log\nimport binaryninja.interaction as interaction\n\nfrom binaryninja.enums import SymbolType\nfrom binaryninja.plugin import BackgroundTaskThread, PluginCommand\nfrom binaryninja.settings import Settings\n\nfrom .analysis import FuzzableAnalysis\n\n# configurable settings to tune\nSettings().register_group(\"fuzzable\", \"Fuzzable\")\nSettings().register_setting(\n \"fuzzable.depth_threshold\",\n \"\"\"\n {\n \"title\" : \"Callgraph depth threshold\",\n \"description\" : \"Minimum number of levels in callgraph to be considered optimal for fuzzing.\",\n \"type\" : \"string\",\n \"default\" : \"100\"\n }\n\"\"\",\n)\n\nSettings().register_setting(\n \"fuzzable.loop_increase_score\",\n \"\"\"\n {\n \"title\" : \"Don't score natural loop presence\",\n \"description\" : \"Don't include natural loop as part of the fuzzability score\",\n \"type\" : \"boolean\",\n \"default\" : false\n }\n\"\"\",\n)\n\nSettings().register_setting(\n \"fuzzable.skip_stripped\",\n \"\"\"\n {\n \"title\" : \"Skip stripped functions for analysis\",\n \"description\" : \"Turn on if stripped functions are abundant and costly to analyze, and 
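For reference, values registered with `Settings().register_setting` come back through the query API; `get_bool` is exactly what this plugin uses further down. A small sketch, assuming a live Binary Ninja session:

```python
# Sketch: reading the registered "fuzzable" settings back at analysis time.
from binaryninja.settings import Settings

depth_threshold = int(Settings().get_string("fuzzable.depth_threshold"))
skip_stripped = Settings().get_bool("fuzzable.skip_stripped")
ignore_loops = Settings().get_bool("fuzzable.loop_increase_score")
```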
known to be irrelevant.\",\n \"type\" : \"boolean\",\n \"default\" : false\n }\n\"\"\",\n)\n\n\nclass WrapperTask(BackgroundTaskThread):\n def __init__(self, view):\n super(WrapperTask, self).__init__(\n \"Finding fuzzable targets in current binary view\"\n )\n self.view = view\n\n def run(self):\n funcs = self.view.functions\n log.log_info(f\"Starting target discovery against {len(funcs)} functions\")\n\n # final markdown table to be presented to user, with headers created first\n markdown_result = \"# Fuzzable Targets\\n | Function Name | Fuzzability | Coverage Depth | Has Loop? | Recursive Func? |\\n| :--- | :--- | :--- | :--- |\\n\"\n\n # append to CSV buffer if user chooses to export after analysis\n csv_out = '\"Name\", \"Stripped\", \"Interesting Name\", \"Interesting Args\", \"Depth\", \"Cycles\", \"Fuzzability\"\\n'\n\n # stores all parsed analysis objects\n parsed = []\n\n # iterate over each symbol\n for func in funcs:\n name = func.name\n symbol = func.symbol.type\n\n # ignore imported functions from other libraries, ie glibc or win32api\n if (symbol is SymbolType.ImportedFunctionSymbol) or (\n symbol is SymbolType.LibraryFunctionSymbol\n ):\n log.log_info(f\"Skipping analysis for known function {name}\")\n continue\n\n # ignore targets with patterns that denote some type of profiling instrumentation, ie stack canary\n if name.startswith(\"_\"):\n log.log_info(f\"Skipping analysis for function {name}\")\n continue\n\n # if set, ignore all stripped functions for faster analysis\n if (\"sub_\" in name) and Settings().get_bool(\"fuzzable.skip_stripped\"):\n log.log_info(f\"Skipping analysis for stripped function {name}\")\n continue\n\n # instantiate analysis of the given target\n analysis = FuzzableAnalysis(func)\n\n # if a loop is detected in the target, and it exists as part a callgraph,\n # set has_loop for that parent as well\n # TODO: cleanup and encapsulate in FuzzableAnalysis\n for prev in parsed:\n if analysis.has_loop and analysis.name in prev.visited:\n prev.has_loop = True\n\n parsed += [analysis]\n\n # sort parsed by highest fuzzability score and coverage depth\n parsed = sorted(parsed, key=lambda x: (x.fuzzability, x.depth), reverse=True)\n\n # add ranked results as rows to final markdown table and CSV if user chooses to export\n for analysis in parsed:\n markdown_result += analysis.markdown_row()\n csv_out += analysis.csv_row()\n\n # store CSV output to memory\n self.view.store_metadata(\"csv\", csv_out)\n\n # output report back to user\n self.view.show_markdown_report(\"Fuzzable targets\", markdown_result)\n\n\ndef run_fuzzable(view):\n \"\"\"Callback used to instantiate thread and start analysis\"\"\"\n task = WrapperTask(view)\n task.start()\n\n\ndef run_export_report(view):\n \"\"\"Generate a report from a previous analysis, and export as CSV\"\"\"\n log.log_info(\"Attempting to export results to CSV\")\n try:\n csv_output = view.query_metadata(\"csv\")\n except KeyError:\n interaction.show_message_box(\n \"Error\", \"Cannot export without running an analysis first.\"\n )\n return\n\n # write last analysis to filepath\n csv_file = interaction.get_save_filename_input(\"Filename to export as CSV?\", \"csv\")\n csv_file = csv_file.decode(\"utf-8\") + \".csv\"\n\n log.log_info(f\"Writing to filepath {csv_file}\")\n with open(csv_file, \"w+\") as fd:\n fd.write(csv_output)\n\n interaction.show_message_box(\"Success\", f\"Done, exported to {csv_file}\")\n\n\ndef run_harness_generation(view, func):\n \"\"\"Experimental automatic fuzzer harness generation 
support\"\"\"\n\n template_file = os.path.join(binaryninja.user_plugin_path(), \"fuzzable\")\n if view.view_type == \"ELF\":\n template_file += \"/templates/linux.cpp\"\n else:\n interaction.show_message_box(\n \"Error\",\n \"Experimental harness generation is only supported for ELFs at the moment\",\n )\n return\n\n # parse out template based on executable format, and start replacing\n with open(template_file, \"r\") as fd:\n template = fd.read()\n\n log.log_info(\"Replacing elements in template\")\n template = template.replace(\"{NAME}\", func.name)\n template = template.replace(\"{RET_TYPE}\", str(func.return_type))\n\n harness = interaction.get_save_filename_input(\"Filename to write to?\", \"cpp\")\n harness = csv_file.decode(\"utf-8\") + \".cpp\"\n\n log.log_info(\"Writing new template to workspace\")\n with open(harness, \"w+\") as fd:\n fd.write(template)\n\n interaction.show_message_box(\"Success\", f\"Done, wrote fuzzer harness to {harness}\")\n\n\nPluginCommand.register(\n \"Fuzzable\\\\Analyze fuzzable targets\",\n \"Identify and generate targets for fuzzing\",\n run_fuzzable,\n)\n\nPluginCommand.register(\n \"Fuzzable\\\\Export fuzzability report as CSV\",\n \"Identify and generate targets for fuzzing\",\n run_export_report,\n)\n\nPluginCommand.register_for_function(\n \"Fuzzable\\\\Generate fuzzing harness (EXPERIMENTAL, C/C++ ONLY)\",\n \"For a target function, generate a AFL/libFuzzer C++ harness\",\n run_harness_generation,\n)\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"300540905","text":"from networking_p4.services.service_drivers.default.services.abstract import AbstractService\nfrom oslo_log import log as logging\n\nLOG = logging.getLogger(__name__)\n\n\nclass UnconfigureModuleService(AbstractService):\n\n def __init__(self, rpc_client):\n super(UnconfigureModuleService, self).__init__(rpc_client)\n\n def handle(self, context):\n configuration = context.additional_context\n\n LOG.info(\"Handling ConfigureModule\")\n LOG.info(\"Config: \" + str(configuration['flow_rules']))\n for flow_rule in configuration['flow_rules']:\n table_id = flow_rule['table_id']\n table_entry = flow_rule['entry']\n self.rpc_client.ask_agent_to_delete_table_entry(self.rpc_ctx, table_id=table_id, table_entry=table_entry)\n","sub_path":"networking_p4/services/service_drivers/default/services/unconfigure_module.py","file_name":"unconfigure_module.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"641458725","text":"import torch, numpy\nimport os\nimport torch.nn as nn\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable \nfrom PIL import Image\nmodel = models.resnet34(pretrained=True)\nlayer = model._modules.get('avgpool')\nmodel.eval()\nscaler = transforms.Scale((224, 224))\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\nto_tensor = transforms.ToTensor()\n\ndef get_vector(image_name):\n\timg = Image.open(image_name).convert('RGB')\n\tt_img = Variable(normalize(to_tensor(scaler(img))).unsqueeze(0))\n\tmy_embedding = torch.zeros(1, 512, 1, 1)\n\tdef copy_data(m, i, o):\n\t\tmy_embedding.copy_(o.data)\n\th = layer.register_forward_hook(copy_data)\n\tmodel(t_img)\n\th.remove()\n\treturn my_embedding\n\nmodel = 
for file in os.listdir('testing7/English')[21:]:\n print(file)\n if (\"json\" not in file):\n l = []\n with open('testing7/English/'+file+'/word.txt') as w:\n word = w.readline().strip().replace(' ', '_')\n print(word)\n for f in os.listdir('testing7/English/'+file):\n if (\"json\" not in f) and (\"txt\" not in f):\n try:\n x = get_vector('testing7/English/'+file+'/'+f).data.numpy()[0, :, 0, 0]\n l.append(x)\n with open('100/'+word, 'a') as out:\n numpy.savetxt(out, x.reshape(1, 512), fmt=\"%s\")\n except Exception:\n continue\n if not l:\n continue # no embeddings could be extracted for this word\n average = numpy.zeros(512,)\n for embedding in l:\n average += embedding\n average = average / len(l)\n with open('100avg/'+word, 'a') as out:\n numpy.savetxt(out, average.reshape(1, 512), fmt=\"%s\") # write the mean embedding, not the last sample\n","sub_path":"extractembeddings.py","file_name":"extractembeddings.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"8857968","text":"import glob\nimport os\n\nf = open(\"data/test.txt\", \"a+\")\nf2 = open(\"data/train.txt\", \"a+\")\n\nfor files in glob.glob(\"data/test/*.jpg\"):\n #print(files)\n f.write(\"%s\\n\" % files.strip().replace(\"test\\\\\", \"images/\"))\n\nfor files in glob.glob(\"data/img/*.jpg\"):\n #print(files)\n f2.write(\"%s\\n\" % files.strip().replace(\"\\\\\", \"/\"))\n","sub_path":"x64/Release/label_generator.py","file_name":"label_generator.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"585966643","text":"import base64\r\nwith open(\"object-detection.jpg\", \"rb\") as img_file:\r\n my_string = base64.b64encode(img_file.read())\r\n#print(\"data:image/jpeg;base64,\"+str(my_string))\r\nimport requests\r\nphonenumber = \"917845671280\",\"919003366217\",\"919884915977\"\r\nbody = str(my_string,'utf-8')\r\nfor phone in phonenumber:\r\n print (phone)\r\n url = \"https://api.chat-api.com/instance241711/sendFile?token=xtomvz79mpfs1fer\"\r\n data = {\r\n \"phone\": phone,\r\n \"body\": \"data:image/jpeg;base64,\"+body,\r\n \"caption\": \"RED ALERT!! 
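The embedding files written above are plain-text 1x512 rows, so they can be reloaded with `numpy.loadtxt`. A sketch comparing two averaged word vectors by cosine similarity; the word file names below are hypothetical:

```python
# Hypothetical follow-up: compare two averaged word embeddings from the files above.
import numpy

def load_avg(path):
    # files were written as 1x512 text rows; average defensively in case of appends
    arr = numpy.loadtxt(path).reshape(-1, 512)
    return arr.mean(axis=0)

def cosine(a, b):
    return float(numpy.dot(a, b) / (numpy.linalg.norm(a) * numpy.linalg.norm(b)))

v1 = load_avg('100avg/cat')  # hypothetical word file
v2 = load_avg('100avg/dog')  # hypothetical word file
print(cosine(v1, v2))
```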
Intruder detected Please check the mail for more info\",\r\n \"filename\": \"detection.jpg\"\r\n }\r\n req = requests.post(url, json=data)\r\n print(req)","sub_path":"whatsappwithatt.py","file_name":"whatsappwithatt.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"539137888","text":"#!\"C:\\Program Files (x86)\\Ampps\\python\\python.exe\"\nprint (\"Content-type: text/html\\n\\n\")\n\nfrom functions import *\nimport os\n\nclass MyHTMLParser(HTMLParser):\n lsStartTags = list()\n lsEndTags = list()\n lsStartEndTags = list()\n lsComments = list()\n lsData = list()\n\n\n inRow=False\n inCell=False\n rows = []\n course = []\n schedule = []\n currentRow = []\n currentRowType = ''\n currentCRN=''\n colNum=0\n currentTag=''\n currentCell=''\n lineNum=0\n\n subDiv = 0\n inDiv = False\n currentDiv = ''\n currentDivNum = 0\n divs = []\n\n def handle_starttag(self, tag, attrs):\n print(\"Starttag\", tag)\n if self.inDiv:\n if tag==\"div\":\n self.subDiv+=1\n print(\"subdiv add:\", self.subDiv)\n\n self.currentTag=tag\n # self.lsStartTags.append(tag)\n if tag==\"div\":\n self.inDiv=True\n\n\n if self.inDiv:\n print(\"Attrs\", attrs)\n print(\"current div\", self.currentDiv)\n strAttrs=\"\"\n for a in attrs:\n strAttrs=a[0] + \"='\" + a[1] + \"'\"\n self.currentDiv += \"<\"+tag+\" \" + strAttrs+ \">\"\n self.currentDivNum+=1\n\n # if tag==\"tr\":\n # self.currentRow=[]\n # self.inRow=True\n # self.colNum=0\n # self.currentRowType=''\n # myPrint('IN ROW, length: ', len(self.rows))\n #\n # if self.inRow:\n # if tag==\"td\":\n # self.inCell=True\n #\n # for attr in attrs:\n # self.lsStartTags.append(tag)\n #\n # if tag == \"div class\":\n # input(\"found href....\\n\")\n\n def handle_endtag(self, tag):\n # myPrint(\"End tag :\", tag)\n\n if self.inDiv:\n self.currentDiv+= \"\"\n\n if tag==\"div\":\n if self.inDiv:\n if self.subDiv > 10:\n self.subDiv -= 1\n print(\"Subdiv minus:\", self.subDiv)\n else:\n self.divs.append(self.currentDiv)\n self.currentDiv=\"\"\n\n\n def handle_data(self, data):\n # print(\"Data:\", data)\n ignoreWhole = ['\\n', 'Click name to see CV', 'MM', 'DD', '/', '(', ')', 'P', '']\n ignoreIn = ['=', '\\n', '\\t', '/',\n 'Search']\n self.currentDiv+=data\n\n divNum=-1\n if self.inDiv:\n divNum=self.currentDivNum\n\n\n self.lsData.append((divNum, data))\n\n # myPrint(\"Data :\", data)\n if self.inCell:\n # input(\"here1:\" + data)\n self.currentCell += data.strip() #remove whitespace from begin and end of data\n # data=fixArray(data, ignoreIn, ignoreWhole)\n # self.currentRow = fixArray(self.currentRow, ignoreIn, ignoreWhole)\n # if 1 < len(data) < 20:\n # input(\"Press enter to continue...\")\n\n def handle_comment(self, data):\n # myPrint(\"Comment :\", data)\n pass\n\n\nhtmlFile = '0.html'\ndatFile = '0.dat'\n\nparser = MyHTMLParser()\nfixHTML(datFile)\n\nwith open (htmlFile) as f:\n htmlText = f.read()\n\n# website.batchValue(os.getcwd().replace('\\\\', '\\\\\\\\'))\n\nparser.feed(htmlText)\n\ndata = [x[1] for x in parser.lsData]\ndataNum = [x[0] for x in parser.lsData]\n\n\nremove = [\"\\n\"]\n\ndata = [x.replace(\"\\n\", \"\") for x in data]\ndata = fixArray(data, ['\\n', \"$\", \"@\"], [' ', '', '\\t', '\\t\\t'])\nmyPrint(data)\nmyPrint(len(data))\n# print(data)\n\n\nbool = False\n\nlookfor = ['jan', \"Jan\", \"feb\", \"Feb\", \"mar\", \"Mar\", \"apr\", \"Apr\", \"may\", \"May\", \"jun\", \"Jun\", \"jul\", \"Jul\", 'aug', \"Aug\", \"sep\", \"Sep\", 'oct', \"Oct\", 'nov', \"Nov\", 
\"dec\", \"Dec\", \"1/\", \"2/\", \"3/\", \"4/\", \"5/\", \"6/\", \"7/\", \"8/\", \"9/\", \"10/\", \"11/\", \"12/\"]\ndateList = []\n\n# print(\"Len Divs:\", len(parser.divs))\n\nfor counter ,text in enumerate(data):\n if (bool == False) and ((\"due\" in text) or (\"Due\" in text)):\n bool = True\n\n if bool:\n for month in lookfor: # Start point\n if month in text:\n # print(\"Count:\", counter)\n # dateList.append(text + \", \" + parser.divs[counter-10])\n dateList.append(text)\n bool = False\n\nmyPrint(dateList)\nmyPrint(len(dateList))\n\nfor i in dateList:\n print(i)\n\nprint()\nprint()\n\n\ndef getDateList():\n return dateList\n# print(parser.lsStartTags)\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"170003442","text":"import tweepy\n\nfrom bot import timetool, loggingservice, grabber\nfrom secret import keys\n\nbot_username = 'Tagesenergie-Twitterbot'\nlogfile_name = bot_username + \".log\"\n\n\ndef create_tweet():\n \"\"\"Creates the text of the tweet.\"\"\"\n\n try:\n text = \"Die Tagesenergie-Werte vom \" + timetool.get_date()\n text = text + \"\\nMagie-O-Meter: \" + grabber.get_magicvalue()\n text = text + \"\\nEnergie Impulswert: \" + grabber.get_energyimpulsvalue()\n text = text + \"\\nBewusstwerdungsindex: \" + grabber.get_consiousvalue()\n except AttributeError as ae:\n loggingservice.log(repr(ae), logfile_name)\n text = grabber.get_errortext()\n return text\n\n\ndef tweet(text):\n \"\"\"Send out the text as a tweet.\"\"\"\n # Twitter authentication\n auth = tweepy.OAuthHandler(keys.CONSUMER_KEY, keys.CONSUMER_SECRET)\n auth.set_access_token(keys.ACCESS_TOKEN, keys.ACCESS_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n loggingservice.log(repr(e), logfile_name)\n else:\n loggingservice.log(\"Tweeted:\\n\" + text + \"\\n\", logfile_name)\n\n\nif __name__ == \"__main__\":\n tweet_text = create_tweet()\n tweet(tweet_text)\n","sub_path":"bot/botmain.py","file_name":"botmain.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"13197555","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom parse import parse\nimport sys\nimport os\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport uuid\nimport io\nimport tarfile\nimport shutil\nimport numpy as np\nfrom . 
import cfg\n\nfrom .util import docker_images_available, is_running_in_docker, \\\n get_docker_client\n\nclass MissingRequiredParameterError(Exception):\n\n \"\"\"Required parameter is not provided in feature function call.\"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return str(self.value)\n\n\nclass MissingRequiredReturnKeyError(Exception):\n\n \"\"\"Required return value is not provided in feature definition.\"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return str(self.value)\n\n\nclass myFeature(object):\n\n \"\"\"Decorator for custom-defined time series feature(s) function.\n\n Applies function wrapper that ensures required parameters and\n return values are present before executing, raising an exception if\n not.\n\n Attributes\n ----------\n requires : list\n List of names of features required for decorated function to\n execute.\n provides : list\n List of names of features generated by decorated function.\n\n \"\"\"\n\n def __init__(self, requires, provides):\n \"\"\"Instantiates object, sets args as attributes.\n\n Parameters\n ----------\n requires : list\n List of variable names required by the function.\n provides : list\n List of the key names of the returned dictionary - the\n features calculated by a particular function.\n\n \"\"\"\n self.requires = requires\n self.provides = provides\n\n def __call__(self, f):\n \"\"\"Wrap decorated function.\n\n Wrap decorated function with a check to ensure that required\n parameters (specified in decorator expression) are provided\n upon function call (raises MissingRequiredParameterError if\n not) and that all features reportedly returned (specified in\n decorator expression) are in fact returned (raises\n MissingRequiredReturnKeyError if not).\n\n Returns\n -------\n function\n The wrapped function.\n\n \"\"\"\n def wrapped_f(*args, **kwargs):\n for required_arg in self.requires:\n if required_arg not in args and required_arg not in kwargs:\n raise MissingRequiredParameterError(\n \"Required arg %s not provided in function call.\" %\n required_arg)\n result_dict = f(*args, **kwargs)\n for provided in self.provides:\n if provided not in result_dict:\n raise MissingRequiredReturnKeyError(\n \"Key %s not present in function return value.\" %\n provided)\n return result_dict\n return wrapped_f\n\n\nclass DummyFile(object):\n\n \"\"\"Used as a file object to temporarily redirect/suppress output.\"\"\"\n\n def write(self, x):\n pass\n\n\ndef parse_csv_file(fname, sep=',', skip_lines=0):\n \"\"\"Parse 2- or 3-column CSV file and return a list of its columns.\n\n Parameters\n ----------\n fname : str\n Absolute path to the CSV file.\n sep : str, optional\n Delimiter in TS data file, defaults to \",\".\n skip_lines : int, optional\n Number of leading lines to skip in file, defaults to 0.\n\n Returns\n -------\n list of list\n Two- or three-element list of lists of each of the columns. 
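Concretely, a custom feature script is a set of functions wearing this decorator; the `requires`/`provides` lists are later read back out of the source text by `parse_for_req_prov_params`. A minimal sketch with illustrative feature names:

```python
# Hypothetical entry in a custom feature script; feature names are illustrative.
import numpy as np
from mltsp.custom_feature_tools import myFeature  # import path assumed

@myFeature(requires=['t', 'm'], provides=['amplitude', 'duration'])
def simple_stats(t, m):
    return {
        'amplitude': (np.max(m) - np.min(m)) / 2.0,
        'duration': t[-1] - t[0],
    }
```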
If\n `fname` is not a 2- or 3-column CSV file, returns list of three\n empty lists.\n\n \"\"\"\n with open(fname, \"r\") as f:\n ts_data = np.loadtxt(f, delimiter=\",\", skiprows=skip_lines)\n ts_data = ts_data[:, :3].tolist() # Only using T, M, E; convert to list\n for row in ts_data:\n if len(row) < 2:\n raise custom_exceptions.DataFormatError(\n \"Incomplete or improperly formatted time \"\n \"series data file provided.\")\n tme = list(map(list, zip(*ts_data))) # Need t, m, and e in separate lists\n if len(tme) == 2:\n tme.append([]) # Add empty err col\n return tme\n\n\ndef parse_for_req_prov_params(script_fpath):\n \"\"\"\n \"\"\"\n with open(script_fpath, \"r\") as f:\n all_lines = f.readlines()\n fnames_req_prov_dict = {}\n all_required_params = []\n all_provided_params = []\n for i in range(len(all_lines) - 1):\n if \"@myFeature\" in all_lines[i] and \"def \" in all_lines[i + 1]:\n reqs_provs_1 = parse(\n \"@myFeature(requires={requires}, provides={provides})\",\n all_lines[i].strip())\n func_name = parse(\n \"def {funcname}({args}):\", all_lines[i + 1].strip())\n fnames_req_prov_dict[func_name.named['funcname']] = {\n \"requires\": eval(reqs_provs_1.named[\"requires\"]),\n \"provides\": eval(reqs_provs_1.named[\"provides\"])}\n all_required_params = list(set(\n all_required_params +\n list(set(eval(reqs_provs_1.named[\"requires\"])))))\n all_provided_params = list(set(\n all_provided_params +\n list(set(eval(reqs_provs_1.named[\"provides\"])))))\n return (fnames_req_prov_dict, all_required_params, all_provided_params)\n\n\ndef listify_feats_known_dict(features_already_known):\n \"\"\"\n \"\"\"\n if isinstance(features_already_known, dict):\n return [features_already_known]\n elif isinstance(features_already_known, list):\n return features_already_known\n else:\n raise ValueError(\"custom_feature_tools.py - features_already_known\"\n \" is of an invalid type (%s).\" %\n str(type(features_already_known)))\n\n\ndef call_custom_functions(features_already_known_list, all_required_params,\n all_provided_params, fnames_req_prov_dict):\n \"\"\"\n \"\"\"\n # import the custom feature defs\n try:\n from .custom_feature_scripts import custom_feature_defs\n except ImportError:\n try:\n import custom_feature_defs\n except ImportError:\n raise\n\n # temporarily redirect stdout:\n save_stdout = sys.stdout\n sys.stdout = DummyFile()\n\n all_extracted_features_list = []\n for features_already_known in features_already_known_list:\n all_required_params_copy = [x for x in all_required_params\n if x not in features_already_known]\n for reqd_param in all_required_params_copy:\n if reqd_param not in all_provided_params:\n raise Exception((\n \"Not all of the required parameters are provided by the \"\n \"functions in this script (required parameter '%s').\") %\n str(reqd_param))\n funcs_round_1 = []\n func_queue = []\n funcnames = list(fnames_req_prov_dict.keys())\n i = 0\n func_rounds = {}\n all_extracted_features = {}\n while len(funcnames) > 0:\n func_rounds[str(i)] = []\n for funcname in funcnames:\n reqs_provs_dict = fnames_req_prov_dict[funcname]\n reqs = reqs_provs_dict['requires']\n provs = reqs_provs_dict['provides']\n if len(set(all_required_params_copy) & set(reqs)) > 0:\n func_queue.append(funcname)\n else:\n func_rounds[str(i)].append(funcname)\n all_required_params_copy = [x for x in all_required_params_copy\n if x not in provs]\n arguments = {}\n for req in reqs:\n if req in features_already_known:\n arguments[req] = features_already_known[req]\n elif req in all_extracted_features:\n 
arguments[req] = all_extracted_features[req]\n func_result = getattr(\n custom_feature_defs, funcname)(**arguments)\n all_extracted_features = dict(\n list(all_extracted_features.items()) +\n list(func_result.items()))\n funcnames.remove(funcname)\n i += 1\n all_extracted_features_list.append(all_extracted_features)\n # revert to original stdout\n sys.stdout = save_stdout\n return all_extracted_features_list\n\n\ndef execute_functions_in_order(\n script_fpath,\n features_already_known={\n \"t\": [1, 2, 3], \"m\": [1, 23, 2], \"e\": [0.2, 0.3, 0.2],\n \"coords\": [22, 33]},\n multiple_sources=False):\n \"\"\"Generate custom features defined in script_fpath.\n\n Parses the script (which must have function definitions with\n decorators specifying the required parameters and those which are\n provided by each function) and executes the functions defined in\n that script such that all functions whose outputs are required\n as inputs of other functions are called first, if possible,\n otherwise raises an Exception.\n\n Parameters\n ----------\n script_fpath : str\n Path to custom feature definitions script.\n features_already_known : dict\n Dictionary providing all time-series data (time (\"t\"), magnitude\n (\"m\"), error (\"e\") as keys) and any meta-features.\n Example:\n {\"t\": [1, 2, 3], \"m\": [10.32, 11.41, 11.06],\n \"e\": [0.2015,0.3134,0.2953], \"coords\": [22.55,33.01]}\n\n Returns\n -------\n list of dict\n Dictionaries of all extracted features (key-value pairs are\n feature name and feature value respectively), one per input.\n\n \"\"\"\n # For when run inside Docker container:\n try:\n sys, os\n except NameError:\n import sys\n import os\n\n fnames_req_prov_dict, all_required_params, all_provided_params = \\\n parse_for_req_prov_params(script_fpath)\n features_already_known_list = listify_feats_known_dict(\n features_already_known)\n\n all_extracted_features_list = call_custom_functions(\n features_already_known_list, all_required_params, all_provided_params,\n fnames_req_prov_dict)\n\n return all_extracted_features_list\n\n\ndef parse_tsdata_to_lists(ts_data):\n \"\"\"Coerce raw TS data (list of rows or CSV-style text) into a list of [t, m(, e)] rows.\n \"\"\"\n tme = []\n if isinstance(ts_data, list):\n if len(ts_data) > 0:\n if isinstance(ts_data[0], (list, tuple)):\n # ts_data already in desired format\n tme = ts_data\n elif isinstance(ts_data[0], (str, type(u''))) and \\\n \",\" in ts_data[0]:\n for el in ts_data:\n if str(el) not in [\"\\n\", \"\"]:\n tme.append(el.split(\",\"))\n else:\n raise ValueError(\"ts_data is an empty list\")\n elif isinstance(ts_data, (str, type(u''))):\n all_lines = str(ts_data).strip().split(\"\\n\")\n for i in range(len(all_lines)):\n if all_lines[i].strip() == \"\":\n continue\n else:\n tme.append([x.strip()\n for x in all_lines[i].strip().split(\",\")])\n else:\n try:\n all_lines = str(ts_data).strip().split(\"\\n\")\n for i in range(len(all_lines)):\n if all_lines[i].strip() == \"\":\n continue\n else:\n tme.append([x.strip()\n for x in all_lines[i].strip().split(\",\")])\n except Exception:\n pass\n return tme\n\n\ndef parse_tsdata_from_file(ts_datafile_path):\n \"\"\"Load the T, M(, E) columns of a CSV file into a list of rows.\n \"\"\"\n with open(ts_datafile_path, \"r\") as f:\n ts_data = np.loadtxt(f, delimiter=\",\")\n ts_data = ts_data[:, :3].tolist() # Only using T, M, E; convert to list\n for row in ts_data:\n if len(row) < 2:\n raise custom_exceptions.DataFormatError(\n \"Incomplete or improperly formatted time \"\n \"series data file provided.\")\n return ts_data\n\n\ndef add_tsdata_to_feats_known_dict(features_already_known_list,\n ts_datafile_paths, ts_data_list):\n \"\"\"Fill in t/m(/e) arrays for each features dict from files or in-memory TS data.\n \"\"\"\n if ts_datafile_paths is 
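`call_custom_functions` resolves execution order by repeatedly deferring functions whose requirements are not yet available. The same ordering is a topological sort of the requires/provides graph; a sketch using the standard library's `graphlib` (Python 3.9+), over the same `fnames_req_prov_dict` shape:

```python
# Sketch: dependency ordering as a topological sort (alternative to the while-loop above).
from graphlib import TopologicalSorter

def execution_order(fnames_req_prov_dict, already_known):
    provider = {}  # feature name -> function that provides it
    for fname, rp in fnames_req_prov_dict.items():
        for feat in rp['provides']:
            provider[feat] = fname
    # edges point from each function to the functions that must run first;
    # requirements satisfied by already_known need no producer (the original
    # code raises for requirements no function can provide)
    graph = {
        fname: {provider[req] for req in rp['requires']
                if req not in already_known and req in provider}
        for fname, rp in fnames_req_prov_dict.items()
    }
    return list(TopologicalSorter(graph).static_order())
```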
None:\n ts_datafile_paths = [None] * len(features_already_known_list)\n elif ts_data_list is None:\n ts_data_list = [None] * len(features_already_known_list)\n for i in range(len(features_already_known_list)):\n if \"t\" not in features_already_known_list[i] or \\\n \"m\" not in features_already_known_list[i]:\n # Get TS data and put into features_already_known_list\n if ts_datafile_paths[i] is None and ts_data_list[i] is None:\n raise ValueError(\"No time series data provided! ts_datafile_paths \"\n \"is None and ts_data_list is None !!\")\n if ts_datafile_paths[i] is not None: # path to ts data file\n # parse ts data and put t,m(,e) into features_already_known\n tme = parse_tsdata_from_file(ts_datafile_paths[i])\n else: # ts_data passed directly\n tme = parse_tsdata_to_lists(ts_data_list[i])\n if len(tme) > 0:\n if all(len(this_tme) == 3 for this_tme in tme):\n T, M, E = list(zip(*tme))\n T = [float(el) for el in T]\n M = [float(el) for el in M]\n E = [float(el) for el in E]\n features_already_known_list[i][\"t\"] = T\n features_already_known_list[i][\"m\"] = M\n features_already_known_list[i][\"e\"] = E\n elif all(len(this_tme) == 2 for this_tme in tme):\n T, M = list(zip(*tme))\n T = [float(el) for el in T]\n M = [float(el) for el in M]\n features_already_known_list[i][\"t\"] = T\n features_already_known_list[i][\"m\"] = M\n else:\n raise Exception(\"custom_feature_tools.py - \"\n \"docker_extract_features() - not all elements \"\n \"of tme are the same length.\")\n\n\ndef make_tmp_dir():\n \"\"\"\n \"\"\"\n if os.path.exists(cfg.PROJECT_PATH_LINK):\n proj_path = cfg.PROJECT_PATH_LINK\n else:\n proj_path = cfg.PROJECT_PATH\n path_to_tmp_dir = os.path.join(proj_path, \"tmp\",\n str(uuid.uuid4())[:10])\n os.makedirs(path_to_tmp_dir)\n return path_to_tmp_dir\n\n\ndef generate_random_str():\n \"\"\"Generate random 10-character string using uuid.uuid4.\n \"\"\"\n return str(uuid.uuid4())[:10]\n\n\ndef copy_data_to_tmp_dir(path_to_tmp_dir, script_fpath,\n features_already_known_list):\n \"\"\"\n \"\"\"\n shutil.copy(script_fpath,\n os.path.join(path_to_tmp_dir, \"custom_feature_defs.py\"))\n with open(os.path.join(path_to_tmp_dir, \"features_already_known_list.pkl\"),\n \"wb\") as f:\n pickle.dump(features_already_known_list, f, protocol=2)\n # Create __init__.py file so that custom feats script can be imported\n open(os.path.join(path_to_tmp_dir, \"__init__.py\"), \"w\").close()\n return\n\n\ndef docker_copy(docker_client, container_id, path, target=\".\"):\n \"\"\"Copy file from docker container to host machine.\n\n Parameters\n ----------\n docker_client : docker.Client object\n The connected Docker client.\n container_id : str\n ID of the container to copy from.\n path : str\n Path to the file in the container.\n target : str\n Folder where to put the file.\n\n \"\"\"\n response = docker_client.copy(container_id, path)\n buffer = io.BytesIO()\n buffer.write(response.data)\n buffer.seek(0)\n tar = tarfile.open(fileobj=buffer, mode='r|')\n tar.extractall(path=target)\n\n\ndef extract_feats_in_docker_container(container_name, path_to_tmp_dir):\n \"\"\"\n \"\"\"\n tmp_data_dir = path_to_tmp_dir\n try:\n # Spin up Docker contain and extract custom feats\n # Instantiate Docker client\n client = get_docker_client()\n\n # Use symlink if one was created (in which case this is probably\n # being run in a Disco worker)\n if os.path.exists(cfg.PROJECT_PATH_LINK):\n proj_mount_path = cfg.PROJECT_PATH_LINK\n else:\n proj_mount_path = cfg.PROJECT_PATH\n # Create container\n cont_id = 
client.create_container(\n image=\"mltsp/base_disco\",\n command=\"python {}/run_script_in_container.py --{} --tmp_dir={}\".format(\n proj_mount_path, \"extract_custom_feats\", tmp_data_dir),\n tty=True,\n volumes=\"{}:{}\".format(\"\", proj_mount_path))[\"Id\"]\n\n # Start container\n client.start(cont_id,\n binds={proj_mount_path: {\"bind\": proj_mount_path,\n \"ro\": True}})\n # Wait for process to complete\n client.wait(cont_id)\n stdout = client.logs(container=cont_id, stdout=True)\n stderr = client.logs(container=cont_id, stderr=True)\n if str(stderr).strip() != \"\" and stderr != b'':\n print(\"\\n\\ndocker container stderr:\\n\\n\", str(stderr).strip(), \"\\n\\n\")\n # Copy pickled results data from Docker container to host\n docker_copy(client, cont_id, \"/tmp/results_list_of_dict.pkl\",\n target=path_to_tmp_dir)\n print(\"/tmp/results_list_of_dict.pkl copied to host machine.\")\n # Load pickled results data\n with open(os.path.join(path_to_tmp_dir, \"results_list_of_dict.pkl\"),\n \"rb\") as f:\n results_list_of_dict = pickle.load(f)\n return results_list_of_dict\n except:\n raise\n finally:\n # Kill and remove the container\n try:\n client.remove_container(container=cont_id, force=True)\n except UnboundLocalError:\n print(\"Error occurred in running Docker container.\")\n\n\ndef remove_tmp_files(path_to_tmp_dir):\n \"\"\"\n \"\"\"\n # Remove tmp dir\n shutil.rmtree(path_to_tmp_dir, ignore_errors=True)\n for tmp_file in (os.path.join(cfg.TMP_CUSTOM_FEATS_FOLDER,\n \"custom_feature_defs.py\"),\n os.path.join(cfg.TMP_CUSTOM_FEATS_FOLDER,\n \"custom_feature_defs.pyc\"),\n os.path.join(cfg.TMP_CUSTOM_FEATS_FOLDER,\n \"__init__.pyc\")):\n try:\n os.remove(tmp_file)\n except OSError:\n pass\n return\n\n\ndef docker_extract_features(\n script_fpath, features_already_known_list=[{}],\n ts_datafile_paths=None, ts_data_list=None):\n \"\"\"Extract custom features in a Docker container.\n\n Spins up a docker container in which custom script\n excecution/feature extraction is done inside. Resulting data are\n copied to host machine and returned as a dict.\n\n Parameters\n ----------\n script_fpath : str\n Path to script containing custom feature definitions.\n features_already_known_list : list of dict, optional\n List of dictionaries containing time series data (t,m,e) and\n any meta-features to be used in generating custom features.\n Defaults to []. NOTE: If omitted, or if \"t\" or \"m\" are not\n among contained dict keys, either (a) respective element of\n `ts_datafile_paths` or (b) `ts_data_list` (see below) MUST not\n be None, otherwise raises ValueError.\n ts_datafile_paths : list of str, optional\n List of paths to time-series CSV files. Defaults to None. NOTE:\n If None, either (a) corresponding element of\n `features_already_known_list` (see above) must contain \"t\"\n (time) and \"m\" (magnitude, or the measurement at each time)\n among its keys, OR (b) `ts_data_list` (see below) must be\n provided, otherwise raises ValueError.\n ts_data_list : list of list OR str, optional\n List of either (a) list of lists/tuples each containing t,m(,e)\n for each epoch, or (b) string containing equivalent comma-\n separated lines, each line being separated by a newline\n character (\"\\n\"). Defaults to None. 
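Putting the pieces together, the Docker path is driven entirely by `docker_extract_features`. A minimal sketch of a call with in-memory time-series data; the script path is hypothetical and the mltsp Docker image must be available:

```python
# Hypothetical invocation of the Docker-backed extraction defined below/above.
ts = [[1.0, 10.3, 0.2], [2.0, 11.4, 0.3], [3.0, 11.1, 0.2]]  # t, m, e rows
results = docker_extract_features(
    script_fpath="/path/to/my_feature_defs.py",  # hypothetical script path
    features_already_known_list=[{"coords": [22, 33]}],
    ts_data_list=[ts],
)
print(results[0])  # dict of generated features for the one input
```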
NOTE: If None, either\n `ts_datafile_paths` must not be None or \"t\" (time) and \"m\"\n (magnitude/measurement) must be among the keys of\n respective element of `features_already_known_list` (see\n above), otherwise raisesValueError.\n\n Returns\n -------\n list of dict\n List of dictionaries of all generated features.\n\n \"\"\"\n if isinstance(features_already_known_list, dict):\n features_already_known_list = [features_already_known_list]\n add_tsdata_to_feats_known_dict(features_already_known_list,\n ts_datafile_paths, ts_data_list)\n container_name = generate_random_str()\n path_to_tmp_dir = make_tmp_dir()\n\n copy_data_to_tmp_dir(path_to_tmp_dir, script_fpath,\n features_already_known_list)\n\n try:\n results_list_of_dict = extract_feats_in_docker_container(\n container_name, path_to_tmp_dir)\n except:\n raise\n finally:\n remove_tmp_files(path_to_tmp_dir)\n return results_list_of_dict\n\n\ndef assemble_test_data():\n \"\"\"\n \"\"\"\n features_already_known_list = []\n fname = os.path.join(cfg.SAMPLE_DATA_PATH, \"dotastro_215153.dat\")\n t, m, e = parse_csv_file(fname)\n features_already_known_list.append(\n {\"t\": t, \"m\": m, \"e\": e, \"coords\": [0, 0]})\n features_already_known_list.append(\n {\"t\": [1, 2, 3], \"m\": [50, 51, 52], \"e\": [0.3, 0.2, 0.4],\n \"coords\": [-11, -55]})\n features_already_known_list.append(\n {\"t\": [1], \"m\": [50], \"e\": [0.3], \"coords\": 2})\n return features_already_known_list\n\n\ndef verify_new_script(script_fpath, docker_container=False):\n \"\"\"Test custom features script and return generated features.\n\n Performs test run on custom feature def script with trial time\n series data sets and returns list of dicts containing extracted\n features if successful, otherwise raises an exception.\n\n Parameters\n ----------\n script_fpath : str\n Path to custom feature definitions script.\n docker_container : bool, optional\n Boolean indicating whether function is being called from within\n a Docker container.\n\n Returns\n -------\n list of dict\n List of dictionaries of extracted features for each of the trial\n time-series data sets.\n\n \"\"\"\n features_already_known_list = assemble_test_data()\n\n all_extracted_features_list = []\n if docker_images_available():\n print(\"Extracting features inside docker container...\")\n all_extracted_features_list = docker_extract_features(\n script_fpath=script_fpath,\n features_already_known_list=features_already_known_list)\n else:\n print(\"Docker not installed - running custom features script could be \"\n \"unsafe. 
Skipping generation of custom features.\")\n return []\n return all_extracted_features_list\n\n\ndef list_features_provided(script_fpath):\n \"\"\"Parses script and returns a list of all features it provides.\n\n Parses decorator expression in custom feature definitions script,\n returning a list of all feature names generated by the various\n definitions in that script.\n\n Parameters\n ----------\n script_fpath : str\n Path to custom features definition script.\n\n Returns\n -------\n list of str\n List of feature names that the script will generate.\n\n \"\"\"\n with open(script_fpath, \"r\") as f:\n all_lines = f.readlines()\n fnames_req_prov_dict = {}\n all_required_params = []\n all_provided_params = []\n for i in range(len(all_lines) - 1):\n if \"@myFeature\" in all_lines[i] and \"def \" in all_lines[i + 1]:\n reqs_provs_1 = parse(\n \"@myFeature(requires={requires}, provides={provides})\",\n all_lines[i].strip())\n func_name = parse(\n \"def {funcname}({args}):\", all_lines[i + 1].strip())\n fnames_req_prov_dict[func_name.named['funcname']] = {\n \"requires\": eval(reqs_provs_1.named[\"requires\"]),\n \"provides\": eval(reqs_provs_1.named[\"provides\"])}\n all_required_params = list(set(\n all_required_params +\n list(set(eval(reqs_provs_1.named[\"requires\"])))))\n all_provided_params = list(set(\n all_provided_params +\n list(set(eval(reqs_provs_1.named[\"provides\"])))))\n return all_provided_params\n\n\ndef generate_custom_features(\n custom_script_path, path_to_csv=None, features_already_known={},\n ts_data=None):\n \"\"\"Generate custom features for provided TS data and script.\n\n Parameters\n ----------\n custom_script_path : str\n Path to custom features script.\n path_to_csv : str, optional\n Path to CSV file containing time-series data. Defaults to None.\n If None, ts_data (see below) must not be None, otherwise\n raises an Exception.\n features_already_known : dict, optional\n List of dicts containing any meta-features associated with\n provided time-series data. Defaults to [].\n ts_data : list OR tuple, optional\n List (or tuple) of lists (or tuples) containing time,\n measurement (and optionally associated error values) data.\n Defaults to None. 
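Both `list_features_provided` and `parse_for_req_prov_params` recover the decorator arguments purely textually with the `parse` package (the inverse of `str.format`). A standalone demo of that pattern:

```python
# Standalone demo of the text-level decorator parsing used above.
from parse import parse

line = "@myFeature(requires=['t', 'm'], provides=['amplitude'])"
res = parse("@myFeature(requires={requires}, provides={provides})", line)
print(eval(res.named["requires"]))   # ['t', 'm']
print(eval(res.named["provides"]))   # ['amplitude']
```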
If None, path_to_csv must not be None,\n otherwise raises an Exception.\n\n Returns\n -------\n list of dict\n List of dictionaries containing newly-generated features.\n\n \"\"\"\n if path_to_csv:\n t, m, e = parse_csv_file(path_to_csv)\n elif ts_data:\n if len(ts_data[0]) == 3:\n t, m, e = list(zip(*ts_data))\n if len(ts_data[0]) == 2:\n t, m = list(zip(*ts_data))\n elif \"t\" not in features_already_known or \"m\" not in features_already_known:\n print(\"predict_class.predict:\")\n print(\"path_to_csv:\", path_to_csv)\n print(\"ts_data:\", ts_data)\n raise Exception(\"Neither path_to_csv nor ts_data provided...\")\n if \"t\" not in features_already_known:\n features_already_known['t'] = t\n if \"m\" not in features_already_known:\n features_already_known['m'] = m\n if e and len(e) == len(m) and \"e\" not in features_already_known:\n features_already_known['e'] = e\n\n if is_running_in_docker():\n all_new_features = execute_functions_in_order(\n features_already_known=features_already_known,\n script_fpath=custom_script_path)\n else:\n if docker_images_available():\n print(\"Generating custom features inside docker container...\")\n all_new_features = docker_extract_features(\n script_fpath=custom_script_path,\n features_already_known_list=features_already_known)\n else:\n print(\"Generating custom features WITHOUT docker container...\")\n all_new_features = execute_functions_in_order(\n features_already_known=features_already_known,\n script_fpath=custom_script_path)\n\n return all_new_features\n","sub_path":"mltsp/custom_feature_tools.py","file_name":"custom_feature_tools.py","file_ext":"py","file_size_in_byte":27333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"141239718","text":"import os.path\nfrom os.path import basename\nimport math\nimport numpy as np\nimport pandas as pd\nimport logging\n\nfrom keras.models import Model\nfrom keras.layers import Input, Dense\nfrom keras import optimizers\n\nfrom sklearn.model_selection import train_test_split\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\nfrom dfpl import options\nfrom dfpl import history as ht\nfrom dfpl import settings\n\n\ndef define_ac_model(\n input_size: int = 2048,\n encoding_dim: int = 256,\n my_loss: str = \"binary_crossentropy\",\n my_lr: float = 0.001,\n my_decay: float = 0.01) -> (Model, Model):\n \"\"\"\n This function provides an autoencoder model to reduce a certain input to a compressed version.\n\n :param encoding_dim: Size of the compressed representation. Default: 85\n :param input_size: Size of the input. Default: 2048\n :param my_loss: Loss function, see Keras Loss functions for potential values. Default: binary_crossentropy\n :param my_lr:\n :param my_decay:\n :return: a tuple of autoencoder and encoder models\n \"\"\"\n\n ac_optimizer = optimizers.Adam(learning_rate=my_lr,\n decay=my_decay)\n\n # get the number of meaningful hidden layers (latent space included)\n hidden_layer_count = round(math.log2(input_size / encoding_dim))\n\n # the input placeholder\n input_vec = Input(shape=(input_size,))\n\n # 1st hidden layer, that receives weights from input layer\n # equals bottle neck layer, if hidden_layer_count==1!\n encoded = Dense(units=int(input_size / 2),\n activation='relu')(input_vec)\n\n if hidden_layer_count > 1:\n # encoding layers, incl. 
bottle neck\n for i in range(1, hidden_layer_count):\n factor_units = 2 ** (i + 1)\n # print(f'{factor_units}: {int(input_size / factor_units)}')\n encoded = Dense(units=int(input_size / factor_units),\n activation='relu')(encoded)\n\n # 1st decoding layer\n factor_units = 2 ** (hidden_layer_count - 1)\n decoded = Dense(units=int(input_size / factor_units),\n activation='relu')(encoded)\n\n # decoding layers\n for i in range(hidden_layer_count - 2, 0, -1):\n factor_units = 2 ** i\n # print(f'{factor_units}: {int(input_size/factor_units)}')\n decoded = Dense(units=int(input_size / factor_units),\n activation='relu')(decoded)\n\n # output layer\n # The output layer needs to predict the probability of an output which needs\n # to either 0 or 1 and hence we use sigmoid activation function.\n decoded = Dense(units=input_size,\n activation='sigmoid')(decoded)\n\n else:\n # output layer\n decoded = Dense(units=input_size,\n activation='sigmoid')(encoded)\n\n autoencoder = Model(input_vec, decoded)\n encoder = Model(input_vec, encoded)\n\n autoencoder.summary(print_fn=logging.info)\n encoder.summary(print_fn=logging.info)\n\n # We compile the autoencoder model with adam optimizer.\n # As fingerprint positions have a value of 0 or 1 we use binary_crossentropy as the loss function\n autoencoder.compile(optimizer=ac_optimizer,\n loss=my_loss)\n\n return autoencoder, encoder\n\n\ndef autoencoder_callback(checkpoint_path: str) -> list:\n \"\"\"\n Callbacks for fitting the autoencoder\n\n :param checkpoint_path: The output directory to store the checkpoint weight files\n :return: List of ModelCheckpoint and EarlyStopping class.\n \"\"\"\n\n # enable this checkpoint to restore the weights of the best performing model\n checkpoint = ModelCheckpoint(checkpoint_path,\n verbose=1,\n period=settings.ac_train_check_period,\n save_best_only=True,\n mode='min',\n save_weights_only=True)\n\n # enable early stopping if val_loss is not improving anymore\n early_stop = EarlyStopping(patience=settings.ac_train_patience,\n min_delta=settings.ac_train_min_delta,\n verbose=1,\n restore_best_weights=True)\n\n return [checkpoint, early_stop]\n\n\ndef train_full_ac(df: pd.DataFrame, opts: options.TrainOptions) -> Model:\n \"\"\"\n Train an autoencoder on the given feature matrix X. Response matrix is only used to\n split meaningfully in test and train data set.\n\n :param opts: Command line arguments as defined in options.py\n :param df: Pandas dataframe that contains the smiles/inchi data for training the autoencoder\n :return: The encoder model of the trained autoencoder\n \"\"\"\n\n # Set up the model of the AC w.r.t. 
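With the defaults above (`input_size=2048`, `encoding_dim=256`), `hidden_layer_count = round(log2(2048/256)) = 3`, so the encoder narrows 2048 → 1024 → 512 → 256 and the decoder mirrors back 512 → 1024 → 2048. A small sketch reproducing the widths those loops generate:

```python
# Reproduces the layer-width arithmetic of define_ac_model (sketch only).
import math

def layer_widths(input_size=2048, encoding_dim=256):
    n = round(math.log2(input_size / encoding_dim))
    enc = [input_size // 2 ** i for i in range(1, n + 1)]
    dec = [input_size // 2 ** i for i in range(n - 1, 0, -1)] + [input_size]
    return enc, dec

print(layer_widths())  # ([1024, 512, 256], [512, 1024, 2048])
```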
the input size and the dimension of the bottle neck (z!)\n (autoencoder, encoder) = define_ac_model(input_size=opts.fpSize,\n encoding_dim=opts.encFPSize)\n\n # define output file for autoencoder and encoder weights\n if opts.ecWeightsFile == \"\":\n logging.info(\"No AC encoder weights file specified\")\n base_file_name = os.path.splitext(basename(opts.inputFile))[0]\n logging.info(f\"(auto)encoder weights will be saved in {base_file_name}.[auto]encoder.hdf5\")\n ac_weights_file = os.path.join(opts.outputDir, base_file_name + \".autoencoder.hdf5\")\n ec_weights_file = os.path.join(opts.outputDir, base_file_name + \".encoder.hdf5\")\n else:\n logging.info(f\"AC encoder will be saved in {opts.ecWeightsFile}\")\n base_file_name = os.path.splitext(basename(opts.ecWeightsFile))[0]\n ac_weights_file = os.path.join(opts.outputDir, base_file_name + \".autoencoder.hdf5\")\n ec_weights_file = os.path.join(opts.outputDir, opts.ecWeightsFile)\n\n # collect the callbacks for training\n callback_list = autoencoder_callback(checkpoint_path=ac_weights_file)\n\n # Select all fps that are valid and turn them into a numpy array\n # This step is crucial for speed!!!\n fp_matrix = np.array(df[df[\"fp\"].notnull()][\"fp\"].to_list(),\n dtype=settings.ac_fp_numpy_type,\n copy=settings.numpy_copy_values)\n logging.info(f\"Training AC on a matrix of shape {fp_matrix.shape} with type {fp_matrix.dtype}\")\n\n # split data into test and training data\n x_train, x_test = train_test_split(fp_matrix,\n test_size=0.2,\n random_state=42)\n logging.info(f\"AC train data shape {x_train.shape} with type {x_train.dtype}\")\n logging.info(f\"AC test data shape {x_test.shape} with type {x_test.dtype}\")\n\n auto_hist = autoencoder.fit(x_train, x_train,\n callbacks=callback_list,\n epochs=opts.epochs,\n batch_size=256,\n verbose=opts.verbose,\n validation_data=(x_test, x_test))\n logging.info(f\"Autoencoder weights stored in file: {ac_weights_file}\")\n\n ht.store_and_plot_history(base_file_name=os.path.join(opts.outputDir, base_file_name + \".AC\"),\n hist=auto_hist)\n\n encoder.save_weights(ec_weights_file)\n logging.info(f\"Encoder weights stored in file: {ec_weights_file}\")\n\n return encoder\n\n\ndef compress_fingerprints(dataframe: pd.DataFrame,\n encoder: Model) -> pd.DataFrame:\n \"\"\"\n Adds a column of the compressed version of the fingerprints to the original dataframe.\n\n :param dataframe: Dataframe containing a column named 'fp' with the fingerprints\n :param encoder: The trained autoencoder that is used for compressing the fingerprints\n :return: The input dataframe extended by a column containing the compressed version of the fingerprints\n \"\"\"\n logging.info(\"Adding compressed fingerprints\")\n idx = dataframe[dataframe[\"fp\"].notnull()].index\n fp_matrix = np.array(dataframe[dataframe[\"fp\"].notnull()][\"fp\"].to_list(),\n dtype=settings.ac_fp_numpy_type,\n copy=settings.numpy_copy_values)\n logging.info(f\"Using input matrix of shape {fp_matrix.shape} with type {fp_matrix.dtype}\")\n logging.info(\"Compressed fingerprints are added to input dataframe.\")\n dataframe['fpcompressed'] = pd.DataFrame({'fpcompressed': [s for s in encoder.predict(fp_matrix)]}, idx)\n\n return dataframe\n","sub_path":"dfpl/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":8548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"163223545","text":"import sys\r\n\r\nsys.path.append('./')\r\nimport os\r\nos.environ['CUDA_VISIBLE_DEVICES'] = 
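End to end, training and compression compose as below; a sketch assuming a dataframe with a binary-fingerprint `fp` column and an `options.TrainOptions`-style `opts` object configured as this module expects (fpSize, encFPSize, outputDir, epochs, etc.):

```python
# Hypothetical end-to-end use of train_full_ac / compress_fingerprints.
import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
df = pd.DataFrame({"fp": [rng.integers(0, 2, 2048).tolist() for _ in range(100)]})

encoder = train_full_ac(df, opts)       # opts: a configured TrainOptions instance
df = compress_fingerprints(df, encoder)
print(len(df["fpcompressed"].iloc[0]))  # 256-dimensional compressed fingerprint
```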
'2'\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nos.environ['KMP_WARNINGS'] = '0'\r\nimport logging\r\n\r\nlogging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\r\n\r\nfrom src_gat_fusion2.data_loader.patient_loader_supra_hiv import PatientLoader\r\nfrom src_gat_fusion2.models.gat_fusion2 import GAT\r\nfrom src_gat_fusion2.trainers.fusion2_trainer import GraphTrainer\r\nfrom src_gat_fusion2.utils.config import get_config_from_json, update_config_by_summary, update_config_by_datasize\r\nfrom src_gat_fusion2.utils.dirs import create_dirs\r\nfrom src_gat_fusion2.utils.logger import Logger\r\nfrom src_gat_fusion2.utils.utils import get_args\r\nfrom pathlib import Path\r\nimport shutil\r\nimport pickle as pkl\r\n\r\nimport tensorflow as tf\r\n\r\ntf.compat.v1.random.set_random_seed(1234)\r\n\r\n\r\ndef main():\r\n    # capture the config path from the run arguments\r\n    # then process the json configuration file\r\n\r\n    args = get_args()\r\n    print(\"getting config from {}\".format(args.config))\r\n    config, _ = get_config_from_json(args.config)\r\n    config = update_config_by_summary(config)  # add summary and model directory\r\n    # to remove the previous results, set -d 1\r\n    print(\"Whether to delete previous checkpoints: {}\".format(args.delete))\r\n    if args.delete == '1':\r\n        # delete existing model and summaries\r\n        print('Deleting existing models and logs from:')\r\n        # best_model_dir is under model dir\r\n        print(config.summary_dir, config.model_dir, config.best_model_dir)\r\n        path = Path(config.summary_dir)\r\n        shutil.rmtree(path)\r\n        path = Path(config.model_dir)\r\n        shutil.rmtree(path)\r\n        path = Path(config.best_model_dir)\r\n        shutil.rmtree(path)\r\n\r\n    # create the experiments dirs\r\n    # summary dir and model dir are defined in the json config\r\n    create_dirs([config.summary_dir, config.model_dir, config.best_model_dir])\r\n\r\n    # create your data generator to load train data\r\n\r\n    print(\"Training using {}\".format(config.model_version))\r\n\r\n    Model = GAT\r\n    Trainer = GraphTrainer\r\n\r\n    feature_path = config.exp_dir + config.ind_feature_path\r\n    train_mask_path = config.exp_dir + config.train_mask_path\r\n    test_mask_path = config.exp_dir + config.test_mask_path\r\n    sex_adj_path = config.sex_adj_path\r\n    venue_adj_path = config.venue_adj_path\r\n    graph_feature_path = config.exp_dir + config.graph_feature_path\r\n    psk2index_path = config.exp_dir + config.psk2index_path\r\n\r\n    # 10 random realizations of train-test split and average, no valid is needed\r\n    train_loader = PatientLoader(config, feature_path, sex_adj_path, venue_adj_path, train_mask_path,\r\n                                 graph_feature_path, psk2index_path, is_train=True)\r\n    train_loader.load()\r\n\r\n    test_loader = PatientLoader(config, feature_path, sex_adj_path, venue_adj_path, test_mask_path, graph_feature_path,\r\n                                psk2index_path, is_train=False)\r\n    test_loader.load()\r\n\r\n    # add num_iter_per_epoch to config for trainer\r\n    config = update_config_by_datasize(config, train_loader.get_datasize(),\r\n                                       test_loader.get_datasize(),\r\n                                       train_loader.get_feature_size())\r\n\r\n    # tfconfig = tf.ConfigProto(device_count={'CPU': 0})\r\n    tfconfig = tf.ConfigProto()\r\n    tfconfig.gpu_options.allow_growth = True\r\n    tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.4\r\n\r\n    # create tensorflow session\r\n    with tf.Session(config=tfconfig) as sess:\r\n        # create an instance of the model you want\r\n        model = Model(config)\r\n        # create tensorboard logger\r\n        logger = Logger(sess, config)\r\n        # create trainer and pass all the previous components to it\r\n        trainer = 
Trainer(sess, model, train_loader, test_loader, config, logger)\r\n        # load model if exists\r\n        # model.load(sess)\r\n        # here you train your model\r\n        trainer.train()\r\n\r\n    # tester\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"src_gat_fusion2/mains/fusion2_main_for_hiv.py","file_name":"fusion2_main_for_hiv.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"512568131","text":"import unittest\nfrom lost_hat_login_tests import LostHatLoginTests\nfrom lost_hat_front_page_tests import LostHatFrontPageTests\n\n\ndef sanity_suite():\n    test_suite = unittest.TestSuite()\n    test_suite.addTest(LostHatLoginTests('test_logging_positive'))\n    test_suite.addTest(unittest.makeSuite(LostHatFrontPageTests))\n    return test_suite\n\n\nif __name__ == '__main__':\n    runner = unittest.TextTestRunner(verbosity=2)\n    runner.run(sanity_suite())\n","sub_path":"pt4_selenium_tests/testsuite_sanity_tests.py","file_name":"testsuite_sanity_tests.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"184159351","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019-11-07 09:40\n# @Author : Lqq/linqingqing\n# @Site : \n# @File : web_chinaz.py\n# @Software: PyCharm\n\nfrom urllib.parse import urlencode\n\nclass WebUrl:\n    def __init__(self):\n        # Set request headers to mimic a browser visit\n        self.headers = {\n            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n        }\n\n    def get_url(self, keyword, page):\n        data = {\n            'wd': 'site:' + keyword,  # the search keyword\n            'pn': page * 10,  # page offset\n        }\n        # Convert the dict into URL query parameters\n        url = 'https://www.baidu.com/s?' 
+ urlencode(data)\n        return url\n\nif __name__ == \"__main__\":\n    # Illustrative call (the original invoked a non-existent get_web(); the keyword is a placeholder)\n    print(WebUrl().get_url('example.com', 0))","sub_path":"util/web_subdomain.py","file_name":"web_subdomain.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"178071316","text":"import inspect\nimport os\nimport scipy.io as sio\nimport subprocess\nimport xarray as xr\n\nfrom pathlib import Path\n\nimport aurora\nimport mt_metadata\n\ninit_file = inspect.getfile(aurora)\nAURORA_PATH = Path(init_file).parent.parent\nTEST_PATH = AURORA_PATH.joinpath(\"tests\")\nSANDBOX = AURORA_PATH.joinpath(\"aurora\", \"sandbox\")\nCONFIG_PATH = AURORA_PATH.joinpath(\"aurora\", \"config\")\nBAND_SETUP_PATH = CONFIG_PATH.joinpath(\"emtf_band_setup\")\nDATA_PATH = SANDBOX.joinpath(\"data\")\nDATA_PATH.mkdir(exist_ok=True, parents=True)\nFIGURES_PATH = DATA_PATH.joinpath(\"figures\")\nFIGURES_PATH.mkdir(exist_ok=True, parents=True)\nTEST_BAND_FILE = DATA_PATH.joinpath(\"bandtest.nc\")\nmt_metadata_init = inspect.getfile(mt_metadata)\nMT_METADATA_DATA = Path(mt_metadata_init).parent.parent.joinpath(\"data\")\n\n\ndef execute_subprocess(cmd, **kwargs):\n    \"\"\"\n    A wrapper for subprocess.call.\n\n    Parameters\n    ----------\n    cmd : string\n        command as it would be typed in a terminal\n    kwargs\n\n    Returns\n    -------\n\n    \"\"\"\n    exit_status = subprocess.call([cmd], shell=True, **kwargs)\n    if exit_status != 0:\n        raise Exception(\"Failed to execute \\n {}\".format(cmd))\n    return\n\n\ndef execute_command(cmd, **kwargs):\n    \"\"\"\n    Executes command in terminal from script.\n\n    Parameters:\n        cmd (str): command to execute from a terminal\n        kwargs: exec_dir (str): the directory from which to execute\n        kwargs: no_exception: suppress output if exception\n\n    Other Parameters:\n        exit_status: :code:`0` is good, otherwise there is some problem\n\n    .. note:: When executing :code:`rm *` this crashes if the directory we are removing\n    from is empty\n\n    .. note:: if you can you should probably use execute_subprocess() instead\n    \"\"\"\n    exec_dir = kwargs.get(\"exec_dir\", os.path.expanduser(\"~/\"))\n    allow_exception = kwargs.get(\"allow_exception\", True)\n    print(\"executing from {}\".format(exec_dir))\n    cwd = os.getcwd()\n    os.chdir(exec_dir)\n    exit_status = os.system(cmd)\n    if exit_status != 0:\n        print(f\"exit_status of {cmd} = {exit_status}\")\n        if allow_exception:\n            raise Exception(f\"Failed to successfully execute \\n {cmd}\")\n    os.chdir(cwd)\n\n\n# \ndef save_complex(data_array, *args, **kwargs):\n    \"\"\"\n    netcdf and h5 do not handle complex values. This method is a workaround.\n    https://stackoverflow.com/questions/47162983/how-to-save-xarray-dataarray-with-complex128-data-to-netcdf\n    Example Usage:\n    band_da is an xarray\n    save_complex(band_da, TEST_BAND_FILE)\n    band_da = read_complex(TEST_BAND_FILE)\n\n    Parameters\n    ----------\n    data_array\n    args\n    kwargs\n\n    Returns\n    -------\n\n    \"\"\"\n    ds = xr.Dataset({\"real\": data_array.real, \"imag\": data_array.imag})\n    return ds.to_netcdf(*args, **kwargs)\n\n\ndef read_complex(*args, **kwargs):\n    ds = xr.open_dataset(*args, **kwargs)\n    return ds[\"real\"] + ds[\"imag\"] * 1j\n\n\n# \n\n\ndef save_to_mat(data, variable_name, filename):\n    \"\"\"\n    Example Usage:\n    x = X.to_array(dim=\"channel\")\n    save_to_mat(x.data, \"x\", \"x.mat\")\n\n    Reading into matlab or Octave:\n    tmp = load(\"x.mat\");\n    data = tmp.x;\n\n    Parameters\n    ----------\n    data : numpy array\n        the data to save to file. 
It's fine if this is complex-valued.\n    variable_name : string\n        The name that we use to reference the variable within the struct in the matfile.\n    filename : string\n        The filepath to output\n\n    Returns\n    -------\n\n    \"\"\"\n    sio.savemat(filename, {variable_name: data})\n    return\n","sub_path":"aurora/general_helper_functions.py","file_name":"general_helper_functions.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"303777303","text":"# -*- coding: utf-8 -*-\n# @Time : Thu Mar 7 14:52:23 2019\n# @Author : Yao Qiang\n# @Email : qiangyao1988wsu@gmail.com\n# @File : TrainSet.py\n# @Software: Spyder\n# @Python Version: python3.6\n\n\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\n\nclass TrainSet(data.Dataset):\n    '''\n    Create data loader\n    '''\n    def __init__(self, eval=False):\n        \n        # load data and label\n        datas = np.load('../dataset/data.npy')\n        labels = np.load('../dataset/label.npy')\n        \n        index = np.arange(0, len(datas), 1, dtype=np.int)\n        \n        # set random seed to make sure every time we get the same subset\n        np.random.seed(123)\n        np.random.shuffle(index)\n        \n        # if eval is true, get 10% of data as cross validation dataset\n        if eval:\n            index = index[:int(len(datas) * 0.1)]\n        else:\n            index = index[int(len(datas) * 0.1):]\n        \n        self.data = datas[index]\n        self.label = labels[index]\n        np.random.seed()\n\n    def __getitem__(self, index):\n        return torch.from_numpy(self.data[index]),torch.from_numpy(self.label[index])\n\n    def __len__(self):\n        return len(self.data)\n    ","sub_path":"scripts/TrainSet.py","file_name":"TrainSet.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"205528741","text":"from nnf import Var,true\nfrom lib204 import Encoding\n\nfrom nnf import NNF\nfrom nnf.operators import iff\n\ndef implication(l, r):\n    return l.negate() | r\n\ndef neg(f):\n    return f.negate()\n\nNNF.__rshift__ = implication\nNNF.__invert__ = neg\n\ndef iff(left, right):\n    return (left.negate() | right) & (right.negate() | left)\n\n# Variable Declarations of the three possible outcomes in a model of Connect Four\nBlackWin = Var(\"Black has Won the Game\")\nRedWin = Var(\"Red has Won the Game\")\nNoWin = Var(\"No one has Won the Game\")\n\n# ConnectFour Game Board Dimensions\nrowNum = 6\ncolumnNum = 7\n\n# Creating variable boards for each color piece, empty piece, and partialCount variables. 
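\n# For example (illustrative reading of the encoding below): blackBoard[2][3] is the Boolean\n# variable \"Black(2,3)\", true in a model exactly when a black piece occupies row 2, column 3;\n# blackPartialCount[i][j][k] is true when k black pieces appear at or before cell (i,j) in\n# row-major order.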
\nblackBoard=[]\nredBoard=[]\nemptyBoard=[]\nblackPartialCount=[]\nredPartialCount=[]\nfor i in range(rowNum): \n    blackBoard.append([])\n    redBoard.append([])\n    emptyBoard.append([])\n    blackPartialCount.append([])\n    redPartialCount.append([])\n    for j in range(columnNum):\n        blackBoard[i].append(Var(f\"Black({i},{j})\"))\n        redBoard[i].append(Var(f\"Red({i},{j})\"))\n        emptyBoard[i].append(Var(f\"Empty({i},{j})\"))\n        blackPartialCount[i].append([])\n        redPartialCount[i].append([])\n        for k in range(rowNum * columnNum + 1):\n            blackPartialCount[i][j].append(Var(f\"Black Count at({i},{j}) is {k}\"))\n            redPartialCount[i][j].append(Var(f\"Red Count at({i},{j}) is {k}\"))\n\n# Creating total piece count variable board\ntotalCount=[] \nfor i in range(rowNum * columnNum + 1):\n    totalCount.append(Var(f\"Total Black Count is {i}\"))\n\n# Creating red and black row wins variable boards\nblackRow=[]\nredRow=[]\nfor i in range(rowNum): \n    blackRow.append([])\n    redRow.append([])\n    for j in range(columnNum - 3):\n        blackRow[i].append(Var(f\"BlackWinningRow({i},{j})\"))\n        redRow[i].append(Var(f\"RedWinningRow({i},{j})\"))\n\n\n# Creating red and black diagonal wins variable boards\nleftBlackDiagonal=[]\nrightBlackDiagonal=[]\nleftRedDiagonal=[]\nrightRedDiagonal=[]\nfor i in range(rowNum- 3): \n    leftBlackDiagonal.append([])\n    rightBlackDiagonal.append([])\n    leftRedDiagonal.append([])\n    rightRedDiagonal.append([])\n    for j in range(columnNum - 3):\n        leftBlackDiagonal[i].append(Var(f\"LeftBlackWinningDiagonal({i},{j})\"))\n        rightBlackDiagonal[i].append(Var(f\"RightBlackWinningDiagonal({i},{j})\"))\n        leftRedDiagonal[i].append(Var(f\"LeftRedWinningDiagonal({i},{j})\"))\n        rightRedDiagonal[i].append(Var(f\"RightRedWinningDiagonal({i},{j})\"))\n\n# Creating red and black column wins variable boards\nblackColumn=[]\nredColumn=[]\nfor i in range(rowNum- 3): \n    blackColumn.append([])\n    redColumn.append([])\n    for j in range(columnNum):\n        blackColumn[i].append(Var(f\"BlackWinningColumn({i},{j})\"))\n        redColumn[i].append(Var(f\"RedWinningColumn({i},{j})\"))\n\n# Adds/creates constraints for a row of color, boardColor \ndef rowWin(E, winRowColor, boardColor):\n    for i in range(rowNum):\n        for j in range(columnNum - 3):\n            #Winning row and its position of either 4 red or 4 black slots within the row. \n            E.add_constraint(iff(winRowColor[i][j], (boardColor[i][j] & boardColor[i][j + 1] & boardColor[i][j + 2] & boardColor[i][j + 3]))) \n\n            #Checks that there is at least one possible route to play in order to win by a row unless top row\n            if (i > 0):\n                E.add_constraint(winRowColor[i][j] >> (emptyBoard[i-1][j] | emptyBoard[i-1][j + 1] | emptyBoard[i-1][j + 2] | emptyBoard[i-1][j + 3]))\n\n\n            #Checks that only a single row channel can be a winning row channel\n            special = winRowColor[i][j]\n            false = ~true\n            for i2 in range(rowNum):\n                for j2 in range(columnNum - 3):\n                    if (i != i2):\n                        false |= winRowColor[i2][j2]\n            E.add_constraint(special >> ~false)\n    return E\n    \n# Adds/creates constraints for a column of color, boardColor\ndef columnWin(E, winColumnColor, boardColor):\n    for i in range(rowNum - 3):\n        for j in range(columnNum):\n            # Winning column and its position of either 4 red or 4 black slots within the column. 
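\n            # e.g. winColumnColor[1][4] holds exactly when this colour occupies (1,4), (2,4), (3,4) and (4,4):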
\n            E.add_constraint(iff(winColumnColor[i][j], (boardColor[i][j] & boardColor[i+1][j] & boardColor[i+2][j] & boardColor[i+3][j])))\n\n            # Checks that there is a possible route to play in order to win by a column unless top row\n            if (i > 0):\n                E.add_constraint(winColumnColor[i][j] >> (emptyBoard[i-1][j]))\n            \n            #Checks that only one column can win\n            special = winColumnColor[i][j]\n            false = ~true\n            for i2 in range(rowNum - 3):\n                for j2 in range(columnNum):\n                    if (i != i2) | (j != j2):\n                        false |= winColumnColor[i2][j2]\n            E.add_constraint(special >> ~false)\n    return E\n\n# Adds/creates constraints that limit the position where a color's column can be, \n# if other rows or diagonals are also true for that same color. \ndef columnRules(E, winColumnColor, winRowColor, leftWinDiagonalColor, rightWinDiagonalColor):\n    for i in range(rowNum - 3):\n        for j in range(columnNum):\n            special = winColumnColor[i][j]\n            false = ~true\n            for i2 in range(rowNum):\n                for j2 in range(columnNum):\n                    if (j2 < columnNum - 3):\n                        if ((i != i2) | ((j - j2) >= 4)):\n                            false |= winRowColor[i2][j2] # Last piece of color's column must be somewhere in color's row\n                    if (j2 < (columnNum - 3)):\n                        if (i2 < rowNum - 3):\n                            if (((i != i2) | (j != j2)) & ((i-1 != i2) | (j-1 != j2)) & ((i-2 != i2) | (j-2 != j2)) & ((i-3 != i2) | (j-3 != j2))):\n                                false |= leftWinDiagonalColor[i2][j2] # Last piece of color's column must be somewhere in color's left diagonal\n\n                            if (((i != i2) | (j != j2 + 3)) & ((i-1 != i2) | (j+1 != j2 + 3)) & ((i-2 != i2) | (j+2 != j2 + 3)) & ((i-3 != i2) | (j+3 != j2 + 3))):\n                                false |= rightWinDiagonalColor[i2][j2] # Last piece of color's column must be somewhere in color's right diagonal\n            E.add_constraint(special >> ~false)\n    return E\n\n# Adds/creates constraints for a left facing diagonal of color, boardColor\ndef leftDiagonalWin(E, leftWinDiagonalColor, boardColor):\n    for i in range(rowNum - 3):\n        for j in range(columnNum - 3):\n\n            #Winning diagonal going right and down.\n            E.add_constraint(iff(leftWinDiagonalColor[i][j], (boardColor[i][j] & boardColor[i+1][j+1] & boardColor[i+2][j+2] & boardColor[i+3][j+3])))\n\n            # Checks that there is a possible route to play in order to win by a left diagonal unless top row\n            if (i > 0):\n                E.add_constraint(leftWinDiagonalColor[i][j] >> (emptyBoard[i-1][j] | emptyBoard[i][j+1] | emptyBoard[i+1][j+2] | emptyBoard[i+2][j+3])) \n            \n            #Only one left facing diagonal channel can be a winning diagonal channel\n            special = leftWinDiagonalColor[i][j]\n            false = ~true\n            for i2 in range(rowNum - 3):\n                for j2 in range(columnNum - 3):\n                    if (i != i2) | (j != j2):\n                        if (i + 1 != i2) | (j + 1 != j2):\n                            if (i + 2 != i2) | (j + 2 != j2):\n                                if (i - 1 != i2) | (j - 1 != j2):\n                                    if (i - 2 != i2) | (j - 2 != j2):\n                                        false |= leftWinDiagonalColor[i2][j2]\n            E.add_constraint(special >> ~false)\n    return E\n\n# Adds/creates constraints for a right facing diagonal of color, boardColor\ndef rightDiagonalWin(E, rightWinDiagonalColor, boardColor):\n    for i in range(rowNum - 3):\n        for j in range(columnNum - 4, columnNum):\n            #Winning diagonal going left and down.\n            E.add_constraint(iff(rightWinDiagonalColor[i][j-3], (boardColor[i][j] & boardColor[i+1][j-1] & boardColor[i+2][j-2] & boardColor[i+3][j-3])))\n\n            # Checks that there is a possible route to play in order to win by a right diagonal unless top row\n            if (i > 0):\n                E.add_constraint(rightWinDiagonalColor[i][j-3] >> (emptyBoard[i-1][j] | emptyBoard[i][j-1] | emptyBoard[i+1][j - 2] | emptyBoard[i+2][j - 3]))\n\n            # Only one right facing diagonal channel can be a winning 
diagonal channel\n            special = rightWinDiagonalColor[i][j-3]\n            false = ~true\n            for i2 in range(rowNum - 3):\n                for j2 in range(columnNum - 4, columnNum):\n                    if (i != i2) | (j != j2):\n                        if (i - 1 != i2) | (j + 1 != j2):\n                            if (i - 2 != i2) | (j + 2 != j2):\n                                if (i + 1 != i2) | (j - 1 != j2):\n                                    if (i + 2 != i2) | (j - 2 != j2):\n                                        false |= rightWinDiagonalColor[i2][j2-3]\n            E.add_constraint(special >> ~false)\n    return E\n\n# Builds a constraint checking that every position below the current one is occupied\ndef gravityRule(i, j):\n    f = true\n    for slot in range(i + 1, rowNum):\n        f &= ~emptyBoard[slot][j]\n    return f\n\n# Holds constraints/rules that make up a valid connect four board.\ndef validBoard(E):\n    for i in range(rowNum):\n        for j in range(columnNum): \n\n            # If position(i, j) is empty, then neither black nor red can occupy position(i, j).\n            E.add_constraint(emptyBoard[i][j] >> (~redBoard[i][j] & ~blackBoard[i][j]))\n\n            # If position(i, j) is red, then neither black nor empty can occupy position(i, j)\n            # Calls gravity constraint\n            E.add_constraint(redBoard[i][j] >> (~blackBoard[i][j] & ~emptyBoard[i][j] & gravityRule(i, j)))\n\n            # If position(i, j) is black, then neither red nor empty can occupy position(i, j)\n            # Calls gravity constraint\n            E.add_constraint(blackBoard[i][j] >> (~redBoard[i][j] & ~emptyBoard[i][j] & gravityRule(i, j)))\n\n            # Here to make sure implication works properly above, exactly one has to be true.\n            E.add_constraint(emptyBoard[i][j] | redBoard[i][j] | blackBoard[i][j])\n\n    # General: ColorWin if and only if there is ColorRowWin, or \n    # ColorColumnWin, or ColorDiagonalWin.\n    E.add_constraint(iff(BlackWin, (BlackColumnWin() | BlackRowWin() | leftBlackDiagonalWin() | rightBlackDiagonalWin())))\n    E.add_constraint(iff(RedWin, (RedColumnWin() | RedRowWin() | leftRedDiagonalWin() | rightRedDiagonalWin())))\n\n    # General: NoWin if and only if there is notColorRowWin, and \n    # notColorColumnWin, and notColorDiagonalWin.\n    E.add_constraint(iff(NoWin, ((~BlackColumnWin() & ~BlackRowWin() & ~leftBlackDiagonalWin() & ~rightBlackDiagonalWin()) & (~RedColumnWin() & ~RedRowWin() & ~leftRedDiagonalWin() & ~rightRedDiagonalWin()))))\n    \n    # All possibilities of Connect Four Game outcome\n    E.add_constraint(iff(BlackWin, (~RedWin & ~NoWin)))\n    E.add_constraint(iff(RedWin, (~BlackWin & ~NoWin)))\n    E.add_constraint(iff(NoWin, (~RedWin & ~BlackWin)))\n\n    return E\n\n# Checks if any black row is true, return false if one is not found\ndef BlackRowWin():\n    f = ~true\n    for i in range(rowNum): \n        for j in range(columnNum - 3):\n            f |= blackRow[i][j]\n    return f\n\n# Checks if any red row is true, return false if one is not found\ndef RedRowWin():\n    f = ~true\n    for i in range(rowNum): \n        for j in range(columnNum - 3):\n            f |= redRow[i][j]\n    return f \n\n# Checks if any black column is true, return false if one is not found\ndef BlackColumnWin():\n    f = ~true\n    for i in range(rowNum- 3): \n        for j in range(columnNum):\n            f |= blackColumn[i][j]\n    return f \n\n# Checks if any red column is true, return false if one is not found\ndef RedColumnWin():\n    f = ~true\n    for i in range(rowNum- 3): \n        for j in range(columnNum):\n            f |= redColumn[i][j]\n    return f \n\n# Checks if any left black diagonal is true, return false if one is not found\ndef leftBlackDiagonalWin():\n    f = ~true\n    for i in range(rowNum- 3): \n        for j in range(columnNum - 3):\n            f |= leftBlackDiagonal[i][j]\n    return f\n\n# Checks if any right black diagonal is true, return false if one is not found\ndef rightBlackDiagonalWin():\n    f = ~true\n    for i in 
range(rowNum- 3): \n        for j in range(columnNum - 3):\n            f |= rightBlackDiagonal[i][j]\n    return f\n\n# Checks if any left red diagonal is true, return false if one is not found\ndef leftRedDiagonalWin():\n    f = ~true\n    for i in range(rowNum- 3): \n        for j in range(columnNum - 3):\n            f |= leftRedDiagonal[i][j]\n    return f \n\n# Checks if any right red diagonal is true, return false if one is not found\ndef rightRedDiagonalWin():\n    f = ~true\n    for i in range(rowNum- 3): \n        for j in range(columnNum - 3):\n            f |= rightRedDiagonal[i][j]\n    return f \n\n\n# Prints a Connect Four board using computer assigned values from .solve dictionary\ndef printBoard(dic):\n    board=[]\n    for i in range(rowNum): \n        board.append([])\n        for j in range(columnNum):\n            board[i].append(\"-\")\n    if dic == None:\n        print(\"NonSatisfiable Board\")\n        return []\n    else:\n        for key, value in dic.items():\n            if (key[:6] == \"Black(\") and (value == True):\n                xVal = int(key[-4])\n                yVal = int(key[-2])\n                board[xVal][yVal] = \"B\"\n            elif (key[:4] == \"Red(\") and (value == True):\n                xVal = int(key[-4])\n                yVal = int(key[-2])\n                board[xVal][yVal] = \"R\"\n            elif (key == \"Black has Won the Game\") and (value == True):\n                print(\"Black has Won the Game with:\")\n            elif (key == \"Red has Won the Game\") and (value == True):\n                print(\"Red has Won the Game with:\")\n            elif (key == \"No one has Won the Game\") and (value == True):\n                print(\"No one has Won the Game!\")\n    for row in board:\n        print(row)\n\n# Builds an example full theory of Connect Four for our setting and returns it.\ndef connectFour():\n    E = Encoding()\n    E = validBoard(E)\n    E = rowWin(E, blackRow, blackBoard)\n    E = rowWin(E, redRow, redBoard)\n    E = columnWin(E, blackColumn, blackBoard)\n    E = columnWin(E, redColumn, redBoard)\n    E = leftDiagonalWin(E, leftBlackDiagonal, blackBoard)\n    E = leftDiagonalWin(E, leftRedDiagonal, redBoard)\n    E = rightDiagonalWin(E, rightBlackDiagonal, blackBoard)\n    E = rightDiagonalWin(E, rightRedDiagonal, redBoard)\n    E = columnRules(E, blackColumn, blackRow, leftBlackDiagonal, rightBlackDiagonal)\n    E = columnRules(E, redColumn, redRow, leftRedDiagonal, rightRedDiagonal)\n    E = sameCount(E, blackPartialCount, blackBoard)\n    E = sameCount(E, redPartialCount, redBoard)\n    return E\n\n# Counting the number of pieces of a single color,\n# and making the number of black pieces equal to the number of red pieces on the board\n# (both colors' final counts are tied to the same totalCount variables, which forces them to be equal).\ndef sameCount(E, partialCount, boardColor):\n\n    # Final partial counts should be equal to the full count\n    for c in range(rowNum * columnNum + 1):\n        E.add_constraint(iff(totalCount[c], partialCount[rowNum- 1][columnNum - 1][c]))\n\n    # You can't have more pieces than you've already seen\n    for i in range(rowNum):\n        for j in range(columnNum):\n            for c in range((i * 7) + j + 2,rowNum * columnNum + 1):\n                E.add_constraint(~partialCount[i][j][c])\n\n    # First index: only black piece or red piece could possibly be true\n    E.add_constraint(iff(partialCount[0][0][0], ~boardColor[0][0]))\n    E.add_constraint(iff(partialCount[0][0][1], boardColor[0][0]))\n\n    #General pattern: Looks at the other color pieces to decide the current color piece.\n    for x in range(1, rowNum * columnNum):\n        i = x // columnNum\n        j = x % columnNum\n        E.add_constraint(iff(partialCount[i][j][0], partialCount[(i-1) if (j==0) else i][(columnNum-1) if (j==0) else (j-1)][0] & ~boardColor[i][j]))\n        for c in range(1,x+2):\n            increased = partialCount[(i-1) if (j==0) else i][(columnNum-1) if (j==0) else (j-1)][c-1] & boardColor[i][j]\n            stay_same = partialCount[(i-1) if (j==0) else i][(columnNum-1) if (j==0) 
else (j-1)][c] & ~boardColor[i][j]\n            E.add_constraint(iff(partialCount[i][j][c], increased | stay_same))\n    return E\n\n# Function exploring Black wins in our model of Connect Four.\ndef numBlackWins(E):\n    E.add_constraint(BlackWin)\n    return E.count_solutions()\n\n# Function exploring Red wins in our model of Connect Four.\ndef numRedWins(E):\n    E.add_constraint(RedWin)\n    return E.count_solutions()\n\n# Function exploring No wins in our model of Connect Four.\ndef numNoWins(E):\n    E.add_constraint(NoWin)\n    return E.count_solutions()\n\nif __name__ == \"__main__\":\n\n    E = connectFour()\n\n    print(\"\\nSatisfiable: %s\" % E.is_satisfiable())\n\n    # Uncomment if wanting to explore number of Black wins in our model of ConnectFour\n    #print(\"# Solutions: %d\" % numBlackWins(E))\n\n    # Uncomment if wanting to explore number of Red wins in our model of ConnectFour\n    #print(\"# Solutions: %d\" % numRedWins(E))\n\n    # Uncomment if wanting to explore number of No wins in our model of ConnectFour\n    #print(\"# Solutions: %d\" % numNoWins(E))\n\n    dic = E.solve()\n    print(\" Solution: %s \\n\" % dic)\n    printBoard(dic)\n    \n    # print(\"\\nVariable likelihoods:\")\n    # print(\"  %s: %.2f\" % (BlackWin, E.likelihood(BlackWin)))\n    # print()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":16600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"346071114","text":"import logging\nimport json\nfrom flask import (flash,redirect,send_file,jsonify,make_response,url_for,session,abort,request)\nfrom ._compat import as_unicode\nfrom .filemanager import uuid_originalname\nfrom .widgets import GroupFormListWidget,ListMasterWidget\nfrom .baseviews import BaseView,BaseCRUDView,BaseFormView,expose,expose_api\nfrom .security.decorators import has_access,permission_name,has_access_api\nfrom .urltools import *\nfrom .const import FLAMSG_ERR_SEC_ACCESS_DENIED\n\nlog = logging.getLogger(__name__)\n\n\nclass IndexView(BaseView):\n    route_base = ''\n    default_view = 'index'\n    index_template = 'appbuilder/index.html'\n\n    @expose('/')\n    def index(self):\n        self.update_redirect()\n        return self.render_template(self.index_template,appbuilder=self.appbuilder)\n\n\nclass UtilView(BaseView):\n    route_base = ''\n    default_view = 'back'\n\n    @expose('/back')\n    def back(self):\n        return redirect(self.get_redirect())\n\n\nclass SimpleFormView(BaseFormView):\n    @expose(\"/form\",methods=['GET'])\n    @has_access\n    def this_form_get(self):\n        self._init_vars()\n        form = self.form.refresh()\n\n        self.form_get(form)\n        widgets = self._get_edit_widget(form=form)\n        self.update_redirect()\n        return self.render_template(self.form_template,\n                                    title = self.form_title,\n                                    widgets=widgets,\n                                    appbuilder=self.appbuilder\n                                    )\n\n    @expose(\"/form\",methods=['POST'])\n    @has_access\n    def this_form_post(self):\n        self._init_vars()\n        form = self.form.refresh()\n\n        if form.validate_on_submit():\n            response = self.form_post(form)\n            if not response:\n                return redirect(self.get_redirect())\n            return response\n        else:\n            widgets = self._get_edit_widget(form=form)\n            return self.render_template(self.form_template,\n                                        title=self.form_title,\n                                        widgets=widgets,\n                                        appbuilder=self.appbuilder\n                                        )\n\n\nclass PublicFormView(BaseFormView):\n\n    @expose(\"/form\",methods=['GET'])\n    def this_form_get(self):\n        self._init_vars()\n        form = self.form.refresh()\n        self.form_get(form)\n        widgets = self._get_edit_widget(form=form)\n        self.update_redirect()\n        return self.render_template(self.form_template,\n                                    title=self.form_title,\n                                    widgets=widgets,\n                                    
appbuilder=self.appbuilder\n                                    )\n\n    @expose(\"/form\",methods=['POST'])\n    def this_form_post(self):\n        self._init_vars()\n        form = self.form.refresh()\n        if form.validate_on_submit():\n            response = self.form_post(form)\n            if not response:\n                return redirect(self.get_redirect())\n            return response\n        else:\n            widgets = self._get_edit_widget(form=form)\n            return self.render_template(self.form_template,\n                                        title=self.form_title,\n                                        widgets=widgets,\n                                        appbuilder=self.appbuilder\n                                        )\n\n\nclass RestCRUDView(BaseCRUDView):\n    \"\"\"\n    This class view exposes REST methods for CRUD operations on your models\n    \"\"\"\n\n    def _search_form_json(self):\n        pass\n\n    def _get_api_urls(self,api_urls=None):\n\n        view_name = self.__class__.__name__\n        api_urls = api_urls or {}\n        api_urls['read'] = url_for(view_name + \".api_read\")\n        api_urls['delete'] = url_for(view_name + \".api_delete\",pk=\"\")\n        api_urls['create'] = url_for(view_name + \".api_create\")\n        api_urls['update'] = url_for(view_name + \".api_update\",pk=\"\")\n        return api_urls\n\n    def _get_modelview_urls(self,modelview_urls=None):\n        view_name = self.__class__.__name__\n        modelview_urls = modelview_urls or {}\n        modelview_urls['show'] = url_for(view_name + \".show\",pk=\"\")\n        modelview_urls['add'] = url_for(view_name + \".add\")\n        modelview_urls['edit'] = url_for(view_name + \".edit\",pk=\"\")\n        return modelview_urls\n\n    @expose('/api',methods=['GET'])\n    @has_access_api\n    @permission_name('list')\n    def api(self):\n        view_name = self.__class__.__name__\n        api_urls = self._get_api_urls()\n        modelview_urls = self._get_modelview_urls()\n\n        #Collects the CRUD permissions\n        can_show = self.appbuilder.sm.has_access('can_show',view_name)\n        can_edit = self.appbuilder.sm.has_access('can_edit',view_name)\n        can_add = self.appbuilder.sm.has_access('can_add',view_name)\n        can_delete = self.appbuilder.sm.has_access('can_delete',view_name)\n\n        #Prepares the form with the search fields and makes it JSON serializable\n        form_fields = {}\n        search_filters = {}\n        dict_filters = self._filters.get_search_filters()\n        form = self.search_form.refresh()\n        for col in self.search_columns:\n            form_fields[col] = form[col]()\n            search_filters[col] = [as_unicode(flt.name) for flt in dict_filters[col]]\n\n        ret_json = jsonify(can_show=can_show,\n                           can_add=can_add,\n                           can_edit=can_edit,\n                           can_delete=can_delete,\n                           label_columns=self._label_columns_json(),\n                           list_columns=self.list_columns,\n                           order_columns=self.order_columns,\n                           page_size=self.page_size,\n                           modelview_name=view_name,\n                           api_urls=api_urls,\n                           search_filters=search_filters,\n                           search_fields=form_fields,\n                           modelview_urls=modelview_urls)\n        response = make_response(ret_json,200)\n        response.headers['Content-Type'] = \"application/json\"\n        return response\n\n    @expose_api(name='read',url='/api/read',methods=['GET'])\n    @has_access_api\n    @permission_name('list')\n    def api_read(self):\n\n        #Get arguments for ordering\n        if get_order_args().get(self.__class__.__name__):\n            order_column,order_direction = get_order_args().get(self.__class__.__name__)\n        else:\n            order_column,order_direction = '',''\n        page = get_page_args().get(self.__class__.__name__)\n        page_size = get_page_size_args().get(self.__class__.__name__)\n        get_filter_args(self._filters)\n        joined_filters = self._filters.get_joined_filters(self._base_filters)\n        count,lst = self.datamodel.query(joined_filters,order_column,order_direction,page=page,page_size=page_size)\n        result = self.datamodel.get_values_json(lst,self.list_columns)\n        pks = self.datamodel.get_keys(lst)\n        ret_json = 
jsonify(label_columns=self._label_columns_json(),\n                           list_columns=self.list_columns,\n                           order_columns=self.order_columns,\n                           page=page,\n                           page_size=page_size,\n                           count=count,\n                           modelview_name=self.__class__.__name__,\n                           pks=pks,\n                           result=result)\n        response = make_response(ret_json,200)\n        response.headers['Content-Type'] = \"application/json\"\n        return response\n\n    @expose_api(name='get',url='/api/get/<pk>',methods=['GET'])\n    @has_access_api\n    @permission_name('show')\n    def api_get(self,pk):\n\n        item = self.datamodel.get(pk,self._base_filters)\n        if not item:\n            abort(404)\n        _item = dict()\n        for col in self.show_columns:\n            _item[col] = str(getattr(item,col))\n\n        ret_json = jsonify(pk=pk,\n                           label_columns=self._label_columns_json(),\n                           include_columns=self.show_columns,\n                           modelview_name=self.__class__.__name__,\n                           result=_item)\n        response = make_response(ret_json,200)\n        response.headers['Content-Type'] = \"application/json\"\n        return response\n\n    @expose_api(name='create',url='/api/create',methods=['POST'])\n    @has_access_api\n    @permission_name('add')\n    def api_create(self):\n        is_valid_form = True\n        get_filter_args(self._filters)\n        exclude_cols = self._filters.get_relation_cols()\n        form = self.add_form.refresh()\n\n        self._fill_form_exclude_cols(exclude_cols,form)\n        if form.validate():\n            item = self.datamodel.obj()\n            form.populate_obj(item)\n            self.pre_add(item)\n            if self.datamodel.add(item):\n                self.post_add(item)\n                http_return_code = 200\n            else:\n                http_return_code = 500\n        else:\n            is_valid_form = False\n\n        if is_valid_form:\n            response = make_response(jsonify({'message': self.datamodel.message[0],\n                                              'severity': self.datamodel.message[1]}), http_return_code)\n        else:\n            # TODO return dict with errors\n            response = make_response(jsonify({'message': 'Invalid form',\n                                              'severity': 'warning'}), 500)\n        return response\n\n    @expose_api(name='update',url='/api/update/<pk>',methods=['PUT'])\n    @has_access_api\n    @permission_name('edit')\n    def api_update(self,pk):\n        is_valid_form = True\n        get_filter_args(self._filters)\n        exclude_cols = self._filters.get_relation_cols()\n\n        item = self.datamodel.get(pk,self._base_filters)\n        if not item:\n            abort(404)\n        pk = self.datamodel.get_pk_value(item)\n\n        form = self.edit_form.refresh(request.form)\n        self._fill_form_exclude_cols(exclude_cols,form)\n        form._id = pk\n        if form.validate():\n            form.populate_obj(item)\n            self.pre_update(item)\n            if self.datamodel.edit(item):\n                self.post_update(item)\n                http_return_code = 200\n            else:\n                http_return_code = 500\n        else:\n            is_valid_form = False\n        if is_valid_form:\n            response = make_response(jsonify({'message':self.datamodel.message[0],'severity':self.datamodel.message[1]}),http_return_code)\n        else:\n            response = make_response(jsonify({'message':'Invalid form','severity':'warning'}),500)\n        return response\n\n    @expose_api(name='delete',url='/api/delete/<pk>',methods=['DELETE'])\n    @has_access_api\n    @permission_name('delete')\n    def api_delete(self,pk):\n        item = self.datamodel.get(pk,self._base_filters)\n        if not item:\n            abort(404)\n        self.pre_delete(item)\n        if self.datamodel.delete(item):\n            self.post_delete(item)\n            http_return_code = 200\n        else:\n            http_return_code = 500\n        response = make_response(jsonify({'message':self.datamodel.message[0],'severity':self.datamodel.message[1]}),http_return_code)\n        response.headers['Content-Type'] = \"application/json\"\n        return response\n\n    def _get_related_column_data(self,col_name,filters):\n        rel_datamodel = self.datamodel.get_related_interface(col_name)\n        _filters = 
rel_datamodel.get_filters(rel_datamodel.get_search_columns_list())\n        get_filter_args(_filters)\n        if filters:\n            filters = _filters.add_filter_list(filters)\n        else:\n            filters = _filters\n        result = rel_datamodel.query(filters)[1]\n        ret_list = list()\n        for item in result:\n            pk = rel_datamodel.get_pk_value(item)\n            ret_list.append({'id':int(pk),'text':str(item)})\n        ret_json = json.dumps(ret_list)\n        return ret_json\n\n    @expose_api(name='column_add',url='/api/column/add/<col_name>',methods=['GET'])\n    @has_access_api\n    @permission_name('add')\n    def api_column_add(self,col_name):\n\n        filter_rel_fields = None\n        if self.add_form_query_rel_fields:\n            filter_rel_fields = self.add_form_query_rel_fields.get(col_name)\n        ret_json = self._get_related_column_data(col_name,filter_rel_fields)\n        response = make_response(ret_json,200)\n        response.headers['Content-Type'] = \"application/json\"\n        return response\n\n    @expose_api(name='column_edit',url='/api/column/edit/<col_name>',methods=['GET'])\n    @has_access_api\n    @permission_name('edit')\n    def api_column_edit(self,col_name):\n\n        filter_rel_fields = None\n        if self.edit_form_query_rel_fields:\n            filter_rel_fields = self.edit_form_query_rel_fields\n        ret_json = self._get_related_column_data(col_name,filter_rel_fields)\n        response = make_response(ret_json,200)\n        response.headers['Content-Type']=\"application/json\"\n        return response\n\n    @expose_api(name='readvalues',url='/api/readvalues',methods=['GET'])\n    @has_access_api\n    @permission_name('list')\n    def api_readvalues(self):\n\n        if get_order_args().get(self.__class__.__name__):\n            order_column,order_direction = get_order_args().get(self.__class__.__name__)\n        else:\n            order_column,order_direction = '',''\n\n        get_filter_args(self._filters)\n        joined_filters = self._filters.get_joined_filters(self._base_filters)\n        count,result = self.datamodel.query(joined_filters,order_column,order_direction)\n\n        ret_list = list()\n        for item in result:\n            pk = self.datamodel.get_pk_value(item)\n            ret_list.append({'id':int(pk),'text':str(item)})\n\n        ret_json = json.dumps(ret_list)\n        response = make_response(ret_json,200)\n        response.headers['Content-Type'] = \"application/json\"\n        return response\n\n\nclass ModelView(RestCRUDView):\n\n    def __init__(self,**kwargs):\n        super(ModelView,self).__init__(**kwargs)\n\n    def post_add_redirect(self):\n        \"\"\"Override this function to control the redirect after add endpoint is called.\"\"\"\n        return redirect(self.get_redirect())\n\n    def post_edit_redirect(self):\n        \"\"\"Override this function to control the redirect after edit endpoint is called.\"\"\"\n        return redirect(self.get_redirect())\n\n    def post_delete_redirect(self):\n        \"\"\"Override this function to control the redirect after delete endpoint is called.\"\"\"\n        return redirect(self.get_redirect())\n\n    \"\"\"\n    ---------------\n    LIST SHOW ADD EDIT DELETE\n    ---------------\n    \"\"\"\n\n    @expose('/list/')\n    @has_access\n    def list(self):\n\n        widgets = self._list()\n        return self.render_template(self.list_template,title=self.list_title,widgets=widgets)\n\n    @expose('/show/<pk>',methods=['GET'])\n    @has_access\n    def show(self,pk):\n        widgets = self._show(pk)\n        return self.render_template(self.show_template,pk=pk,title=self.show_title,\n                                    widgets=widgets,related_views=self._related_views)\n\n    @expose('/add',methods=['GET','POST'])\n    @has_access\n    def add(self):\n        widget = self._add()\n        if not widget:\n            return self.post_add_redirect()\n        else:\n            return self.render_template(self.add_template,title=self.add_title,widgets=widget)\n\n    @expose('/edit/<pk>',methods=['GET','POST'])\n    @has_access\n    def edit(self,pk):\n        widgets = self._edit(pk)\n        if not widgets:\n            return self.post_edit_redirect()\n        else:\n            return self.render_template(self.edit_template,title=self.edit_title,\n                                        
widgets=widgets,related_views=self._related_views)\n\n    @expose('/delete/<pk>')\n    @has_access\n    def delete(self,pk):\n        self._delete(pk)\n        return self.post_delete_redirect()\n\n    @expose('/download/<string:filename>')\n    @has_access\n    def download(self,filename):\n        return send_file(self.appbuilder.app.config['UPLOAD_FOLDER'] + filename,\n                         attachment_filename=uuid_originalname(filename),as_attachment=True)\n\n    @expose('/action/<string:name>/<pk>',methods=['GET'])\n    def action(self,name,pk):\n        if self.appbuilder.sm.has_access(name,self.__class__.__name__):\n            action = self.actions.get(name)\n            return action.func(self.datamodel.get(pk))\n        else:\n            flash(as_unicode(FLAMSG_ERR_SEC_ACCESS_DENIED),\"danger\")\n            return redirect('.')\n\n    @expose('/action_post',methods=['POST'])\n    def action_post(self):\n        name = request.form['action']\n        pks = request.form.getlist('rowid')\n        if self.appbuilder.sm.has_access(name,self.__class__.__name__):\n            action = self.actions.get(name)\n            items = [self.datamodel.get(pk) for pk in pks]\n            return action.func(items)\n        else:\n            flash(as_unicode(FLAMSG_ERR_SEC_ACCESS_DENIED),\"danger\")\n            return redirect('.')\n\n\nclass MasterDetailView(BaseCRUDView):\n    \"\"\"\n    Implements behaviour for controlling two CRUD views\n    linked by PK and FK, in a master/detail type with\n    two lists.\n\n    Master view will behave like a left menu::\n        class DetailView(ModelView):\n            datamodel = SQLAInterface(DetailTable, db.session)\n        class MasterView(MasterDetailView):\n            datamodel = SQLAInterface(MasterTable, db.session)\n            related_views = [DetailView]\n    \"\"\"\n    list_template = 'appbuilder/general/model/left_master_detail.html'\n    list_widget = ListMasterWidget\n    master_div_width = 2\n    \"\"\" Set to configure bootstrap class for master grid size\"\"\"\n\n    @expose('/list/')\n    @expose('/list/<pk>')\n    @has_access\n    def list(self,pk=None):\n        pages = get_page_args()\n        page_sizes = get_page_size_args()\n        orders = get_order_args()\n\n        widgets = self._list()\n        if pk:\n            item = self.datamodel.get(pk)\n            widgets = self._get_related_views_widgets(item,orders=orders,\n                                                      pages=pages,page_sizes=page_sizes,widgets=widgets)\n            related_views = self._related_views\n        else:\n            related_views = []\n\n        return self.render_template(self.list_template,\n                                    title=self.list_title,\n                                    widgets=widgets,\n                                    related_views=related_views,\n                                    master_div_width=self.master_div_width)\n\n\n\nclass MultipleView(BaseView):\n\n    list_template = 'appbuilder/general/model/multiple_views.html'\n    views = None\n    _views = None\n\n    def __init__(self,**kwargs):\n        super(MultipleView,self).__init__(**kwargs)\n        self.views = self.views or list()\n        self._views = self._views or list()\n\n    def get_uninit_inner_views(self):\n        return self.views\n\n    def get_init_inner_views(self):\n        return self._views\n\n    @expose('/list/')\n    @has_access\n    def list(self):\n        pages = get_page_args()\n        page_sizes = get_page_size_args()\n        orders = get_order_args()\n        views_widgets = list()\n        for view in self._views:\n            if orders.get(view.__class__.__name__):\n                order_column,order_direction = orders.get(view.__class__.__name__)\n            else:\n                order_column,order_direction = '',''\n            page = pages.get(view.__class__.__name__)\n            page_size = page_sizes.get(view.__class__.__name__)\n            views_widgets.append(view._get_view_widget(filters=view._base_filters,\n                                                       order_column=order_column,\n                                                       order_direction=order_direction,\n                                                       page=page,page_size=page_size))\n        self.update_redirect()\n        return self.render_template(self.list_template,\n                                    views=self._views,views_widgets=views_widgets)\n\n\n\nclass CompactCRUDMixin(BaseCRUDView):\n\n    @classmethod\n    def 
set_key(cls,k,v):\n        k = cls.__name__ + '__' + k\n        session[k] = v\n\n    @classmethod\n    def get_key(cls,k,default=None):\n        k = cls.__name__ + '__' + k\n        if k in session:\n            return session[k]\n        else:\n            return default\n\n    @classmethod\n    def del_key(cls,k):\n        k = cls.__name__ + '__' + k\n        session.pop(k)\n\n    def _get_list_widget(self,**args):\n        widgets = super(CompactCRUDMixin,self)._get_list_widget(**args)\n        session_form_widget = self.get_key('session_form_widget',None)\n\n        form_widget = None\n        if session_form_widget == 'add':\n            form_widget = self._add().get('add')\n        elif session_form_widget == 'edit':\n            pk = self.get_key('session_form_edit_pk')\n            if pk:\n                form_widget = self._edit(int(pk)).get('edit')\n        return {\n            'list':GroupFormListWidget(\n                list_widget=widgets.get('list'),\n                form_widget = form_widget,\n                form_action=self.get_key('session_form_action',''),\n                form_title=self.get_key('session_form_title',''),\n            )\n        }\n\n    @expose('/list/',methods=['GET','POST'])\n    @has_access\n    def list(self):\n        list_widgets = self._list()\n        return self.render_template(self.list_template,\n                                    title=self.list_title,widgets=list_widgets)\n\n    @expose('/add/',methods=['GET','POST'])\n    @has_access\n    def add(self):\n        widgets = self._add()\n        if not widgets:\n            self.set_key('session_form_action', '')\n            self.set_key('session_form_widget', None)\n            return redirect(request.referrer)\n        else:\n            self.set_key('session_form_widget','add')\n            self.set_key('session_form_action',request.full_path)\n            self.set_key('session_form_title',self.add_title)\n            return redirect(self.get_redirect())\n\n    @expose('/edit/<pk>',methods=['GET','POST'])\n    @has_access\n    def edit(self,pk):\n        widgets = self._edit(pk)\n        self.update_redirect()\n        if not widgets:\n            self.set_key('session_form_action','')\n            self.set_key('session_form_widget',None)\n            return redirect(self.get_redirect())\n        else:\n            self.set_key('session_form_widget','edit')\n            self.set_key('session_form_action',request.full_path)\n            self.set_key('session_form_title',self.edit_title)\n            self.set_key('session_form_edit_pk',pk)\n            return redirect(self.get_redirect())\n\n    @expose('/delete/<pk>')\n    @has_access\n    def delete(self,pk):\n        self._delete(pk)\n        edit_pk = self.get_key('session_form_edit_pk')\n        if pk == edit_pk:\n            self.del_key('session_form_edit_pk')\n        return redirect(self.get_redirect())\n\n\"\"\"\n    This is for retro compatibility\n\"\"\"\nGeneralView = ModelView\n\n\n","sub_path":"flask_appbuilder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"269981979","text":"\"\"\"\nThis code will contain python data structures\n\n\"\"\"\n\"\"\"Singly linked list\nThe basic structure of a singly linked list is that it contains:\ndata\nA reference to the next node. 
Node contains data and the link to the next node.\n\nWe will create a singly linked list whose node contains data and the link to the next node\n\"\"\"\n\n\n# Define basic element that forms the list\nclass Node:\n    # Constructor for the node (has data and link to next node)\n    def __init__(self, data=None, next_node=None):\n        self.__data = data\n        self.__next_node = next_node\n\n    \"define methods to get the data, get next node and set the next node\"\n\n    # Get data\n    def get_data(self):\n        return self.__data\n\n    # Get the next node\n    def get_next(self):\n        return self.__next_node\n\n    # Set the next node\n    def set_next(self, new_next):\n        self.__next_node = new_next\n\n\n\"Define the list\"\n\n\nclass SinglyLinkedList:\n    # Constructor\n    def __init__(self):\n        self.__head = Node(\"__head__\")\n\n    # Get the first node that contains the specified data\n    def get_node(self, data):\n        current = self.__head\n\n        # Go through the list until it finds a match, or reach the end of the list\n        while current:\n            if current.get_data() == data:\n                return current\n            else:\n                current = current.get_next()\n        return None\n\n    # Delete first node that contains the specified data\n    def delete(self, data):\n        current = self.__head\n        previous = None\n\n        # Go through the list until it finds a match, or reach the end of the list\n        while current and current.get_data() != data:\n            previous = current\n            current = current.get_next()\n        # Unlink the matching node, if one was found\n        if current:\n            previous.set_next(current.get_next())\n\n    # Append new node to the end of the list\n    def append(self, data):\n        current = self.__head\n        # Go to the last node in the list\n        while current.get_next():\n            current = current.get_next()\n\n        # Append at the end of the list\n        current.set_next(Node(data))\n\n    # Get the number of nodes in the list\n    def size(self):\n        current = self.__head\n        count = 0\n        while current:\n            count += 1\n            current = current.get_next()\n        return count - 1\n\n    # Print List\n    def print_list(self):\n        current = self.__head.get_next()\n        while current:\n            print(current.get_data())\n            current = current.get_next()\n\n\n\"Test our list\"\n# Create list object\nl = SinglyLinkedList()\n# Append cat to the list and print\nl.append('cat')\n# More appends\nl.append('dog')\nl.append('fish')\nl.append('bird')\n\nl.print_list()\n\n# Test Get node\nnode = l.get_node('fish')\nprint(node.get_data())\n\n# delete fish\nl.delete('fish')\nl.print_list()\n\n# size\nprint(l.size())\n","sub_path":"data_structures.py","file_name":"data_structures.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"194970957","text":"from functions.functions import *\nimport itertools\n\n# read ciphertext from file\nf=open('text/12.txt', 'r')\nline=''.join(f.readlines()).replace('\\n', '')\nf.close()\n\n# get five most often bigrams\nx=sort_dict(count_bigrams(line, False))\nfirst_elements = [i[0] for i in list(x.items())[:5]]\n\n# set flag to 0 (change it if plaintext found)\nflag = 0\n\n# go through every possible combination of\n# (2 bigrams from most often in language, 2 bigrams from most often in ciphertext)\nfor i in itertools.permutations(five_most_often, 2):\n    for j in itertools.combinations(first_elements, 2):\n        print('-> X: '+str(i)+'; Y: '+str(j)+';')\n\n        # get coefficients for decryption, check if not None\n        a, b=get_coefs(i, j)\n        if len(a)<1:\n            print('   error: coef a does not exist')\n\n        # go through every pair (a, b) for decryption\n        for (ael, bel) in zip(a, b):\n            print('   a: '+str(ael)+'; b: '+str(bel) )\n            plaintext = 
decipher_afin_bigrams(ael, bel, line)\n\n # check if plaintext follows the criteria\n # and if inverse of a exists (if it does, len(plaintext) will be >0)\n if len(plaintext)>0 and is_plaintext(plaintext):\n print(' plaintext: '+plaintext[:100]+'...')\n\n # write plaintext to file and change flag to 1\n if flag==0:\n fileout = open('results/decrypted_12.txt', 'w')\n fileout.write(plaintext)\n fileout.close()\n flag+=1\n\n #exit() # uncomment to end program on first plaintext found\n print()\n","sub_path":"cp_3/kostetska_fb-83_cp3/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"390257667","text":"#!/usr/bin/env python3\n\nimport datetime\nimport logging\nimport signal\nimport sqlite3\nimport libopenzwave\n\n\nDATABASE = '/home/pi/lunares_hab/sensors-data.sqlite3'\nALLOWED_MEASUREMENTS = ['Battery Level', 'Powerlevel', 'Temperature', 'Luminance', 'Relative Humidity', 'Ultraviolet'] # 'Burglar'\ndevice = '/dev/ttyACM0'\nlog = 'Info'\nsniff = 60.0\n\n\noptions = libopenzwave.PyOptions(\n config_path='/usr/local/etc/openzwave/',\n user_path='/home/pi/lunares_hab/',\n cmd_line='--logging false')\n\noptions.lock()\nmanager = libopenzwave.PyManager()\nmanager.create()\n\n\nwith sqlite3.connect(DATABASE) as db:\n db.execute(\"\"\"CREATE TABLE IF NOT EXISTS sensor_data (\n datetime DATETIME PRIMARY KEY,\n sync_datetime DATETIME DEFAULT NULL,\n device VARCHAR(255),\n type VARCHAR(255),\n value VARCHAR(255),\n unit VARCHAR(255));\"\"\")\n db.execute('CREATE UNIQUE INDEX IF NOT EXISTS sensor_data_datetime_index ON sensor_data (datetime);')\n db.execute('CREATE INDEX IF NOT EXISTS sensor_data_sync_datetime_index ON sensor_data (sync_datetime);')\n\n\ndef save_to_sqlite3(args):\n values = args.get('valueId')\n\n if not values or values.get('label') not in ALLOWED_MEASUREMENTS:\n return None\n\n with sqlite3.connect(DATABASE) as db:\n db.execute('INSERT INTO sensor_data VALUES (:datetime, NULL, :device, :type, :value, :unit)', {\n 'datetime': datetime.datetime.now(datetime.timezone.utc),\n 'type': values.get('label'),\n 'value': values.get('value'),\n 'unit': values.get('units'),\n 'device': '{base:08x}-{node}'.format(\n base=values.get('homeId'),\n node=values.get('nodeId'))})\n\n\nif __name__ == '__main__':\n logging.info('Add watcher')\n manager.addWatcher(save_to_sqlite3)\n\n logging.info('Add device')\n manager.addDriver(device)\n\n try:\n signal.pause()\n\n finally:\n logging.info('Remove watcher')\n manager.removeWatcher(save_to_sqlite3)\n\n logging.info('Remove device')\n manager.removeDriver(device)\n","sub_path":"bin/sensor-zwave-collector.py","file_name":"sensor-zwave-collector.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"176413393","text":"'''important'''\n'''The Little Elephant loves playing with arrays. He has array a, consisting of n positive integers, indexed from 1 to n. Let's denote the number with index i as ai.\n\nAdditionally the Little Elephant has m queries to the array, each query is characterised by a pair of integers lj and rj (1 ≤ lj ≤ rj ≤ n). 
For each query lj, rj the Little Elephant has to count how many numbers x exist such that number x occurs exactly x times among numbers alj, alj + 1, ..., arj.\n\nHelp the Little Elephant to count the answers to all queries.\n\nInput\nThe first line contains two space-separated integers n and m (1 ≤ n, m ≤ 105) — the size of array a and the number of queries to it. The next line contains n space-separated positive integers a1, a2, ..., an (1 ≤ ai ≤ 109). Next m lines contain descriptions of queries, one per line. The j-th of these lines contains the description of the j-th query as two space-separated integers lj and rj (1 ≤ lj ≤ rj ≤ n).\n\nOutput\nIn m lines print m integers — the answers to the queries. The j-th line should contain the answer to the j-th query.\n\nExamples\ninputCopy\n7 2\n3 1 2 2 3 3 7\n1 7\n3 4\noutputCopy\n3\n1\n'''\n\ndef helpElephant(arr,queries,q): # 3 1 2 2 3 3 7\n    Ans = [0]*q\n    freq =[0]*10000\n    queries.sort(key = lambda x:x[1])\n    MXN = 10**5 + 5\n\n    currL =0\n    currR = -1\n    count =0\n    idx_lis = [0] * q\n\n    for i in range(q):\n\n        L,R = queries[i]\n        idx = origin_index.index([L, R])\n        idx_lis[idx]+=1\n        if idx_lis[idx]>1:\n            idx+=1\n        print(idx)\n\n\n\n        while(currR < R):\n            currR += 1\n            if arr[currR]>=MXN:\n                return 0\n            if freq[arr[currR]] == arr[currR]:\n                count-=1\n            freq[arr[currR]]+=1\n            if freq[arr[currR]] == arr[currR]:\n                count+=1\n\n\n        while (currL > L):\n            currL -= 1\n            if arr[currL]>=MXN:\n                return 0\n            if freq[arr[currL]] == arr[currL]:\n                count -= 1\n            freq[arr[currL]] += 1\n            if freq[arr[currL]] == arr[currL]:\n                count += 1\n\n\n        while (currL < L):\n            if freq[arr[currL]] == arr[currL]:\n                count -= 1\n            freq[arr[currL]] -= 1\n            if freq[arr[currL]] == arr[currL]:\n                count += 1\n            currL += 1\n        while (currR > R):\n            if freq[arr[currR]] == arr[currR]:\n                count -= 1\n            freq[arr[currR]] -= 1\n            if freq[arr[currR]] == arr[currR]:\n                count += 1\n            currR -= 1\n        Ans[idx] = count\n\n\n\n\n    for i in range(len(Ans)) :\n        print(Ans[i])\n\ninp = list(map(int,input(\"n q\").split()))\nn= inp[0]\nq = inp[1]\narr = list(map(int,input(\"array\").split()))\nqueries = []\norigin_index = [0]*q\nans = []\nfor i in range(q):\n    queries.append(list(map(int,input(\" query range\").split())))\n    queries[i][0]-=1\n    queries[i][1]-=1\n    origin_index[i]=queries[i]\n\nhelpElephant(arr,queries,q)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"BasicThingsUshould Know/PlmsonMosAlgo.py","file_name":"PlmsonMosAlgo.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"630796445","text":"class Solution:\n    def fourSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[List[int]]\n        \"\"\"\n\n        # the solution set must not contain duplicate quadruplets.\n\n        if len(nums) ==0 or not nums:\n            return []\n        return_list =[]\n        nums.sort()\n        print(nums)\n        last = len(nums) - 1\n\n        for i in range(len(nums) - 3):\n\n            if i > 0 and nums[i] == nums[i - 1]:\n                continue\n\n            for j in range(i+1 ,len(nums) - 2):\n\n                #if j > 0 and nums[j] == nums[j - 1]:\n                #    continue\n\n                k = j+ 1\n                l = last\n\n                while k < l:\n                    sum = nums[i] + nums[j] + nums[k] + nums[l]\n                    if sum == target:\n                        quad = [nums[i], nums[j], nums[k], nums[l]]\n                        # skip duplicate quadruplets\n                        if quad not in return_list:\n                            return_list.append(quad)\n                        k += 1\n                        l -= 1\n                    if sum > target:\n                        l -= 1\n                    if sum < target:\n                        k += 1\n\n        return return_list\n\n\nsolution = Solution\nprint(solution.fourSum(solution,nums = [1, 0, -1, 0, -2, 2], target = 0))\nprint(solution.fourSum(solution,nums = [0,0,0,0], target = 0))\nprint(solution.fourSum(solution,nums = [-3,-2,-1,0,0,1,2,3], target = 
0))","sub_path":"M_18_Four_Sum.py","file_name":"M_18_Four_Sum.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"167798335","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom PyQt5 import QtWidgets, QtCore\n\n\nclass subSelectWindow(QtWidgets.QWidget):\n\n semsub = QtCore.pyqtSignal(str, str)\n\n def __init__(self):\n super(subSelectWindow, self).__init__()\n self.resize(800,600)\n self.selectSem = QtWidgets.QComboBox(self)\n self.selectSem.setGeometry(QtCore.QRect(175, 100, 450, 80))\n self.selectSem.setObjectName(\"selectSem\")\n font = self.selectSem.font()\n font.setPointSize(15)\n self.selectSem.setFont(font)\n self.selectSubject = QtWidgets.QComboBox(self)\n self.selectSubject.setGeometry(QtCore.QRect(175, 220, 450, 80))\n self.selectSubject.setObjectName(\"selectSubject\")\n font = self.selectSubject.font()\n font.setPointSize(12)\n self.selectSubject.setFont(font)\n self.selectBtn = QtWidgets.QPushButton(self)\n self.selectBtn.setGeometry(QtCore.QRect(225, 350, 350, 90))\n self.selectBtn.setObjectName(\"selectBtn\")\n self.selectBtn.setText(\"Confirm and close\")\n self.selectBtn.setStyleSheet(\"#selectBtn{\\n\"\n \"display: inline-block;\\n\"\n \" padding: 15px 25px;\\n\"\n \" font-size: 24px;\\n\"\n \" cursor: pointer;\\n\"\n \" text-align: center;\\n\"\n \" text-decoration: none;\\n\"\n \" outline: none;\\n\"\n \" color: #fff;\\n\"\n \" background-color: #4da6ff;\\n\"\n \" border: none;\\n\"\n \" border-radius: 45px;\\n\"\n \" box-shadow: 0 9px #999;\\n\"\n \"}\\n\"\n )\n self.subjectLabel = QtWidgets.QLabel(self)\n self.subjectLabel.setGeometry(QtCore.QRect(225, 450, 350, 90))\n self.subjectLabel.setObjectName(\"subjectLabel\")\n font = self.subjectLabel.font()\n font.setPointSize(12)\n self.subjectLabel.setFont(font)\n self.selectSem.addItems([\" SEM I\", \" SEM II\", \" SEM III\", \" SEM IV\", \" SEM V\", \" SEM VI\", \" SEM VII\", \" SEM VIII\"])\n self.selectSem.activated[str].connect(self.onSemSelected)\n self.selectSubject.activated[str].connect(self.onSubjectSelected)\n self.selectBtn.clicked.connect(self.send_clicked)\n self.setWindowTitle(\"Select Subject\")\n\n def send_clicked(self):\n\n self.semsub.emit(self.selectSem.currentText(), self.selectSubject.currentText())\n self.close()\n\n\n def onSubjectSelected(self, text):\n # self.got_password.emit(text)\n self.subjectLabel.setText(text)\n\n def onSemSelected(self, text):\n\n if(text == ' SEM I'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Applied Mathematics 1\", \" Applied Chemistry 1\", \" Applied Physics 1\", \" Basic Electrical and Electronic Engineering\", \" Engineering Mechanics\", \" Environmental Studies\"])\n elif(text == ' SEM II'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Applied Mathematics 2\", \" Applied Chemistry 2\", \" Applied Physics 2\", \" Engineering Drawing\", \" Structured Programming Approach\", \" Communication Skills\"])\n elif(text == ' SEM III'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Data Structure and Analysis\", \" Logic Design\", \" Principle of Communications\", \" Database Managemnet System\",\" Applied Mathematics 3\"])\n elif(text == ' SEM IV'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Automata Theory\", \" Operating Systems\", \" Computer Networks\", \" Computer Organizations and Architecture\",\" Applied Mathematics 4\"])\n elif(text == ' SEM V'):\n self.selectSubject.clear()\n 
            self.selectSubject.addItems([\" Microcontroller and Embedded Programming\", \" Cryptography and Network Security\", \" Internet Programming\", \" E-commerce and E-business\", \" Advanced Data Management Technology\"])\n        elif(text == ' SEM VI'):\n            self.selectSubject.clear()\n            self.selectSubject.addItems([\" Software Engineering with\\n Project Management\", \" Data Mining and Business Intelligence\", \" Cloud Computing and Services\", \" Digital Forensics\", \" Wireless Networks\"])\n        elif(text == ' SEM VII'):\n            self.selectSubject.clear()\n            self.selectSubject.addItems([\" Enterprise Network Design\", \" Infrastructure Security\", \" Artificial Intelligence\", \" Software Testing and\\n Quality Assurance\", \" Management Information System\"])\n        elif(text == ' SEM VIII'):\n            self.selectSubject.clear()\n            self.selectSubject.addItems([\" Enterprise Resource Management\", \" Big Data Analytics\", \" Project Management\", \" Internet Of Everything\"])\n","sub_path":"Add_questions/subSelect.py","file_name":"subSelect.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"623931231","text":"#!/bin/env python\n\nimport argparse\nimport math\nimport os\n\nimport cv2\nfrom keras.callbacks import ModelCheckpoint\nimport numpy as np\n\nfrom class_mapper import map_to\nfrom model import load_model\n\n# IMPORTANT: Keep the slash at the end!\nDATA_ROOT_PATH = '/tmp/data/tl_classification/'\n\ndef load_image(base_path, image_path):\n    \"\"\"Reads an image file and returns a bgr8 array and its label\n\n    Arguments:\n    base_path -- The directory where the captures are saved\n    image_path -- The name of the file to load\n    \"\"\"\n    full_path = os.path.join(base_path, image_path)\n    label = map_to(int(image_path.split('/')[0]))\n    image = cv2.imread(full_path) # Assumes the file is a bgr8 encoded jpg\n    #image = cv2.cvtColor(cv2.imread(base_path + '/' + image_path), cv2.COLOR_BGR2RGB)\n    return image, label\n\ndef train(weights_file):\n    captures = [] #os.listdir(DATA_ROOT_PATH)\n\n    for dirpath, subdirs, files in os.walk(DATA_ROOT_PATH):\n        if len(files) > 0:\n            for f in files:\n                captures.append(os.path.join(dirpath, f).replace(DATA_ROOT_PATH, ''))\n\n    assert len(captures) > 0, \"No files found!\"\n\n    # sklearn.cross_validation was removed from modern scikit-learn; model_selection replaces it\n    from sklearn.model_selection import train_test_split\n    train_samples, validation_samples = train_test_split(captures, test_size=0.2)\n\n    import sklearn\n\n    def batch_len(array):\n        \"\"\"Shortcut function for calculating the length of a batch from the\n        generator based on the augmentations performed\"\"\"\n        return len(array) * 2 # Mirror\n\n    def generator(samples, batch_size=32):\n        \"\"\"Generator function to return a number of example instances for training\n\n        Arguments:\n        samples -- The full array of samples (X data and y result)\n        batch_size -- The number of samples (before augmentation) that will be\n            returned by the generator. 
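Because every image is also mirrored during augmentation, each yielded batch holds up to twice this many samples. 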
The samples are shuffled before\n            each new batch is generated.\n        \"\"\"\n        def augment_and_append(image, label):\n            images.append(image)\n            labels.append(label)\n            images.append(np.fliplr(image))\n            labels.append(label)\n\n        num_samples = len(samples)\n        while 1:\n            # sklearn.utils.shuffle returns a new sequence, so the result has to be kept\n            samples = sklearn.utils.shuffle(samples)\n            for offset in range(0, num_samples, batch_size):\n                batch_samples = samples[offset:offset+batch_size]\n                images = []\n                labels = []\n                for batch_sample in batch_samples:\n                    image, label = load_image(DATA_ROOT_PATH, batch_sample)\n                    augment_and_append(image, label)\n\n                #print(labels)\n                X_data = np.array(images)\n                y_data = np.array(labels)\n                yield sklearn.utils.shuffle(X_data, y_data)\n\n    train_generator = generator(train_samples, batch_size=10)\n    validation_generator = generator(validation_samples, batch_size=10)\n\n    #inputs = Input(shape=(600, 800, 3))\n    #resized = Lambda(lambda image: ktf.image.resize_images(image, (224, 224)))(inputs)\n    #model = MobileNet(alpha=2, depth_multiplier=1, include_top=True, weights=None, classes=4, input_tensor=resized)\n\n    #model.compile(loss='mse', optimizer='adam')\n\n    #if not base_model is None:\n    #    model.load_weights(base_model)\n\n    model = load_model(weights_file)\n    checkpoint = ModelCheckpoint('M_{val_loss:.4f}.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only=True)\n    callbacks_list = [checkpoint]\n\n    history = model.fit_generator(train_generator, steps_per_epoch=2000, epochs=30, callbacks=callbacks_list, validation_data=validation_generator, validation_steps=30, use_multiprocessing=True)\n\n    model.save('M.h5')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=\"Traffic Light Classification Training\")\n    parser.add_argument(\n        'model_weights',\n        type=str,\n        help=\"Path to a weights file that will be fine-tuned\"\n    )\n    args = parser.parse_args()\n    train(args.model_weights)\n","sub_path":"ros/src/tl_detector/light_classification/mobilenet_model.py","file_name":"mobilenet_model.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"251770382","text":"import pandas as pd\nimport requests\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport datetime\nimport numpy as np\nimport random\nimport sys\n\n\nstart_date = input('Enter start date (yyyymmdd): ')\nstart_day = start_date[6:8]\nstart_month = start_date[4:6]\nstart_year = start_date[:4]\nstart_minute = '00'\n\nend_date = input('Enter an end date (yyyymmdd): ')\nif end_date == '':\n\tend_date = start_date\n\nend_day = end_date[6:8]\nend_month = end_date[4:6]\nend_year = end_date[:4]\nend_minute = '00'\n\nif len(start_date) != 8 or len(end_date) != 8:\n\tprint('\\nDATE ERROR: Dates must have 8 digits.')\n\tsys.exit(0)\n\nstart_hour = input('Enter a start hour or \"full\": ').zfill(2)\nif start_hour.isdigit():\n\tend_hour = input('Enter an end hour: ').zfill(2)\n\tif start_date == end_date:\n\t\tif (int(end_hour) - int(start_hour)) < 0:\n\t\t\tprint('\\nTIME ERROR: The end hour must not be earlier than the start hour.')\n\t\t\tsys.exit(0)\n\telif int(end_hour) > 23 or int(start_hour) > 23:\n\t\tprint('\\nTIME ERROR: Hours must be between 0 and 23.')\n\t\tsys.exit(0)\n\nif not start_hour.isdigit():\n\tif start_hour == 'full':\n\t\tstart_hour = '00'.zfill(2)\n\t\tend_hour = '23'.zfill(2)\n\telse:\n\t\tprint('\\nTIME ERROR: Not a valid alternative hour.')\n\t\tsys.exit(0)\n\n\n#=========NM Stations\n\nlist_nm = 
['AATB','APTY','ARNM','ATHN','BKSN','CALG','CALM','DOMB',\n\t\t\t'DOMC','DRBS','ESOI','FSMT','HRMS','INVK','IRK2','IRK3',\n\t\t\t'IRKT','JBGO','JUNG','JUNG1','KERG','KIEL','KIEL2','LMKS',\n\t\t\t'MCRL','MGDN','MOSC','MRNY','MWSN','MXCO','NAIN','NANM','NEU3',\n\t\t\t'NEWK','NRLK','NVBK','OULU','PSNM','PTFM','PWNK','ROME','SANB','SNAE'\n\t\t\t,'SOPB','SOPO','TERA','THUL','TIBT','TXBY','YKTK']\n\nnum_station = int(input('How many stations to parse: '))\nprint(f'You are parsing {num_station} station(s)')\n\nstation_multi = []\nfor i in range(num_station):\n\tstation = input('Enter station names: ').upper()\n\tif station == '':\n\t\tstation = 'OULU'\n\t\tstation_multi.append(station)\n\telif station == 'RANDOM':\n\t\tstation = random.choice(list_nm)\n\t\tstation_multi.append(station)\n\telse:\n\t\tstation_multi.append(station)\n\nprint(f'Parsing the {station_multi} stations')\n\n\nevent_obj_start = datetime.datetime.strptime(f'{start_date} {start_hour}', '%Y%m%d %H')\nevent_obj_start_str = datetime.datetime.strftime(event_obj_start, '%Y%m%d %H:%M:%S')\nevent_obj_start_str_date = datetime.datetime.strftime(event_obj_start, '%Y%m%d %H')\n\nevent_obj_end = datetime.datetime.strptime(f'{end_date} {end_hour}', '%Y%m%d %H')\nevent_obj_end_str = datetime.datetime.strftime(event_obj_end, '%Y%m%d %H:%M:%S')\nevent_obj_end_str_date = datetime.datetime.strftime(event_obj_end, '%Y%m%d %H')\n\n\n\n#=======sorting column header test\n\n#sorter_list = ['PSNM', 'TIBT', 'ESOI', 'ATHN', 'MXCO', 'ARNM', 'NANM', 'PTFM', 'CALM', 'AATB', 'ROME', 'BKSN', 'HRMS', 'JUNG', 'JUNG1', 'LMKS', 'IRK2', 'IRK3', 'IRKT', 'DRBS', 'NVBK', 'MCRL', 'MOSC', 'NEWK', 'KIEL', 'KIEL2', 'MGDN', 'YKTK', 'KERG', 'CALG', 'OULU', 'SANB', 'SNAE', 'APTY', 'NRLK', 'TXBY', 'FSMT', 'INVK', 'JBGO', 'NAIN', 'PWNK', 'THUL', 'MWSN', 'NEU3', 'SOPB', 'SOPO', 'MRNY', 'DOMB', 'DOMC', 'TERA']\nsorter = {'PSNM':0, 'TIBT':1, 'ESOI':2, 'ATHN':3, 'MXCO':4, 'ARNM':5, 'NANM':6, 'PTFM':7, 'CALM':8, 'AATB':9, 'ROME':10, 'BKSN':11, 'HRMS':12, 'JUNG':13, 'JUNG1':14, 'LMKS':15, 'IRK2':16, 'IRK3':17, 'IRKT':18, 'DRBS':19, 'NVBK':20, 'MCRL':21, 'MOSC':22, 'NEWK':23, 'KIEL':24, 'KIEL2':25, 'MGDN':26, 'YKTK':27, 'KERG':28, 'CALG':29, 'OULU':30, 'SANB':31, 'SNAE':32, 'APTY':33, 'NRLK':34, 'TXBY':35, 'FSMT':36, 'INVK':37, 'JBGO':38, 'NAIN':39, 'PWNK':40, 'THUL':41, 'MWSN':42, 'NEU3':43, 'SOPB':44, 'SOPO':45, 'MRNY':46, 'DOMB':47, 'DOMC':48, 'TERA':49}\n#sorted_sorter = sorted(sorter.items(), key=operator.itemgetter(1))\nsorted_lambda = sorted(sorter.items(), key=lambda x: x[1])\n\n\nsorted_nm_list = []\nfor i in [i[0] for i in sorted_lambda]:\n\tif i in station_multi:\n\t\tsorted_nm_list.append(i)\n\n#========creating station string for url\n\nstation_str = ''\nfor i in sorted_nm_list:\n\tstation_str += f'&stations[]={i}'\n\n#=========Fetch online neutron monitor data\n\nurl = f'http://www.nmdb.eu/nest/draw_graph.php?formchk=1{station_str}&tabchoice=revori&dtype=corr_for_efficiency&tresolution=0&yunits=0&date_choice=bydate&start_day={start_day}&start_month={start_month}&start_year={start_year}&start_hour={start_hour}&start_min={start_minute}&end_day={end_day}&end_month={end_month}&end_year={end_year}&end_hour={end_hour}&end_min={end_minute}&output=ascii'\n\nnm_data = pd.DataFrame([])\n\nname_list = ['datetime'] + [ str(i) for i in sorted_nm_list]\n\n# pd.datetime was removed from modern pandas; use the datetime module directly\ndateparse = lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')\nnm_data = pd.read_csv(url,sep=';|\\n|\\b', skiprows=133, skipfooter=3, engine='python', index_col='datetime', date_parser=dateparse, names=name_list, 
na_values=[' null']) #, , parse_dates=['datetime'], date_parser=dateparse\n#delim_whitespace=True\n\nnm_counter = []\nfor item in sorted_nm_list:\n\tif nm_data[f'{item}'].isnull().values.any() == True:\n\t\tnm_counter.append(1)\n\telse:\n\t\tnm_counter.append(0)\n\n'''\nfor i in nm_counter:\n\tif i == 1:\n\t\tprint('Please select station with data for this time frame.')\n\t\tsys.exit(0)\n'''\n\n#====Plotting\nmyFmt = mdates.DateFormatter('%m/%d\\n%H:%M') #this is line that breaks code (ValueError: year 60740 is out of range)\n\ncolor_count = []\nfor i in sorted_nm_list:\n\n\tcolor_list = ['red','orange','green','blue','indigo','violet','purple'] #,'yellow'\n\tcolor_list = list(set(color_list) - set(color_count))\n\n\trand_color = random.choice(color_list)\n\tcolor_count.append(rand_color)\n\n\t#nm_data[f'{i}'].loc[f'{event_obj_start_str_date}':f'{event_obj_end_str_date}'].plot(color=rand_color, label= f'{i}')\n\tplt.plot(nm_data.index, nm_data[f'{i}'], color=rand_color, label=f'{i}')\n\n#nm_data['RCORR_E'].loc[f'{event_obj_start_str_date}':f'{event_obj_end_str_date}'].plot(color='limegreen', label= 'Corrected for Efficiency')\n\nplt.title(f'Neutron Monitor Data Corrected for Efficiency\\n[{event_obj_start_str} -- {event_obj_end_str}]', fontname=\"Arial\", fontsize = 14)\nplt.xlabel('Time', fontname=\"Arial\", fontsize = 14)\nplt.ylabel('Counts/s', fontname=\"Arial\", fontsize = 14)\nplt.minorticks_on()\nplt.grid(True)\n#plt.yscale('log')\nplt.legend(loc='upper right')\nplt.tight_layout()\n#ax = fig.add_subplot(111)\nax = plt.gca()\n\nax.xaxis.set_major_formatter(myFmt) #this is line that breaks code (ValueError: year 60740 is out of range)\n#ax.xaxis.set_major_formatter(dates.DateFormatter('%H'))\n#plt.axes().xaxis.set_major_formatter(myFmt)\n\nplt.setp(ax.xaxis.get_majorticklabels(), rotation=0, horizontalalignment='center')\n\n\n#plt.savefig('nm_data.png', format='png', dpi=900)\nplt.show()\n","sub_path":"Scripts/deprecated_scripts/pandas_test_nm.py","file_name":"pandas_test_nm.py","file_ext":"py","file_size_in_byte":6592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"174889086","text":"\nimport os\nimport sys\nimport time\n\nimport threading as mt\nimport multiprocessing as mp\n\nimport radical.utils as ru\n\nfrom .. import Session\nfrom .. import utils as rpu\nfrom .. 
import constants as rpc\n\n\n# ------------------------------------------------------------------------------\n#\nclass Worker(rpu.Component):\n\n # --------------------------------------------------------------------------\n #\n def __init__(self, cfg):\n\n if isinstance(cfg, str): cfg = ru.Config(cfg=ru.read_json(cfg))\n else : cfg = ru.Config(cfg=cfg)\n\n self._n_cores = cfg.cores\n self._n_gpus = cfg.gpus\n\n self._info = ru.Config(cfg=cfg.get('info', {}))\n self._session = Session(cfg=cfg, uid=cfg.sid, _primary=False)\n\n rpu.Component.__init__(self, cfg, self._session)\n\n self._term = mp.Event() # set to terminate\n self._res_evt = mp.Event() # set on free resources\n\n self._mlock = ru.Lock(self._uid) # lock `_modes` and `_mdata`\n self._modes = dict() # call modes (call, exec, eval, ...)\n self._mdata = dict() # call mode meta data\n\n # We need to make sure to run only up to `gpn` tasks using a gpu\n # within that pool, so need a separate counter for that.\n self._resources = {'cores' : [0] * self._n_cores,\n 'gpus' : [0] * self._n_gpus}\n\n # resources are initially all free\n self._res_evt.set()\n\n # # create a multiprocessing pool with `cpn` worker processors. Set\n # # `maxtasksperchild` to `1` so that we get a fresh process for each\n # # task. That will also allow us to run command lines via `exec`,\n # # effectively replacing the worker process in the pool for a specific\n # # task.\n # #\n # # We use a `fork` context to inherit log and profile handles.\n # #\n # # NOTE: The mp documentation is wrong; mp.Pool does *not* have a context\n # # parameters. Instead, the Pool has to be created within\n # # a context.\n # ctx = mp.get_context('fork')\n # self._pool = ctx.Pool(processes=self._n_cores,\n # initializer=None,\n # maxtasksperchild=1)\n # NOTE: a multiprocessing pool won't work, as pickle is not able to\n # serialize our worker object. So we use our own process pool.\n # It's not much of a loss since we want to respawn new processes for\n # each task anyway (to improve isolation).\n self._pool = dict() # map task uid to process instance\n self._plock = ru.Lock('p' + self._uid) # lock _pool\n\n # We also create a queue for communicating results back, and a thread to\n # watch that queue\n self._result_queue = mp.Queue()\n self._result_thead = mt.Thread(target=self._result_watcher)\n self._result_thead.daemon = True\n self._result_thead.start()\n\n # connect to master\n self.register_subscriber(rpc.CONTROL_PUBSUB, self._control_cb)\n self.register_publisher(rpc.CONTROL_PUBSUB)\n\n # run worker initialization *before* starting to work on requests.\n # the worker provides three builtin methods:\n # eval: evaluate a piece of python code\n # exec: execute a command line (fork/exec)\n # shell: execute a shell command\n # call: execute a method or function call\n self.register_mode('call', self._call)\n self.register_mode('eval', self._eval)\n self.register_mode('exec', self._exec)\n self.register_mode('shell', self._shell)\n\n self.pre_exec()\n\n # connect to the request / response ZMQ queues\n self._res_put = ru.zmq.Putter('to_res', self._info.res_addr_put)\n self._req_get = ru.zmq.Getter('to_req', self._info.req_addr_get,\n cb=self._request_cb)\n\n # the worker can return custom information which will be made available\n # to the master. 
This can be used to communicate, for example, worker\n        # specific communication endpoints.\n\n        # `info` is a placeholder for any additional meta data communicated to\n        # the worker\n        self.publish(rpc.CONTROL_PUBSUB, {'cmd': 'worker_register',\n                                          'arg': {'uid' : self._uid,\n                                                  'info': self._info}})\n\n\n    # --------------------------------------------------------------------------\n    #\n    def pre_exec(self):\n        '''\n        This method can be overloaded by the Worker implementation to run any\n        pre_exec commands before spawning worker processes.\n        '''\n\n        pass\n\n\n    # --------------------------------------------------------------------------\n    #\n    def register_mode(self, name, executor):\n\n        assert(name not in self._modes)\n\n        self._modes[name] = executor\n        self._mdata[name] = dict()\n\n\n    # --------------------------------------------------------------------------\n    #\n    def register_call(self, name, method):\n\n        # ensure the call mode is usable\n        mode = 'call'\n\n        assert(mode in self._modes)\n        assert(name not in self._mdata[mode])\n\n        self._mdata[mode][name] = method\n\n\n    # --------------------------------------------------------------------------\n    #\n    def _call(self, data):\n        '''\n        We expect data to have three entries: 'method' or 'function',\n        containing the name of the member method or the name of a free function\n        to call, `args`, an optional list of unnamed parameters, and `kwargs`,\n        an optional dictionary of named parameters.\n        '''\n\n        if 'method' in data:\n            to_call = getattr(self, data['method'], None)\n\n        elif 'function' in data:\n            names = dict(list(globals().items()) + list(locals().items()))\n            to_call = names.get(data['function'])\n\n        else:\n            raise ValueError('no method or function specified: %s' % data)\n\n        if not to_call:\n            raise ValueError('callable not found: %s' % data)\n\n\n        args = data.get('args', [])\n        kwargs = data.get('kwargs', {})\n\n        try:\n            out = to_call(*args, **kwargs)\n            err = None\n            ret = 0\n\n        except Exception as e:\n            self._log.exception('_call failed: %s' % (data))\n            out = None\n            err = 'call failed: %s' % e\n            ret = 1\n\n        return out, err, ret\n\n\n    # --------------------------------------------------------------------------\n    #\n    def _eval(self, data):\n        '''\n        We expect data to have a single entry: 'code', containing the Python\n        code to be eval'ed\n        '''\n\n        try:\n            out = eval(data['code'])\n            err = None\n            ret = 0\n\n        except Exception as e:\n            self._log.exception('_eval failed: %s' % (data))\n            out = None\n            err = 'eval failed: %s' % e\n            ret = 1\n\n        return out, err, ret\n\n\n    # --------------------------------------------------------------------------\n    #\n    def _exec(self, data):\n        '''\n        We expect data to have two entries: 'exe', containing the executable to\n        run, and `args` containing a list of arguments (strings) to pass as\n        command line arguments. 
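A minimal payload would look like {'exe': '/bin/echo', 'args': ['hello']} (illustrative values only). 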
We use `sp.Popen` to run the fork/exec, and to\n        collect stdout, stderr and return code\n        '''\n\n        try:\n            import subprocess as sp\n\n            # note: no trailing commas here, which would silently turn these into tuples\n            exe = data['exe']\n            args = data.get('args', [])\n            env = data.get('env', {})\n\n            proc = sp.Popen(executable=exe, args=args, env=env,\n                            stdin=None, stdout=sp.PIPE, stderr=sp.PIPE,\n                            close_fds=True, shell=False)\n            out, err = proc.communicate()\n            ret = proc.returncode\n\n        except Exception as e:\n            self._log.exception('_exec failed: %s' % (data))\n            out = None\n            err = 'exec failed: %s' % e\n            ret = 1\n\n        return out, err, ret\n\n\n    # --------------------------------------------------------------------------\n    #\n    def _shell(self, data):\n        '''\n        We expect data to have a single entry: 'cmd', containing the command\n        line to be called as string.\n        '''\n\n        try:\n            out, err, ret = ru.sh_callout(data['cmd'])\n\n        except Exception as e:\n            self._log.exception('_shell failed: %s' % (data))\n            out = None\n            err = 'shell failed: %s' % e\n            ret = 1\n\n        return out, err, ret\n\n\n    # --------------------------------------------------------------------------\n    #\n    def _alloc_task(self, task):\n        '''\n        allocate task resources\n        '''\n\n        with self._mlock:\n\n            cores = task.get('cores', 1)\n            gpus = task.get('gpus' , 0)\n\n            assert(cores >= 1)\n            assert(cores <= self._n_cores)\n            assert(gpus <= self._n_gpus)\n\n            if cores > self._resources['cores'].count(0): return False\n            if gpus > self._resources['gpus' ].count(0): return False\n\n            alloc_cores = list()\n            alloc_gpus = list()\n\n            if cores:\n                for n in range(self._n_cores):\n                    if not self._resources['cores'][n]:\n                        self._resources['cores'][n] = 1\n                        alloc_cores.append(n)\n                    if len(alloc_cores) == cores:\n                        break\n\n            if gpus:\n                for n in range(self._n_gpus):\n                    if not self._resources['gpus'][n]:\n                        self._resources['gpus'][n] = 1\n                        alloc_gpus.append(n)\n                    if len(alloc_gpus) == gpus:\n                        break\n\n            task['resources'] = {'cores': alloc_cores,\n                                 'gpus' : alloc_gpus}\n            return True\n\n\n    # --------------------------------------------------------------------------\n    #\n    def _dealloc_task(self, task):\n        '''\n        deallocate task resources\n        '''\n\n        with self._mlock:\n\n            resources = task['resources']\n\n            for n in resources['cores']:\n                assert(self._resources['cores'][n])\n                self._resources['cores'][n] = 0\n\n            for n in resources['gpus']:\n                assert(self._resources['gpus'][n])\n                self._resources['gpus'][n] = 0\n\n            # signal available resources\n            self._res_evt.set()\n\n            return True\n\n\n    # --------------------------------------------------------------------------\n    #\n    def _request_cb(self, tasks):\n        '''\n        grab the call type from each task, check that the method is registered,\n        and invoke it.\n        '''\n\n        tasks = ru.as_list(tasks)\n\n        for task in tasks:\n\n            self._prof.prof('reg_start', uid=self._uid, msg=task['uid'])\n            task['worker'] = self._uid\n\n            try:\n                # ok, we have work to do. Check the requirements to see how\n                # many cpus and gpus we need to mark as busy\n                while not self._alloc_task(task):\n                    # no resource - wait for new resources\n                    #\n                    # NOTE: this will block smaller tasks from being executed\n                    #       right now. alloc_task is not a proper scheduler,\n                    #       after all.\n                    while not self._res_evt.wait(timeout=1.0):\n\n                        # break on termination\n                        if self._term.is_set():\n                            return False\n\n                    self._res_evt.clear()\n\n                # we got an allocation for this task, and can run it, so apply\n                # to the process pool. 
The callback (`self._result_cb`) will\n # pick the task up on completion and free resources.\n #\n # NOTE: we don't use mp.Pool - see __init__ for details\n\n # ret = self._pool.apply_async(func=self._dispatch, args=[task],\n # callback=self._result_cb,\n # error_callback=self._error_cb)\n proc = mp.Process(target=self._dispatch, args=[task],\n daemon=True)\n\n with self._plock:\n\n # we need to include `proc.start()` in the lock, as\n # otherwise we may end up getting the `self._result_cb`\n # before the pid could be registered in `self._pool`.\n proc.start()\n self._pool[proc.pid] = proc\n self._log.debug('applied: %s: %s: %s',\n task['uid'], proc.pid, self._pool.keys())\n\n except Exception as e:\n\n self._log.exception('request failed')\n\n # free resources again for failed task\n self._dealloc_task(task)\n\n res = {'req': task['uid'],\n 'out': None,\n 'err': 'req_cb error: %s' % e,\n 'ret': 1}\n\n self._res_put.put(res)\n\n\n # --------------------------------------------------------------------------\n #\n def _dispatch(self, task):\n\n # this method is running in a process of the process pool, and will now\n # apply the task to the respective execution mode.\n #\n # NOTE: application of pre_exec directives may got here\n\n task['pid'] = os.getpid()\n\n # ----------------------------------------------------------------------\n def _dispatch_thread(tlock):\n out, err, ret = self._modes[mode](task.get('data'))\n with tlock:\n res = [task, str(out), str(err), int(ret)]\n self._log.debug('put 1 result: task %s', task['uid'])\n self._result_queue.put(res)\n # ----------------------------------------------------------------------\n\n\n try:\n # self._log.debug('dispatch: %s: %d', task['uid'], task['pid'])\n mode = task['mode']\n assert(mode in self._modes), 'no such call mode %s' % mode\n\n tout = self._cfg.workload.timeout\n self._log.debug('dispatch with tout %s', tout)\n\n tlock = mt.Lock()\n thread = mt.Thread(target=_dispatch_thread,\n args=[tlock])\n thread.daemon = True\n thread.start()\n thread.join(timeout=tout)\n\n with tlock:\n if thread.is_alive():\n out = None\n err = 'timeout (>%s)' % tout\n ret = 1\n res = [task, str(out), str(err), int(ret)]\n self._log.debug('put 2 result: task %s', task['uid'])\n self._result_queue.put(res)\n\n # self._log.debug('dispatch done: %s', task['uid'])\n\n except Exception as e:\n\n self._log.exception('dispatch failed')\n out = None\n err = 'dispatch failed: %s' % e\n ret = 1\n res = [task, str(out), str(err), int(ret)]\n self._log.debug('put 3 result: task %s', task['uid'])\n self._result_queue.put(res)\n\n finally:\n # if we kill the process too quickly, the result put above\n # will not make it out, thus make sure the queue is empty\n # first.\n self._result_queue.close()\n self._result_queue.join_thread()\n sys.exit(ret)\n # os.kill(os.getpid(), signal.SIGTERM)\n\n\n\n # --------------------------------------------------------------------------\n #\n def _result_watcher(self):\n\n while True:\n\n try:\n res = self._result_queue.get()\n self._log.debug('got result: %s', res)\n self._result_cb(res)\n except:\n self._log.exception('queue error')\n raise\n\n\n # --------------------------------------------------------------------------\n #\n def _result_cb(self, result):\n\n try:\n task, out, err, ret = result\n # self._log.debug('result cb: task %s', task['uid'])\n\n with self._plock:\n pid = task['pid']\n del(self._pool[pid])\n\n # free resources again for the task\n self._dealloc_task(task)\n\n res = {'req': task['uid'],\n 'out': out,\n 
'err': err,\n 'ret': ret}\n\n self._res_put.put(res)\n self._prof.prof('reg_stop', uid=self._uid, msg=task['uid'])\n except:\n self._log.exception('result cb failed')\n raise\n\n\n\n # --------------------------------------------------------------------------\n #\n def _error_cb(self, error):\n\n self._log.debug('error: %s', error)\n raise RuntimeError(error)\n\n\n # --------------------------------------------------------------------------\n #\n def _control_cb(self, topic, msg):\n\n if msg['cmd'] == 'worker_terminate':\n if msg['arg']['uid'] == self._uid:\n\n self._log.debug('got terminate msg: %s: %s', topic, msg)\n\n self._term.set()\n self.stop()\n sys.exit(0)\n\n\n # --------------------------------------------------------------------------\n #\n def run(self):\n\n while not self._term.is_set():\n time.sleep(1)\n\n\n# ------------------------------------------------------------------------------\n","sub_path":"src/radical/pilot/task_overlay/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":17871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"523018541","text":"from flask import render_template, jsonify, request\nfrom application import app, conn\nfrom mixpanel import Mixpanel\n\nmp = Mixpanel(\"e25bfe00c1f58cb35f850ae58bd8378b\")\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n@app.route(\"/_petition\", methods=['GET', 'POST'])\ndef petition():\n cur = conn.cursor()\n\n if request.method == 'POST':\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n email = request.form['email']\n story = request.form.get('story')\n\n mp.people_set(email, {\n '$first_name' : first_name,\n '$last_name' : last_name,\n '$email' : email,\n 'story' : story\n })\n\n mp.track(email, \"Signed Petition\");\n\n cur.execute(\"INSERT INTO signature (first_name, last_name, email, story) VALUES (%s, %s, %s, %s);\", (first_name, last_name, email, story))\n\n\n cur.execute(\"SELECT first_name, last_name FROM signature;\")\n results = cur.fetchall()\n\n signatures = []\n for signature in results:\n signatures.append(signature[0] + ' ' + signature[1])\n\n cur.close()\n conn.commit()\n return jsonify({\"results\": signatures})\n","sub_path":"application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"497977883","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Rapid Events Test.\n\nAuthor: Imran Matin\nEmail: imatin@ucsd.edu\n\nUsage:\n# in a new terminal\npython cSBC.py\n# in a new terminal\npython test_send_rapid_events.py\n\nTests the functionality of the cSBC when it is processing an event, and \nanother event is triggered and sent to it. Functions as the mSBC. Note, change\nthe HOST variable to be the IP of the cSBC if you are not running it on the \nlocalhost. 
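For example, with NUM_EVENTS = 5 and NUM_TRIALS = 3 the script fires three bursts of five EVENT commands and then tells the cSBC to shut down. 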
Set the number of events to be sent back to back and how many times\nto send them.\n\"\"\"\n\n# Import socket module\nimport socket\nfrom time import sleep\n\n# The server's hostname or IP address\nHOST = \"127.0.0.1\"\n# The port used by the server\nPORT = 65431\n# Number of back to back events to be sent\nNUM_EVENTS = 5\n# Number of times to send NUM_EVENTS\nNUM_TRIALS = 3\n\n\ndef send_events(num_events):\n    \"\"\"Sends num_events back to back commands to the cSBC.\"\"\"\n    # Send num_events back to back event requests\n    for i in range(0, num_events):\n        try:\n            # open a socket for this client\n            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n                # connect to the server\n                s.connect((HOST, PORT))\n                print(\"Sending event\")\n                # send a command to the server\n                s.sendall(b\"EVENT\")\n                sleep(0.05)\n        except Exception as e:\n            print(f\"\\n{e}\\n\")\n\n\ndef send_shutdown():\n    \"\"\"Continuously sends the shutdown command to the cSBC until it is shut down.\"\"\"\n    while True:\n        try:\n            # open a socket for this client\n            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n                # connect to the server\n                s.connect((HOST, PORT))\n                print(\"Sending shutdown\")\n                # send a command to the server\n                s.sendall(b\"SHUTDOWN\")\n                break\n        except Exception as e:\n            print(f\"\\n{e}\\n\")\n            sleep(1)\n\n\nif __name__ == \"__main__\":\n    print(\"Starting Send Rapid Events Test...\")\n    try:\n        for i in range(0, NUM_TRIALS):\n            # wait period to allow the cSBC to collect images\n            sleep(5)\n            send_events(NUM_EVENTS)\n\n        # Shutdown the cSBC\n        send_shutdown()\n    except Exception as e:\n        print(f\"Exception occurred: {e}\")\n    print(\"Completed Send Rapid Events Test...\")\n","sub_path":"tests/test_send_rapid_events.py","file_name":"test_send_rapid_events.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"463479341","text":"import datajoint as dj\nimport os, re, inspect\nimport numpy as np\nfrom churchland_pipeline_python import lab, acquisition, equipment, reference, processing\nfrom churchland_pipeline_python.utilities import speedgoat, datajointutils\nfrom decimal import Decimal\nfrom functools import reduce\nfrom typing import Tuple, List\n\nDataJointTable = dj.user_tables.UserTable\n\nschema = dj.schema(dj.config.get('database.prefix') + 'churchland_analyses_pacman_acquisition')\n\n# =======\n# LEVEL 0\n# =======\n\n@schema\nclass ArmPosture(dj.Lookup):\n    definition = \"\"\"\n    # Arm posture\n    -> lab.Monkey\n    arm_posture_id: tinyint unsigned # arm posture ID number\n    ---\n    elbow_flexion: tinyint unsigned # elbow flexion angle (deg)\n    shoulder_flexion: tinyint unsigned # shoulder flexion angle relative to coronal plane (deg)\n    \"\"\"\n    \n    contents = [\n        ['Cousteau', 0, 90, 65],\n        ['Cousteau', 1, 90, 40],\n        ['Cousteau', 2, 90, 75]\n    ]\n\n\n@schema\nclass ConditionParams(dj.Lookup):\n    \"\"\"\n    Task condition parameters. Each condition consists of a unique combination of force, \n    stimulation, and general target trajectory parameters. For conditions where stimulation\n    was not delivered, stimulation parameters are left empty. 
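Conditions are matched against these parameters when trials are ingested, so repeated parameter sets reuse an existing condition_id. 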
Each condition also includes\n a set of parameters unique to the particular type of target trajectory.\n \"\"\"\n\n definition = \"\"\"\n condition_id: smallint unsigned # condition ID number\n \"\"\"\n\n class Force(dj.Part):\n definition = \"\"\"\n # Force parameters\n -> master\n force_id: smallint unsigned # force ID number\n ---\n force_max: tinyint unsigned # maximum force (N)\n force_offset: decimal(5,4) # baseline force (N)\n force_inverted: bool # whether pushing on the load cell moves PacMan up (False) or down (True) onscreen\n \"\"\"\n \n class Stim(dj.Part):\n definition = \"\"\"\n # CereStim parameters\n -> master\n stim_id: smallint unsigned # stim ID number\n ---\n -> equipment.ElectrodeArrayModel.Electrode # stim electrode\n stim_current: smallint unsigned # stim current (uA)\n stim_polarity: tinyint unsigned # cathodic (0) or anodic (1) first //TODO check this\n stim_pulses: tinyint unsigned # number of pulses in stim train\n stim_width1: smallint unsigned # first pulse duration (us)\n stim_width2: smallint unsigned # second pulse duration (us)\n stim_interphase: smallint unsigned # interphase duration (us)\n stim_frequency: smallint unsigned # stim frequency (Hz)\n \"\"\"\n\n class Target(dj.Part):\n definition = \"\"\"\n # Target force profile parameters\n -> master\n target_id: smallint unsigned # target ID number\n ---\n target_duration: decimal(5,4) # target duration (s)\n target_offset: decimal(5,4) # target offset from baseline (proportion playable window)\n target_pad_pre: decimal(5,4) # duration of \"padding\" dots preceding target force profile (s)\n target_pad_post: decimal(5,4) # duration of \"padding\" dots following target force profile (s)\n \"\"\"\n \n class Static(dj.Part):\n definition = \"\"\"\n # Static force profile parameters\n -> master.Target\n \"\"\"\n\n def proj_label(self, keep_self: bool=True, n_sigfigs: int=4):\n \"\"\"Project label.\"\"\"\n\n rel = (self * ConditionParams.Target * ConditionParams.Force) \\\n .proj(amp='CONVERT(ROUND(force_max*target_offset,{}), char)'.format(n_sigfigs)) \\\n .proj(condition_label='CONCAT(\"Static (\", amp, \" N)\")')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n def proj_rank(self, keep_self: bool=True):\n \"\"\"Project ranking based on frequency and amplitude.\"\"\"\n\n rel = (self * ConditionParams.Target * ConditionParams.Force) \\\n .proj(amp='CONVERT(ROUND(force_max*target_offset, 4), char)') \\\n .proj(condition_rank='CONCAT(\"00_\", LPAD(amp, 8, 0))')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n\n class Ramp(dj.Part):\n definition = \"\"\"\n # Linear ramp force profile parameters\n -> master.Target\n ---\n target_amplitude: decimal(5,4) # target amplitude (proportion playable window)\n \"\"\"\n\n def proj_label(self, keep_self: bool=True, n_sigfigs: int=4):\n \"\"\"Project label.\"\"\"\n\n rel = (self * ConditionParams.Target * ConditionParams.Force) \\\n .proj(amp='CONVERT(ROUND(force_max*target_amplitude/target_duration,{}), char)'.format(n_sigfigs)) \\\n .proj(condition_label='CONCAT(\"Ramp (\", amp, \" N/s)\")')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n def proj_rank(self, keep_self: bool=True):\n \"\"\"Project ranking based on frequency and amplitude.\"\"\"\n\n rel = (self * ConditionParams.Target * ConditionParams.Force) \\\n .proj(amp='ROUND(force_max*target_amplitude/target_duration, 4)') \\\n .proj(condition_rank='CONCAT(\"10_\", LPAD(CONVERT(ABS(amp),char), 8, 0), \"_\", IF(amp>0, \"0\", \"1\"))')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n \n 
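# Note on ranking: condition_rank strings start with a fixed type prefix (00_ static,
    # 10_ ramp, 20_ sine, 30_ chirp) followed by zero-padded numeric fields, so a
    # plain lexicographic sort orders conditions by type, then frequency, then amplitude.
    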
class Sine(dj.Part):\n definition = \"\"\"\n # Sinusoidal (single-frequency) force profile parameters\n -> master.Target\n ---\n target_amplitude: decimal(5,4) # target amplitude (proportion playable window)\n target_frequency: decimal(5,4) # target frequency (Hz)\n \"\"\"\n\n def proj_label(self, keep_self: bool=True, n_sigfigs: int=4):\n \"\"\"Project label.\"\"\"\n\n rel = (self * ConditionParams.Force) \\\n .proj(\n amp='CONVERT(ROUND(target_amplitude*force_max,{}), char)'.format(n_sigfigs), \n freq='CONVERT(ROUND(target_frequency,{}), char)'.format(n_sigfigs)\n ) \\\n .proj(condition_label='CONCAT(\"Sine (\", amp, \" N, \", freq, \" Hz)\")')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n def proj_rank(self, keep_self: bool=True):\n \"\"\"Project ranking based on frequency and amplitude.\"\"\"\n\n rel = (self * ConditionParams.Target * ConditionParams.Force) \\\n .proj(\n amp='ROUND(target_amplitude*force_max, 4)', \n freq='CONVERT(ROUND(target_frequency, 4), char)'\n ) \\\n .proj(condition_rank=(\n 'CONCAT(\"20_\", LPAD(freq, 8, 0), \"_\", LPAD(CONVERT(ABS(amp),char), 8, 0), \"_\", IF(amp>0, \"0\", \"1\"))'\n ))\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n \n class Chirp(dj.Part):\n definition = \"\"\"\n # Chirp force profile parameters\n -> master.Target\n ---\n target_amplitude: decimal(5,4) # target amplitude (proportion playable window)\n target_frequency_init: decimal(5,4) # target initial frequency (Hz)\n target_frequency_final: decimal(5,4) # target final frequency (Hz)\n \"\"\"\n\n def proj_label(self, keep_self: bool=True, n_sigfigs: int=4):\n \"\"\"Project label.\"\"\"\n\n rel = (self * ConditionParams.Force) \\\n .proj(\n amp='CONVERT(ROUND(force_max*target_amplitude,{}), char)'.format(n_sigfigs),\n freq1='CONVERT(ROUND(target_frequency_init,{}), char)'.format(n_sigfigs),\n freq2='CONVERT(ROUND(target_frequency_final,{}), char)'.format(n_sigfigs),\n ) \\\n .proj(condition_label='CONCAT(\"Chirp (\", amp, \" N, \", freq1, \"-\", freq2, \" Hz)\")')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n def proj_rank(self, keep_self: bool=True):\n \"\"\"Project ranking based on frequency and amplitude.\"\"\"\n\n rel = (self * ConditionParams.Force) \\\n .proj(\n amp='ROUND(force_max*target_amplitude, 4)',\n freq1='LPAD(CONVERT(ROUND(target_frequency_init, 4), char), 8, 0)',\n freq2='LPAD(CONVERT(ROUND(target_frequency_final, 4), char), 8, 0)',\n ) \\\n .proj(condition_rank=(\n 'CONCAT(\"30_\", freq1, \"_\", freq2, \"_\", LPAD(CONVERT(ABS(amp),char), 8, 0), \"_\", IF(amp>0, \"0\", \"1\"))'\n ))\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n\n def proj_label(self, n_sigfigs: int=4):\n \"\"\"Project label in all child target tables and joins with master.\"\"\"\n\n target_children = datajointutils.get_parts(ConditionParams.Target)\n\n target_labels = [dj.U('condition_id', 'condition_label') & (x & self).proj_label(n_sigfigs=n_sigfigs) for x in target_children]\n\n labeled_self = reduce(lambda x,y: x+y, target_labels)\n\n return labeled_self\n\n\n def proj_rank(self):\n \"\"\"Project rank in all child target tables and joins with master.\"\"\"\n\n target_children = datajointutils.get_parts(ConditionParams.Target)\n\n target_ranks = [dj.U('condition_id', 'condition_rank') & (x & self).proj_rank() for x in target_children]\n\n ranked_self = reduce(lambda x,y: x+y, target_ranks)\n\n return ranked_self\n\n\n def get_common_attributes(\n self, \n table: DataJointTable, \n include: List[str]=['label','rank'],\n n_sigfigs: int=4,\n ) -> List[dict]:\n 
\"\"\"Fetches most common attributes in the input table.\n\n Args:\n table (DataJointTable): DataJoint table to use in the restriction\n include (List[str], optional): Attributes to project into the condition table. \n Options: ['label','rank','time','force']. Defaults to ['label','rank'].\n n_sigfigs (int, optional): Number of significant figures include in label. Defaults to 4.\n\n Returns:\n condition_attributes (List[dict]): list of attributes\n \"\"\"\n\n # count condition frequency in the table\n condition_counts = self.aggr(table, count='count(*)')\n\n # restrict by most counts\n max_count = dj.U().aggr(condition_counts, count='max(count)').fetch1('count')\n self = self & (condition_counts & 'count={}'.format(max_count)).proj()\n\n if include is not None:\n\n # project label\n self = self * ConditionParams().proj_label(n_sigfigs=n_sigfigs) if 'label' in include else self\n\n # project rank\n self = self * ConditionParams().proj_rank() if 'rank' in include else self\n\n # fetch attributes\n condition_attributes = self.fetch(as_dict=True, order_by=('condition_rank' if 'rank' in include else None))\n\n # aggregate target attributes\n target_attributes = []\n target_attributes.append('condition_time') if 'time' in include else None\n target_attributes.append('condition_force') if 'force' in include else None\n\n if any(target_attributes):\n\n # ensure matched sample rates across sessions\n behavior_recordings = acquisition.BehaviorRecording & table\n unique_sample_rates = dj.U('behavior_recording_sample_rate') & behavior_recordings\n assert len(unique_sample_rates) == 1, 'Mismatched sample rates!'\n\n fs = unique_sample_rates.fetch1('behavior_recording_sample_rate')\n\n # join condition table with secondary attributes\n for cond_attr in condition_attributes:\n\n t, f = ConditionParams.target_force_profile(cond_attr['condition_id'], fs)\n\n if 'time' in include:\n cond_attr.update(condition_time=t)\n\n if 'force' in include:\n cond_attr.update(condition_force=f)\n\n else:\n condition_attributes = self.fetch(as_dict=True)\n\n return condition_attributes\n\n \n @classmethod\n def parse_params(self, params: dict, session_date: str=''):\n \"\"\"\n Parses a dictionary constructed from a set of Speedgoat parameters (written\n on each trial) in order to extract the set of attributes associated with each\n part table of ConditionParams\n \"\"\"\n\n # force attributes\n force_attr = dict(\n force_max = params['frcMax'], \n force_offset = params['frcOff'],\n force_inverted = params['frcPol']==-1\n )\n\n cond_rel = self.Force\n\n # stimulation attributes\n if params.get('stim')==1:\n \n prog = re.compile('stim([A-Z]\\w*)')\n stim_attr = {\n 'stim_' + prog.search(k).group(1).lower(): v\n for k,v in zip(params.keys(), params.values()) \n if prog.search(k) is not None and k != 'stimDelay'\n }\n\n # replace stim electrode with electrode array model electrode key\n try:\n ephys_stimulation_rel = acquisition.EphysStimulation & {'session_date': session_date}\n electrode_model_key = (equipment.ElectrodeArrayModel & ephys_stimulation_rel).fetch1('KEY')\n\n except:\n print('Missing EphysStimulation entry for session {}'.format(session_date))\n\n else:\n # get electrode array model electrode key (convert index from matlab convention)\n electrode_idx_key = {'electrode_idx': stim_attr['stim_electrode'] - 1}\n electrode_key = (equipment.ElectrodeArrayModel.Electrode & electrode_model_key & electrode_idx_key).fetch1('KEY')\n stim_attr.update(**electrode_key)\n\n # remove stim electrode attribute\n 
stim_attr.pop('stim_electrode')\n\n cond_rel = cond_rel * self.Stim\n \n else:\n stim_attr = dict()\n cond_rel = cond_rel - self.Stim\n\n # target attributes\n targ_attr = dict(\n target_duration = params['duration'],\n target_offset = params['offset'][0]\n )\n\n # target pad durations\n pad_dur = [v for k,v in params.items() if re.search('padDur',k) is not None]\n if len(pad_dur) == 1:\n targ_attr.update(target_pad_pre=pad_dur[0], target_pad_post=pad_dur[0])\n\n # target type attributes\n if params['type'] == 'STA':\n\n targ_type_rel = self.Static\n targ_type_attr = dict()\n\n elif params['type'] == 'RMP':\n\n targ_type_rel = self.Ramp\n targ_type_attr = dict(\n target_amplitude = params['amplitude'][0]\n )\n\n elif params['type'] == 'SIN':\n\n targ_type_rel = self.Sine\n targ_type_attr = dict(\n target_amplitude = params['amplitude'][0],\n target_frequency = params['frequency'][0]\n )\n\n elif params['type'] == 'CHP':\n\n targ_type_rel = self.Chirp\n targ_type_attr = dict(\n target_amplitude = params['amplitude'][0],\n target_frequency_init = params['frequency'][0],\n target_frequency_final = params['frequency'][1]\n )\n\n cond_rel = cond_rel * self.Target * targ_type_rel\n\n # aggregate all parameter attributes into a dictionary\n cond_attr = dict(\n Force = force_attr,\n Stim = stim_attr,\n Target = targ_attr,\n TargetType = targ_type_attr\n )\n\n return cond_attr, cond_rel, targ_type_rel\n \n @classmethod\n def target_force_profile(self, condition_id: int, fs: int):\n\n # ensure integer frequency\n assert fs == round(fs), 'Non-integer frequency'\n fs = int(fs)\n\n # join condition table with part tables\n joined_table, part_tables = datajointutils.join_parts(self, {'condition_id': condition_id}, depth=2, context=inspect.currentframe())\n\n # condition parameters\n cond_params = joined_table.fetch1()\n\n # convert sample rate to decimal type with precision inferred from condition parameters\n fs_dec = Decimal(fs).quantize(cond_params['target_duration'])\n\n # lengths of each target region\n target_lens = (\n int(round(cond_params['target_pad_pre'] * fs_dec)),\n int(round(cond_params['target_duration'] * fs_dec)) + 1,\n int(round(cond_params['target_pad_post'] * fs_dec))\n )\n\n # time samples\n xi = (\n np.arange(-target_lens[0], 0),\n np.arange(0, target_lens[1]),\n np.arange(target_lens[1], sum(target_lens[-2:]))\n )\n\n # target force functions\n if self.Static in part_tables:\n\n force_fcn = lambda t,c: c['target_offset'] * np.zeros(t.shape)\n\n elif self.Ramp in part_tables:\n\n force_fcn = lambda t,c: (c['target_amplitude']/c['target_duration']) * t\n\n elif self.Sine in part_tables:\n\n force_fcn = lambda t,c: c['target_amplitude']/2 * (1 - np.cos(2*np.pi*c['target_frequency']*t))\n\n elif self.Chirp in part_tables:\n\n force_fcn = lambda t,c: c['target_amplitude']/2 * \\\n (1 - np.cos(2*np.pi*t * (c['target_frequency_init'] + (c['target_frequency_final']-c['target_frequency_init'])/(2*c['target_duration'])*t)))\n\n else:\n print('Unrecognized condition table')\n\n # convert condition parameters to float\n cond_params = {k:float(v) if isinstance(v,Decimal) else v for k,v in cond_params.items()}\n\n # construct target force profile\n force = np.hstack((\n force_fcn(xi[1][0]/fs, cond_params) * np.ones(target_lens[0]),\n force_fcn(xi[1]/fs, cond_params),\n force_fcn(xi[1][-1]/fs, cond_params) * np.ones(target_lens[2])\n ))\n\n # add force offset\n force += cond_params['target_offset']\n\n # scale force from screen units to Newtons\n force *= cond_params['force_max']\n\n # 
concatenate time samples and convert to seconds\n t = np.hstack(xi) / fs\n\n # round time to maximum temporal precision\n t = t.round(int(np.ceil(np.log10(fs))))\n\n return t, force\n\n\n@schema\nclass TaskState(dj.Lookup):\n definition = \"\"\"\n # Simulink Stateflow task state IDs and names\n task_state_id: tinyint unsigned # task state ID number\n ---\n task_state_name: varchar(255) # task state name\n \"\"\"\n \n\n# =======\n# LEVEL 1\n# =======\n \n@schema\nclass Behavior(dj.Imported):\n definition = \"\"\"\n # Behavioral data imported from Speedgoat\n -> acquisition.BehaviorRecording\n \"\"\"\n\n key_source = acquisition.BehaviorRecording\n\n class Condition(dj.Part):\n definition = \"\"\"\n # Condition data\n -> master\n -> ConditionParams\n ---\n condition_time: longblob # condition time vector (s)\n condition_force: longblob # condition force profile (N)\n \"\"\"\n\n class SaveTag(dj.Part):\n definition = \"\"\"\n # Save tags and associated notes\n -> master\n save_tag: tinyint unsigned # save tag number\n \"\"\"\n\n class Trial(dj.Part):\n definition = \"\"\"\n # Trial data\n -> master.Condition\n trial: smallint unsigned # session trial number\n ---\n -> master.SaveTag\n successful_trial: bool # whether the trial was successful\n simulation_time: longblob # task model simulation time\n task_state: longblob # task state IDs\n force_raw_online: longblob # amplified output of load cell\n force_filt_online: longblob # online (boxcar) filtered and normalized force used to control Pac-Man\n reward: longblob # TTL signal indicating the delivery of juice reward\n photobox: longblob # photobox signal\n stim = null: longblob # TTL signal indicating the delivery of a stim pulse\n \"\"\"\n\n def process_force(self, data_type='raw', apply_filter=True, keep_keys=False):\n\n # aggregate load cell parameters per session\n load_cell_params = (acquisition.Session.Hardware & {'hardware': '5lb Load Cell'}) * equipment.Hardware.Parameter & self\n\n force_capacity_per_session = dj.U(*acquisition.Session.primary_key) \\\n .aggr((load_cell_params & {'equipment_parameter': 'force capacity'}), force_capacity='equipment_parameter_value')\n\n voltage_output_per_session = dj.U(*acquisition.Session.primary_key) \\\n .aggr((load_cell_params & {'equipment_parameter': 'voltage output'}), voltage_output='equipment_parameter_value')\n\n load_cell_params_per_session = force_capacity_per_session * voltage_output_per_session\n\n # 25 ms Gaussian filter\n filter_rel = processing.Filter.Gaussian & {'sd':25e-3, 'width':4}\n\n # join trial force data with force and load cell parameters\n force_rel = self * ConditionParams.Force * load_cell_params_per_session\n\n # fetch force data\n data_type_attr = {'raw':'force_raw_online', 'filt':'force_filt_online'}\n data_attr = data_type_attr[data_type]\n force_data = force_rel \\\n .proj(data_attr, 'force_max', 'force_offset', 'force_capacity', 'voltage_output') \\\n .fetch(as_dict=True, order_by='trial')\n\n # sample rate\n fs = (acquisition.BehaviorRecording & self).fetch1('behavior_recording_sample_rate')\n\n # process trial data\n for f in force_data:\n\n f[data_attr] = f[data_attr].copy()\n\n # normalize force (V) by load cell capacity (V)\n f[data_attr] /= f['voltage_output']\n\n # convert force to proportion of maximum load cell output (N)\n f[data_attr] *= f['force_capacity']/f['force_max']\n\n # subtract baseline force (N)\n f[data_attr] -= float(f['force_offset'])\n\n # multiply force by maximum gain (N)\n f[data_attr] *= f['force_max']\n\n # filter\n if apply_filter:\n 
f[data_attr] = filter_rel.filt(f[data_attr], fs)\n\n # pop force parameters\n for key in ['force_id', 'force_max', 'force_offset', 'force_capacity', 'voltage_output']:\n [f.pop(key) for f in force_data]\n\n # limit output to force signal\n if not keep_keys:\n force_data = np.array([f[data_attr] for f in force_data])\n\n return force_data \n \n def make(self, key):\n\n self.insert1(key)\n\n if (acquisition.Session.Hardware & key & {'hardware': 'Speedgoat'}):\n\n # behavior sample rate\n fs = int((acquisition.BehaviorRecording & key).fetch1('behavior_recording_sample_rate'))\n\n # summary file path\n summary_file_path = (acquisition.BehaviorRecording.File & key & {'behavior_file_extension': 'summary'})\\\n .proj_file_path().fetch1('behavior_file_path')\n\n # ensure local path\n summary_file_path = reference.EngramTier.ensure_local(summary_file_path)\n\n # read summary file\n summary = speedgoat.read_task_states(summary_file_path)\n\n # update task states\n TaskState.insert(summary, skip_duplicates=True)\n\n # parameter and data file paths\n params_file_paths = (acquisition.BehaviorRecording.File & key & {'behavior_file_extension': 'params'})\\\n .proj_file_path().fetch('behavior_file_path')\n\n data_file_paths = (acquisition.BehaviorRecording.File & key & {'behavior_file_extension': 'data'})\\\n .proj_file_path().fetch('behavior_file_path')\n\n # ensure local paths\n params_file_paths = [reference.EngramTier.ensure_local(pth) for pth in params_file_paths]\n data_file_paths = [reference.EngramTier.ensure_local(pth) for pth in data_file_paths]\n\n # populate conditions from parameter files\n for params_path in params_file_paths:\n\n # trial number\n trial = re.search(r'beh_(\\d*)', params_path).group(1)\n\n # ensure matching data file exists\n if params_path.replace('params','data') not in data_file_paths:\n\n print('Missing data file for trial {}'.format(trial))\n\n else:\n # read params file\n params = speedgoat.read_trial_params(params_path)\n\n if not params:\n continue\n\n # extract condition attributes from params file\n cond_attr, cond_rel, targ_type_rel = ConditionParams.parse_params(params, key['session_date'])\n\n # aggregate condition part table parameters into a single dictionary\n all_cond_attr = {k: v for d in list(cond_attr.values()) for k, v in d.items()}\n \n # insert new condition if none exists\n if not(cond_rel & all_cond_attr):\n\n # insert condition table\n new_cond_id = datajointutils.next_unique_int(ConditionParams, 'condition_id')\n cond_key = {'condition_id': new_cond_id}\n\n ConditionParams.insert1(cond_key)\n\n # insert Force, Stim, and Target tables\n for cond_part_name in ['Force', 'Stim', 'Target']:\n\n # attributes for part table\n cond_part_attr = cond_attr[cond_part_name]\n\n if not(cond_part_attr):\n continue\n\n cond_part_rel = getattr(ConditionParams, cond_part_name)\n cond_part_id = cond_part_name.lower() + '_id'\n\n if not(cond_part_rel & cond_part_attr):\n\n cond_part_attr[cond_part_id] = datajointutils.next_unique_int(cond_part_rel, cond_part_id)\n \n else:\n cond_part_attr[cond_part_id] = (cond_part_rel & cond_part_attr).fetch(cond_part_id, limit=1)[0]\n\n cond_part_rel.insert1(dict(**cond_key, **cond_part_attr))\n\n # insert target type table\n targ_type_rel.insert1(dict(**cond_key, **cond_attr['TargetType'], target_id=cond_attr['Target']['target_id']))\n \n\n # populate trials from data files\n success_state = (TaskState() & 'task_state_name=\"Success\"').fetch1('task_state_id')\n\n for data_path in data_file_paths:\n\n # trial number\n trial = 
int(re.search(r'beh_(\\d*)',data_path).group(1))\n\n # find matching parameters file\n try:\n params_path = next(filter(lambda f: data_path.replace('data','params')==f, params_file_paths))\n except StopIteration:\n print('Missing parameters file for trial {}'.format(trial))\n else:\n # convert params to condition keys\n params = speedgoat.read_trial_params(params_path)\n\n if not params:\n continue\n\n cond_attr, cond_rel, targ_type_rel = ConditionParams.parse_params(params, key['session_date'])\n\n # read data\n data = speedgoat.read_trial_data(data_path, success_state, fs)\n\n if not data:\n continue\n \n # aggregate condition part table parameters into a single dictionary\n all_cond_attr = {k: v for d in list(cond_attr.values()) for k, v in d.items()}\n\n # insert condition data\n cond_id = (cond_rel & all_cond_attr).fetch1('condition_id')\n cond_key = dict(**key, condition_id=cond_id)\n if not(self.Condition & cond_key):\n t, force = ConditionParams.target_force_profile(cond_id, fs)\n cond_key.update(condition_time=t, condition_force=force)\n self.Condition.insert1(cond_key, allow_direct_insert=True)\n\n # insert save tag key\n save_tag_key = dict(**key, save_tag=params['saveTag'])\n if not (self.SaveTag & save_tag_key):\n self.SaveTag.insert1(save_tag_key)\n\n # insert trial data\n trial_key = dict(**key, trial=trial, condition_id=cond_id, **data, save_tag=params['saveTag'])\n self.Trial.insert1(trial_key)\n\n else: \n print('Unrecognized task controller')\n return None","sub_path":"pacman_pipeline_python/pacman_acquisition.py","file_name":"pacman_acquisition.py","file_ext":"py","file_size_in_byte":29384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"57476203","text":"from __future__ import (print_function,\n unicode_literals,\n division)\nfrom future.builtins import str, open, range, dict\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nfrom spikevo import *\nfrom spikevo.pynn_transforms import PyNNAL\nimport argparse\nfrom pprint import pprint\n\nbackend = 'genn'\nneuron_class = 'IF_cond_exp'\n# heidelberg's brainscales seems to like these params\ne_rev = 92 #mV\n# e_rev = 500.0 #mV\n\nbase_params = {\n 'cm': 0.09, # nF\n 'v_reset': -70., # mV\n 'v_rest': -65., # mV\n 'v_thresh': -55., # mV\n # 'e_rev_I': -e_rev, #mV\n # 'e_rev_E': 0.,#e_rev, #mV\n 'tau_m': 10., # ms\n 'tau_refrac': 2.0, # ms\n 'tau_syn_E': 1.0, # ms\n 'tau_syn_I': 5.0, # ms\n\n}\n\nbase_params['e_rev_I'] = -e_rev\nbase_params['e_rev_E'] = 0.0\n\ntimestep = 0.1\nmax_w = 0.01\nstart_w = max_w / 2.0\n\ntau_plus = 5.0\ntau_minus = 10.0\na_plus = 0.01\na_minus = 0.005\ndelays = range(1, 11)\n\nstart_dt, num_dt = -15, 30\nsim_time = np.round(1.5 * num_dt)\nstart_t = sim_time - num_dt\ntrigger_t = start_t + (start_dt + num_dt//2)\nnum_neurons = num_dt\n\npynnx = PyNNAL(backend)\npynnx._sim.setup(timestep=timestep, min_delay=timestep,\n backend='SingleThreadedCPU')\n\npprojs = {}\nfor delay in delays:\n\n a_plus_local = a_plus if delay == 1.0 else -a_plus\n a_minus_local = a_minus if delay == 1.0 else -a_minus\n\n projs = {}\n for dt in range(start_dt, start_dt+num_dt, 1):\n pre_spike_times = [[trigger_t + dt]]\n trigger_spike_times = [[trigger_t]]\n\n trigger = pynnx.Pop(1, 'SpikeSourceArray',\n {'spike_times': trigger_spike_times})\n\n post = pynnx.Pop(1, neuron_class, base_params)\n pynnx.set_recording(post, 'spikes')\n\n pre = pynnx.Pop(1, 'SpikeSourceArray',\n {'spike_times': pre_spike_times})\n\n tr2post = pynnx.Proj(trigger, post, 
'OneToOneConnector', 0.1, 1.0, label='trigger connection')\n\n\n stdp = {\n 'timing_dependence': {\n 'name': 'SpikePairRule',\n 'params': {'tau_plus': tau_plus,\n 'tau_minus': tau_minus,\n # 'tau_minus': 33.7,\n },\n },\n 'weight_dependence': {\n 'name':'AdditiveWeightDependence',\n # 'name':'MultiplicativeWeightDependence',\n 'params': {\n # 'w_min': (static_w['KC to DN'])/10.0,\n 'w_min': 0.0,\n 'w_max': max_w,\n # 'w_max': (static_w['KC to DN']),\n 'A_plus': a_plus_local,\n 'A_minus': a_minus_local,\n # 'A_plus': max_w * a_plus,\n # 'A_minus': max_w * a_minus,\n },\n }\n }\n\n pre2post = pynnx.Proj(pre, post, 'AllToAllConnector', start_w, delay,\n stdp=stdp, label='plastic connection')\n\n projs[dt] = pre2post\n\n pprojs[delay] = projs\n\npynnx.run(sim_time)\nexperiments = {}\nfor delay in pprojs:\n dt_dw = {}\n for dt in pprojs[delay]:\n dt_dw[dt] = (pynnx.get_weights(pprojs[delay][dt])[0,0] - start_w) / max_w\n experiments[delay] = dt_dw\n\npynnx.end()\n\n\n\nplt.figure()\nax = plt.subplot()\nplt.axvline(0, linestyle='--', color='gray')\nplt.axhline(0, linestyle='--', color='gray')\n\nfor delay in experiments:\n dt_dw = experiments[delay]\n dts = sorted(dt_dw.keys())\n dws = [dt_dw[dt] for dt in dts]\n plt.plot(dts, dws, label=delay)\n\nmax_dw = np.max(np.abs(dws)) * 1.5\nax.set_ylim(-max_dw, max_dw)\nax.set_xlabel(r'$\\Delta t = t_{pre} - t_{post}$ [ms]')\nax.set_ylabel(r'$\\Delta w $')\nplt.legend()\nplt.grid()\nplt.show()\n\nnp.savez_compressed('delay_experiments.npz', experiments=experiments)","sub_path":"codebase/misc_tests/stdp_curve.py","file_name":"stdp_curve.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"255638872","text":"#!/usr/bin/env python3\n\nfrom math import inf, sqrt\nfrom uuid import uuid4\n\nfrom ev3dev.ev3 import Sound\nimport paho.mqtt.client as mqtt\n\nfrom communication import Communication\nfrom motioncontrol import MotionControl\nfrom planet import Direction, Planet\n\nADJACENCIES = {\n Direction.NORTH: [(-1, 1), (0, 1), (1, 1)],\n Direction.EAST: [(1, 1), (1, 0), (1, -1)],\n Direction.SOUTH: [(1, -1), (0, -1), (-1, -1)],\n Direction.WEST: [(-1, -1), (-1, 0), (-1, 1)]\n}\n\ndef play_daisy():\n with open('daisy.txt') as f:\n notes = [line.rstrip('\\n') for line in f.readlines()]\n notes = ' '.join(notes)\n s = Sound()\n s.beep(notes)\n\nclass RobotBrain:\n def __init__(self, testplanet=None):\n\n mqtt_client = mqtt.Client(\n client_id=str(uuid4()),\n clean_session=False,\n protocol=mqtt.MQTTv31)\n\n self.communication = Communication(mqtt_client, planet=testplanet)\n\n self.motioncontrol = MotionControl()\n self.planet = Planet()\n\n self.visited = set()\n self.current_path = []\n self.target = None\n self.use_target_heuristic = False\n\n # calibrate and drive to starting node\n self.motioncontrol.calibrate()\n start_color, _, _ = self.motioncontrol.follow()\n\n start = self.communication.send_ready()\n\n if __debug__:\n print(\"received starting node from server: {}\".format(start))\n\n self.planet.set_starting_node(start, start_color)\n\n self.current_node = start\n self.visited.add(start)\n self.motioncontrol.update_position(Planet.from_node(start))\n\n # discover exits\n self.motioncontrol.update_rotation(Planet.from_direction(Direction.NORTH))\n exits, self.current_direction = self.motioncontrol.scan_paths()\n\n if __debug__:\n print('discovered exits: {}'.format(exits))\n\n for exit in exits:\n if exit == Direction.SOUTH:\n continue\n 
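# the southern exit is the path the robot just arrived on; all others start out undiscovered\n            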
self.planet.add_undiscovered_exit(self.current_node + (exit,))\n\n def explore(self):\n while True:\n if __debug__:\n print(\"exploring, current pose is: {}\".format(\n self.current_node + (self.current_direction,)))\n\n # follow path to next node\n start = self.current_node + (self.current_direction,)\n reached = self.motioncontrol.follow()\n\n destination = None\n blocked = False\n if reached is None:\n destination = start\n blocked = True\n\n else:\n color, position, rotation = reached\n node = self.planet.to_node(position, color)\n direction = self.planet.to_direction(rotation)\n destination = node + (Planet.invert_direction(direction),)\n\n if __debug__:\n print(\"reached node at: {}\".format(destination))\n\n destination, weight, blocked, paths, target = \\\n self.communication.send_path(start, destination, blocked)\n\n if __debug__:\n print(\"received path response from server:\")\n print(\" destination: {}:\".format(destination))\n print(\" weight: {}\".format(weight))\n print(\" paths: {}\".format(paths))\n print(\" target: {}\".format(target))\n\n self.planet.add_path(start, destination, weight)\n self.planet.mark_exit_discovered(start)\n self.planet.mark_exit_discovered(destination)\n\n if target:\n self.target=target\n\n if __debug__:\n print(\"new target!\")\n\n received_paths = False\n if paths:\n received_paths = True\n for start, dest, weight in paths:\n self.planet.add_path(start, dest, weight)\n self.planet.mark_exit_discovered(start)\n self.planet.mark_exit_discovered(dest)\n\n x, y, incoming_direction = destination\n direction = Planet.invert_direction(incoming_direction)\n\n self.current_node = (x, y)\n self.current_direction = direction\n\n if self.target:\n target_x, target_y = self.target\n self.use_target_heuristic = sqrt((target_x - x)**2 + \\\n (target_y - y)**2) < 15\n\n if not self.use_target_heuristic and __debug__:\n print(\"WARNING: ignoring target heuristics!\")\n\n self.motioncontrol.update_position(\n Planet.from_node(self.current_node))\n self.motioncontrol.update_rotation(\n Planet.from_direction(self.current_direction))\n\n already_visited = True\n if self.current_node not in self.visited:\n already_visited = False\n\n exits, direction = self.motioncontrol.scan_paths()\n self.current_direction = direction\n\n self.motioncontrol.update_rotation(Planet.from_direction(\n self.current_direction))\n\n if __debug__:\n print('discovered exits: {}'.format(exits))\n\n for exit in exits:\n if exit == incoming_direction:\n continue\n self.planet.add_undiscovered_exit(self.current_node + (exit,))\n\n self.visited.add(self.current_node)\n\n current_x, current_y = self.current_node\n known_paths = self.planet.get_paths()\n undiscovered = self.planet.get_undiscovered_exits()\n\n # exploration completed\n target_path_known = self.target and \\\n self.planet.shortest_path(self.current_node, self.target)\n\n if not undiscovered and not target_path_known:\n self.communication.send_exploration_completed(\"...\")\n\n if __debug__:\n print(\"completey discovered planet!\")\n\n break\n\n # progress towards target\n if self.target is not None:\n if __debug__:\n print(\"There is a target...\")\n\n if self.current_node == self.target:\n self.communication.send_target_reached(\"...\")\n\n if __debug__:\n print(\"finished!\")\n\n break\n\n shortest_path = \\\n self.planet.shortest_path(self.current_node, self.target)\n if shortest_path:\n if __debug__:\n print(\"found shortest path to target\")\n\n self.current_path = shortest_path\n elif self.use_target_heuristic:\n if 
__debug__:\n print(\"no known path to target, finding path to nearest projected node\")\n\n target_x, target_y = self.target\n\n best_node = None\n best_node_distance = inf\n best_exit = None\n best_path = None\n\n reachable_nodes = \\\n self.planet.get_connected_known_nodes(self.current_node)\n\n for node in reachable_nodes:\n shortest_path = self.planet.shortest_path(\n self.current_node, node)\n\n if shortest_path is None:\n continue\n\n exits = self.planet.get_undiscovered_exits().get(node)\n if not exits:\n continue\n\n node_x, node_y = node\n\n for exit in exits:\n projected_x, projected_y = ADJACENCIES[exit][1]\n\n projected_node_distance = \\\n sqrt((target_x - (node_x + projected_x))**2 + \\\n (target_y - (node_y + projected_y))**2)\n\n if projected_node_distance > best_node_distance:\n continue\n\n if projected_node_distance < best_node_distance or \\\n len(shortest_path) < len(best_path):\n\n best_node = node\n best_node_distance = projected_node_distance\n best_exit = exit\n best_path = shortest_path + [(node_x, node_y, exit)]\n\n self.current_path = best_path\n\n # current node not yet discovered\n if self.current_node in undiscovered and \\\n (not self.use_target_heuristic or target is None):\n\n if __debug__:\n print(\"current node not discovered\")\n\n undiscovered_exits = undiscovered[self.current_node]\n\n # prefer paths that are likely to lead to already discovered nodes\n preferred_exit_heuristics = {}\n\n for exit in undiscovered_exits:\n preferred_exit_heuristics[exit] = 0\n for delta_x, delta_y in ADJACENCIES[exit]:\n if (current_x + delta_x, current_y + delta_y) in known_paths:\n preferred_exit_heuristics[exit] += 1\n\n if __debug__:\n print(\"exit heuristics: {}\".format(preferred_exit_heuristics))\n\n while True:\n preferred_exit = None\n max_heuristic = -1\n for direction in [Direction.NORTH, Direction.EAST,\n Direction.SOUTH, Direction.WEST]:\n\n heuristic = preferred_exit_heuristics.get(direction)\n if heuristic is None:\n continue\n\n if heuristic > max_heuristic:\n preferred_exit = direction\n max_heuristic = heuristic\n\n if __debug__:\n print(\"choosing exit: \" + str(preferred_exit))\n\n turn = self.motioncontrol.turn_to(preferred_exit,\n sweep=already_visited)\n if turn == preferred_exit:\n break\n\n if __debug__:\n warn = \"WARNING: exit {} not found, marking as discovered\"\n print(warn.format(preferred_exit))\n\n self.planet.mark_exit_discovered(\n self.current_node + (preferred_exit,))\n preferred_exit_heuristics.pop(preferred_exit)\n\n self.current_direction = preferred_exit\n self.motioncontrol.update_rotation(\n Planet.from_direction(self.current_direction))\n\n continue\n\n elif (not self.current_path or received_paths) and \\\n (not self.use_target_heuristic or self.target is None):\n\n if __debug__:\n print(\"current node discovered, looking for nearest undiscovered node\")\n\n # find nearest node with unexplored exit\n nearest_undiscovered_node = None\n nearest_undiscovered_node_path = None\n nearest_undiscovered_node_distance = inf\n\n for node in undiscovered:\n path = self.planet.shortest_path(self.current_node, node)\n if not path:\n continue\n\n pathlen = len(path)\n\n if pathlen < nearest_undiscovered_node_distance:\n nearest_undiscovered_node = node\n nearest_undiscovered_node_path = path\n nearest_undiscovered_node_distance = pathlen\n\n self.current_path = nearest_undiscovered_node_path\n\n if __debug__:\n dbg = \"backtracking to nearest undiscovered node {} on path {}\"\n print(dbg.format(nearest_undiscovered_node,\n 
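# illustrative only: prints something like \"backtracking to nearest undiscovered node (2, 1) on path [...]\"\n 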
nearest_undiscovered_node_path))\n\n if __debug__:\n print(\"following path {}\".format(self.current_path))\n\n _, _, exit = self.current_path[0]\n self.current_path = self.current_path[1:]\n\n while self.motioncontrol.turn_to(exit, sweep=already_visited) != exit:\n pass\n\n self.current_direction = exit\n self.motioncontrol.update_rotation(\n Planet.from_direction(self.current_direction))\n\n play_daisy()\n","sub_path":"src/robotbrain.py","file_name":"robotbrain.py","file_ext":"py","file_size_in_byte":12743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"292581692","text":"import pandas as pd\r\nimport numpy as np\r\nimport time\r\nimport pickle\r\nimport sys\r\nimport os\r\nfrom datetime import timedelta\r\nfrom array import array\r\n\r\ndef init():\r\n global fldr\r\n try:\r\n from statsmodels.tsa.stattools import acf, pacf\r\n from statsmodels.tsa.arima_model import ARIMA\r\n except ImportError:\r\n import pip\r\n package_name='statsmodels'\r\n pip.main(['install', package_name])\r\n from statsmodels.tsa.stattools import acf, pacf\r\n from statsmodels.tsa.arima_model import ARIMA\r\n # serialize the model on disk in the special 'outputs' folder\r\n print (\"Read the model from model.pkl in directory \", fldr)\r\n \r\n fl = open(fldr+\"model.pkl\", 'rb')\r\n global ar_res\r\n ar_res = pickle.load( fl)\r\n fl.close()\r\n\r\ndef PrepareFcstData(strt, stp):\r\n df = pd.DataFrame(np.zeros(shape=(stp-strt+1, 1)))\r\n fcst = pd.DataFrame(np.zeros(shape=(stp-strt+1, 1)))\r\n\r\n for idx in range(strt, strt + len(df)):\r\n d = timedelta(hours=idx)\r\n df.iloc[idx - strt, 0] = pd.datetime(2017, 6, 19, 0, 0, 0, 0) + d\r\n # print(df)\r\n fcst.index = df.iloc[:, 0]\r\n fcst.index.name = 'time'\r\n fcst.columns = ['forecast']\r\n return fcst\r\n\r\ndef predictForecast(strt, stp):\r\n import json\r\n import numpy\r\n fst = PrepareFcstData(strt,stp)\r\n\r\n global ar_res\r\n fst['forecast']=ar_res.predict(start = strt-1, end= stp, dynamic= True)\r\n return fst\r\n\r\ndef run(inputString):\r\n import json\r\n import numpy\r\n try:\r\n input_list=json.loads(inputString)\r\n except ValueError:\r\n return 'Bad input: expecting a json encoded list of lists.'\r\n strt = int(input_list[0][\"start\"])\r\n stp = int(input_list[1][\"stop\"])\r\n print(\"start:\",strt)\r\n print(\"stop:\",stp)\r\n \r\n pred = predictForecast(strt,stp)\r\n return str(pred)\r\n\r\nglobal fldr\r\nfldr=\"\"\r\nif __name__ == \"__main__\":\r\n \r\n fldr = os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'] + \"outputs/\"\r\n # predict future values\r\n print ('Python version: {}'.format(sys.version))\r\n print('Pandas version:',pd.__version__)\r\n print ()\r\n init()\r\n #f = run('{\"input\":[{\"start\":\"127\"},{\"stop\":\"151\"}]}')\r\n f = run('[{\"start\":\"127\"},{\"stop\":\"151\"}]')\r\n\r\n print(\"Forecast Values:\")\r\n print(f)\r\n\r\n\r\n","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"123567607","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\n\nfrom newspapers.utils import find_one_tag, form_article_id\n\n\ndef check_economist_url(url, logger):\n parts = url.split('/')\n if parts[3] == '1843':\n return False\n\n # page not found error\n if url == 'https://www.economist.com/prospero/2020/01/09/how-to-save-culture-from-climate-change':\n return False\n\n import re\n pattern = 
re.compile('\\/\\d{4}\\/\\d{2}\\/\\d{2}\\/')\n match = pattern.findall(url)\n if len(match) == 1:\n return True\n return False\n\n\ndef clean_string(string, unwanted):\n for unw in unwanted:\n string = string.replace(unw, '')\n return string\n\n\ndef parse_economist_url(url, logger=None):\n import time\n # error without the sleep\n time.sleep(1)\n response = requests.get(url)\n html = response.text\n soup = BeautifulSoup(html, features=\"html5lib\")\n\n headline = find_one_tag(soup, 'span', {'class': 'article__headline', 'itemprop': 'headline'}).text\n\n body = find_one_tag(soup, 'div', {\n 'itemprop': 'text',\n 'class': 'ds-layout-grid ds-layout-grid--edged layout-article-body'\n })\n body = body.findAll('p')\n body = ''.join(p.text for p in body)\n\n unwanted = [\n 'For more coverage of climate change, register for The Climate Issue, our fortnightly newsletter, or visit our climate-change hub',\n 'Sign up to our new fortnightly climate-change newsletter hereThis article appeared in the Leaders section of the print edition under the headline \"The climate issue\"'\n ]\n body = clean_string(body, unwanted)\n\n app = find_one_tag(soup, 'script', {'type': 'application/json'})\n app = json.loads(app.text)\n meta = app['props']['pageProps']['metadata']\n published = meta['datePublished']\n modified = meta['dateModified']\n\n return {\n \"newspaper_id\": \"economist\",\n 'body': body,\n 'article_id': form_article_id(url),\n 'headline': headline,\n 'article_url': url,\n 'html': html,\n 'date_published': published,\n 'date_modified': modified,\n }\n\n\neconomist = {\n \"newspaper_id\": \"economist\",\n \"newspaper\": \"The Economist\",\n \"newspaper_url\": \"economist.com\",\n \"checker\": check_economist_url,\n \"parser\": parse_economist_url\n}\n\n\nif __name__ == '__main__':\n url = 'https://www.economist.com/briefing/2010/11/25/facing-the-consequences'\n url = 'https://www.economist.com/books-and-arts/2019/05/16/climate-change-strikes-the-venice-biennale'\n response = requests.get(url)\n html = response.text\n soup = BeautifulSoup(html, features=\"html5lib\")\n\n headline = find_one_tag(soup, 'span', {'class': 'article__headline', 'itemprop': 'headline'}).text\n","sub_path":"newspapers/economist.py","file_name":"economist.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"623883936","text":"import time\r\n\r\nimport pymysql\r\n\r\n\r\ndb = pymysql.connect(\"192.168.1.151\",\"root\",\"123456\",\"db_ny_ctd\",charset='utf8')\r\ncursor = db.cursor()\r\n\r\ndb2 = pymysql.connect(\"localhost\",\"root\",\"123456\",\"test\",charset='utf8')\r\ncursor2 = db2.cursor()\r\n\r\nwhile True:\r\n\r\n get_new_record_sql = 'select max(endtime) from t_c_cti_callrecord'\r\n cursor2.execute(get_new_record_sql)\r\n new_row = cursor2.fetchone()\r\n new_time = new_row[0]\r\n print(new_time)\r\n\r\n sql = \"SELECT * FROM t_c_cti_callrecord \\\r\n WHERE endtime > '%s'\" % (new_time)\r\n try:\r\n cursor.execute(sql)\r\n results = cursor.fetchall()\r\n for row in results:\r\n\r\n insert_sql = \"INSERT INTO t_c_cti_callrecord \\\r\n VALUES('%s', '%s', '%d', '%s', '%s','%d', '%d', '%d', '%d', '%s','%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s','%s', '%d', '%d', '%d', '%d','%d', '%d', '%s', '%s', '%d','%s', '%s', '%s', '%d', '%d','%s', '%s', '%s', '%s', '%s', '%d','%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s' )\" % \\\r\n 
(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13],row[14],row[15],row[16],row[17],row[18],row[19],row[20],row[21],row[22],row[23],row[24],row[25],row[26],row[27],row[28],row[29],row[30],row[31],row[32],row[33],row[34],row[35],row[36],row[37],row[38],row[39],row[40],row[41],row[42],row[43],row[44],row[45],row[46],row[47],row[48],row[49],row[50],row[51],row[52],row[53],row[54])\r\n\r\n cursor2.execute(insert_sql)\r\n db2.commit()\r\n except Exception as e:\r\n print(e)\r\n time.sleep(1)\r\n\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"607618083","text":"import random\nfrom termcolor import colored \n\ndef NumRand():\n random.seed()\n x=random.randint(0,100)\n \n return x\n\ndef NumRandPer(x,y):\n random.seed()\n x=random.randint(x,y)\n \n return x\n\ndef depuraNum():\n nValido=False\n while (nValido==False):\n try:\n n= int (input(colored(\"ingrese el numero deseado: \\n\",\"yellow\")))\n nValido=True\n except ValueError:\n print(colored(\"el cararcter ingresado no es un numero ingrese el valor nuevamente \\n\",\"red\"))\n nValido=False\n return n \n\ndef finalizar():\n print(colored(\"Programa finalizado\",\"red\") )\n\n\n\ndef menu():\n print(colored(\"Ejercicio numero aleatorio \\n\",\"yellow\"))\n print(colored(\"que deseas hacer? \\n\",\"yellow\"))\n print(colored(\"1. numero aleatorio entre 0 y 100\\n\",\"green\"))\n print(colored(\"2. numero aleatorio valores personalizados\\n\",\"green\"))\n print(colored(\"3. salir\\n\",\"red\"))\n respuesta= input(\"\")\n return respuesta\n\n\n\ndef continuar():\n input(colored(\"Presiona Enter para continuar...\",\"yellow\"))\n\ndef main():\n salida=False\n while (salida==False):\n opcion=menu()\n if (opcion==\"1\"):\n print(colored(\"su numero aleatorio es: \",\"yellow\") )\n print(colored(str(NumRand()) + \"\\n\",\"green\") )\n continuar()\n \n elif(opcion==\"2\"):\n print(colored(\"rango minimo\\n\", \"yellow\") )\n x = depuraNum()\n print(colored(\"rango maximo\\n\", \"yellow\") )\n y = depuraNum()\n print(colored(\"su numero aleatorio entre \" + str(x) +\" y \" + str(y) + \"es: \",\"yellow\") )\n print(colored(str(NumRandPer(x,y)) + \"\\n\",\"green\") )\n continuar()\n elif(opcion==\"3\"): \n salida=True\n print(colored(\"Gracias por usar el programa\\n\",\"red\") )\n finalizar()\n else:\n print(colored(\"No ha ingresado una opcion correcta\\n\",\"red\") )\n continuar() \n\nmain() ","sub_path":"py6_retos2/reto5.py","file_name":"reto5.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"168673813","text":"\"\"\"\nCompute the Celestial location radius RMS corresponding to the PRD requirement\nof 1.0 arcsec.\n\"\"\"\n\nimport asciitable\nimport numpy as np\nfrom Chandra.Time import DateTime\n\n# Read using Tab instead of Rdb because the RDB 2nd-line header is wrong.\ndat = asciitable.read(\n \"/proj/sot/ska/data/astromon/standard_xcorr/plot.rdb\",\n Reader=asciitable.Tab,\n data_start=2,\n guess=False,\n)\nok = dat[\"status_id\"] == \"\"\ndat = dat[ok]\n\nstart = DateTime() - (5 * 365)\nstop = DateTime()\nok = (DateTime(dat[\"date_obs\"]).date > start.date) & (\n DateTime(dat[\"date_obs\"]).date < stop.date\n)\nprint(\"{} to {}\".format(start.date, stop.date))\n\nprint(\"N srcs: {}\".format(len(dat[ok])))\nprint(\"RMS radius = 
{}\".format(np.sqrt(np.mean(dat[ok][\"dr\"] ** 2))))\nprint(\"90 percentile radius = {} arcsec\".format(np.percentile(dat[ok][\"dr\"], 90)))\nprint(\"99 percentile radius = {} arcsec\".format(np.percentile(dat[ok][\"dr\"], 99)))\n\nfor detector in [\"ACIS-S\", \"ACIS-I\", \"HRC-S\", \"HRC-I\"]:\n det = dat[ok][\"detector\"] == detector\n print(\n \"90 percentile radius for {} is {} arcsec\".format(\n detector, np.percentile(dat[ok][\"dr\"][det], 90)\n )\n )\n\nprint(\n \"{:.1f} percent outside a 1 arcsec radius\".format(\n 100.0 * np.count_nonzero(dat[ok][\"dr\"] > 1.0) / len(dat[ok][\"dr\"])\n )\n)\n\n\nprint(\"Worst case is {:.1f}\".format(np.max(dat[ok][\"dr\"])))\n","sub_path":"legacy/calc_rms.py","file_name":"calc_rms.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"522969284","text":"#!/usr/bin/env python\n\"\"\"Main script for running the pyinseq package.\"\"\"\n\nimport argparse\nimport os\nfrom shutil import copyfile\nimport sys\nimport yaml\nfrom demultiplex import sample_prep, demultiplex_fastq, trim_fastq\nfrom gbkconvert import gbk2fna, gbk2ftt\nfrom mapReads import bowtieBuild, bowtieMap, parseBowtie\nfrom processMapping import mapSites, mapGenes, buildGeneTable\nfrom utils import convert_to_filename, createExperimentDirectories\n\ndef parseArgs(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input',\n help='input Illumina reads file',\n required=True)\n parser.add_argument('-s', '--samples',\n help='sample list with barcodes',\n required=True)\n parser.add_argument('-e', '--experiment',\n help='experiment name (no spaces or special characters)',\n required=True)\n parser.add_argument('-g', '--genome',\n help='genome in GenBank format (one concatenated file for multiple contigs/chromosomes)',\n required=True)\n parser.add_argument('-d', '--disruption',\n help='fraction of gene disrupted (0.0 - 1.0)',\n default=1.0)\n parser.add_argument('--nobarcodes',\n help='barcodes have already been removed from the samples; \\\n -i should list the directory with filenames (.fastq.gz) \\\n corresponding to the sample names',\n action='store_true',\n default=False)\n parser.add_argument('--keepall',\n help='keep all intermediate files generated \\\n (warning: large size!)',\n action='store_true',\n default=False)\n return parser.parse_args(args)\n\n\nclass cd:\n \"\"\"Context manager to change to the specified directory then back.\"\"\"\n def __init__(self, newPath):\n self.newPath = os.path.expanduser(newPath)\n\n def __enter__(self):\n self.savedPath = os.getcwd()\n os.chdir(self.newPath)\n\n def __exit__(self, etype, value, traceback):\n os.chdir(self.savedPath)\n\n\ndef pipeline_organize(samples):\n\n print('\\n===================='\\\n '\\n* Setting up *'\\\n '\\n====================\\n')\n\n # Create the directory struture based on the experiment name\n createExperimentDirectories(experiment)\n\n # Note: barcode length hardcoded at 4 bp here\n barcode_qc, barcode_length = True, 4\n\n # if nobarcodes:\n # barcode_qc, barcode_length = False, 0\n\n # TODO(For rerunning samples, modify samplesDict construction; read in a YAML file?)\n\n # TODO(Modify as needed for already-demultiplexed samples)\n\n # samples = OrderedDict([('name1', {'name': 'name1', 'barcode': 'barcode1'}),\n # ('name2', {'name': 'name2', 'barcode': 'barcode2'})])\n global samplesDict\n samplesDict = sample_prep(samples, barcode_qc)\n\n # add 
'demultiplexedPath' and 'trimmedPath' fields for each sample\n for sample in samplesDict:\n demultiplexedPath = 'results/{experiment}/raw_data/{sampleName}.fastq.gz'.format(\n experiment=experiment,\n sampleName=samplesDict[sample]['name'])\n trimmedPath = 'results/{experiment}/{sampleName}_trimmed.fastq'.format(\n experiment=experiment,\n sampleName=samplesDict[sample]['name'])\n samplesDict[sample]['demultiplexedPath'] = demultiplexedPath\n samplesDict[sample]['trimmedPath'] = trimmedPath\n\n print('\\nProcessing {} total samples:'.format(len(samplesDict)))\n for s in samplesDict:\n print('{0}\\n barcode: {1}'.format(s, samplesDict[s]['barcode']))\n samples_yaml = 'results/{}/samples.yml'.format(experiment)\n with open(samples_yaml, 'w') as fo:\n fo.write(yaml.dump(samplesDict, default_flow_style=False))\n print('Sample details written to {}'.format(samples_yaml))\n\ndef pipeline_no_demultiplex(reads):\n # copy reads files into the experiment/raw_data directory\n for sample in samplesDict:\n # makes sure the reads directory has a trailing slash\n if reads[-1] != '/':\n reads += '/'\n src = reads + sample + '.fastq.gz'\n dst = samplesDict[sample]['demultiplexedPath']\n copyfile(src, dst)\n\ndef pipeline_demultiplex(reads):\n\n print('\\n===================='\\\n '\\n* Demultiplexing *'\\\n '\\n====================\\n')\n\n # demultiplex based on barcodes defined in the sample file\n print('\\nDemultiplexing from input file:\\n {}'.format(reads))\n nreads = demultiplex_fastq(reads, samplesDict, experiment)\n logdata['total_reads'] = nreads\n print('Demultiplexed into output files:')\n for s in samplesDict:\n print(' ' + samplesDict[s]['demultiplexedPath'])\n\ndef pipeline_mapping(gbkfile, organism, genomeDir, disruption, barcode_length=4):\n # Prepare genome files from the GenBank input\n\n print('\\n===================='\\\n '\\n* Mapping *'\\\n '\\n====================\\n')\n\n fnaPrint = \\\n '\\nPreparing nucleotide fasta file from GenBank file to use in bowtie mapping.\\n' \\\n ' GenBank source file: {}'.format(gbkfile)\n fttPrint = \\\n '\\nPreparing feature table file from GenBank file to use in gene mapping.\\n' \\\n ' GenBank source file: {}'.format(gbkfile)\n print(fnaPrint)\n gbk2fna(gbkfile, organism, genomeDir)\n print(fttPrint)\n gbk2ftt(gbkfile, organism, genomeDir)\n\n # Change directory, build bowtie indexes, change directory back\n with cd(genomeDir):\n print('\\nBuilding bowtie index files in results/{}/genome_lookup'.format(experiment))\n bowtieBuild(organism)\n\n # Dictionary of each sample's cpm by gene\n geneMappings = {}\n for sample in samplesDict:\n s = samplesDict[sample]\n print('\\nProcessing sample {}'.format(sample))\n sample_reads, trimmed_reads = trim_fastq(s['demultiplexedPath'], s['trimmedPath'], sample, barcode_length)\n logdata[sample] = {}\n logdata[sample]['reads_with_bc'] = sample_reads\n logdata[sample]['reads_with_bc_seq_tn'] = trimmed_reads\n # Change directory, map to bowtie, change directory back\n trimmedSampleFile = '{0}_trimmed.fastq'.format(sample)\n bowtieOutputFile = '{0}_bowtie.txt'.format(sample)\n with cd(genomeDir):\n # Paths are relative to the genome_lookup directory\n # from where bowtie is called\n bowtie_in = '../{0}'.format(trimmedSampleFile)\n bowtie_out = '../{0}'.format(bowtieOutputFile)\n # map to bowtie and produce the output file\n print('\\nMapping {} reads with bowtie'.format(sample))\n bowtie_msg_out = bowtieMap(organism, bowtie_in, bowtie_out)\n # store bowtie data for each sample in dictionary\n 
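# (parseBowtie is assumed here to return a dict of alignment statistics parsed from bowtie's console output)\n 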
logdata[sample]['bowtie_results'] = parseBowtie(bowtie_msg_out)\n # Map each bowtie result to the chromosome\n insertions = len(mapSites('results/{0}/{1}'.format(experiment, bowtieOutputFile)))\n logdata[sample]['insertion_sites'] = insertions\n # Add gene-level results for the sample to geneMappings\n # Filtered on gene fraction disrupted as specified by -d flag\n geneMappings[sample] = mapGenes(organism, sample, disruption, experiment)\n if not keepall:\n # Delete trimmed fastq file, bowtie mapping file after writing mapping results\n os.remove(s['trimmedPath'])\n os.remove('results/{0}/{1}'.format(experiment, bowtieOutputFile))\n buildGeneTable(organism, samplesDict, geneMappings, experiment)\n # print(logdata)\n\n\ndef pipeline_analysis():\n\n print('\\n===================='\\\n '\\n* Analysis *'\\\n '\\n====================\\n')\n\n samples_summary = 'results/{}/samples_summary.yml'.format(experiment)\n with open(samples_summary, 'w') as fo:\n fo.write(yaml.dump(logdata, default_flow_style=False))\n print('Writing file with summary of results:\\n {}'.format(samples_summary))\n\ndef main():\n \"\"\"Start here.\"\"\"\n args = parseArgs(sys.argv[1:])\n global experiment\n experiment = convert_to_filename(args.experiment)\n gbkfile = args.genome\n reads = args.input\n samples = args.samples\n disruption = float(args.disruption) #set input disruption value as a float as input can be int\n if disruption < 0.0 or disruption > 1.0: #test whether disruption value is from 0.0 to 1.0\n disruption = 1.0 #if disruption value is not from 0.0 to 1.0, set disruption to default value of 1.0\n print('\\n*** WARNING ***'\\\n '\\nDisruption value: {}'\n '\\nDisruption value must be from 0.0 to 1.0'\\\n '\\nProceeding with default value of 1.0\\n'.format(float(args.disruption)))\n nobarcodes = args.nobarcodes\n global keepall\n keepall = args.keepall\n # Logging of sample info\n global logdata\n logdata = {}\n # Organism reference files called 'genome.fna' etc\n organism = 'genome'\n\n # --- ORGANIZE SAMPLE LIST AND FILE PATHS --- #\n pipeline_organize(samples)\n\n # --- DEMULTIPLEX OR MOVE FILES IF ALREADY DEMULTIPLEXED --- #\n if nobarcodes:\n pipeline_no_demultiplex(reads)\n else:\n pipeline_demultiplex(reads)\n\n # --- BOWTIE MAPPING --- #\n genomeDir = 'results/{experiment}/genome_lookup/'.format(experiment=experiment)\n pipeline_mapping(gbkfile, organism, genomeDir, disruption)\n\n # --- ANALYSIS OF RESULTS --- #\n pipeline_analysis()\n\n\n # --- CONFIRM COMPLETION --- #\n print('\\n===================='\\\n '\\n* Done *'\\\n '\\n====================\\n')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/pyinseq.py","file_name":"pyinseq.py","file_ext":"py","file_size_in_byte":9873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"159716256","text":"# Copyright (c) 2019 Patrick Levin\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", 
WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n# ==============================================================================\r\n\r\n# Keys in the model.json file\r\nTFJS_NODE_KEY = 'node'\r\n\r\nTFJS_NODE_ATTR_KEY = 'attr'\r\nTFJS_NODE_CONST_KEY = 'Const'\r\nTFJS_NODE_PLACEHOLDER_KEY = 'Placeholder'\r\n\r\nTFJS_ATTR_DTYPE_KEY = 'dtype'\r\nTFJS_ATTR_SHAPE_KEY = 'shape'\r\nTFJS_ATTR_VALUE_KEY = 'value'\r\nTFJS_ATTR_STRING_VALUE_KEY = 's'\r\n\r\nTFJS_NAME_KEY = 'name'\r\nTFJS_DATA_KEY = 'data'\r\n\r\n# CLI arguments\r\nCLI_INPUT_PATH = 'input_path'\r\nCLI_OUTPUT_PATH = 'output_path'\r\nCLI_OUTPUT_FORMAT = 'output_format'\r\nCLI_SAVED_MODEL_TAGS = 'saved_model_tags'\r\nCLI_VERSION = 'version'\r\nCLI_SAVED_MODEL = 'tf_saved_model'\r\nCLI_FROZEN_MODEL = 'tf_frozen_model'\r\nCLI_SILENT_MODE = 'silent'","sub_path":"tfjs_graph_converter/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"45735600","text":"# Display the puppy image with a tight view.\n#\nimport pyvista as pv\nfrom pyvista import examples\npuppy = examples.download_puppy()\npl = pv.Plotter(border=True, border_width=5)\n_ = pl.add_mesh(puppy, rgb=True)\npl.camera.tight()\npl.show()\n#\n# Set the background to blue use a 5% padding around the image.\n#\npl = pv.Plotter()\n_ = pl.add_mesh(puppy, rgb=True)\npl.background_color = 'b'\npl.camera.tight(padding=0.05)\npl.show()\n","sub_path":"version/0.36/api/core/_autosummary/pyvista-Camera-tight-1.py","file_name":"pyvista-Camera-tight-1.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"294243107","text":"import keyring\nfrom selenium import webdriver\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom config import Config\nfrom supported_settings import valid_room_settings\n\n\ndef calc_date(days):\n \"\"\"return: date in 'days' number of days \"\"\"\n today = datetime.today()\n two_weeks = today + timedelta(days=days)\n book_date = two_weeks.strftime(\"%d.%m.%Y\")\n\n return book_date\n\n\nclass NTNU:\n\n def __init__(self):\n self.driver = None\n self.__username = Config.username\n self.__password = keyring.get_password('ntnu', self.__username)\n self.chromedriver_path = Config.chromedriver\n\n def change_login_info(self, username, passwd):\n self.__username = username\n self.__password = passwd\n\n def start_session(self, headless=True):\n \"\"\"this function starts selenium, you can toggle gui with headless\"\"\"\n\n\n if headless:\n options = webdriver.ChromeOptions()\n options.add_argument('--headless')\n options.add_argument('--log-level=off')\n self.driver = webdriver.Chrome(self.chromedriver_path, options=options)\n return\n\n self.driver = 
webdriver.Chrome(self.chromedriver_path)\n\n def login(self, headless=True):\n \"\"\"this logs in to ntnu\"\"\"\n self.start_session(headless)\n\n # self.driver.minimize_window()\n\n self.driver.get(\"https://innsida.ntnu.no/c/portal/login\")\n\n # username\n self.driver.find_element_by_id('username').send_keys(self.__username)\n\n # password\n self.driver.find_element_by_id('password').send_keys(self.__password)\n\n # click login button\n # WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.xPATH, \"/html/body/div/article/section[2]/div[1]/form[1]/button\"))).click()\n self.driver.find_element_by_xpath('/html/body/div/article/section[2]/div[1]/form[1]/button').click()\n\n # self.driver.find_element_by_id('students-menu-button').click()\n\n def book_room(self, **kwargs):\n \"\"\"has to continue after login\"\"\"\n # pass in kwargs matching these parameter keys to override the defaults\n parameters = {\n 'start_time': '08:00',\n 'duration': '04:00', # this is duration from booking in hours\n 'days': 14,\n 'area': 'Gløshaugen',\n 'building': \"Elektro E/F\",\n 'min_people': None,\n 'room_id': 'E204',\n 'description_text': \"Studering\"\n }\n\n for key, value in kwargs.items():\n parameters[key] = value\n\n\n self.driver.get(\"http://www.ntnu.no/romres\")\n\n # tries to press the yes/continue button; if the element is not found we skip this part as we don't need it\n try:\n WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.NAME, \"Yes\"))).click()\n except Exception:\n pass\n\n # start time\n start_time = parameters['start_time']\n select_start = Select(self.driver.find_element_by_id(\"start\"))\n select_start.select_by_value(start_time)\n\n # end time\n duration = parameters['duration']\n select_end = Select(self.driver.find_element_by_id('duration'))\n select_end.select_by_value(duration)\n\n # date\n days = parameters['days']\n date = calc_date(days) # max\n select_date = self.driver.find_element_by_id('preset_date')\n select_date.clear()\n select_date.send_keys(date)\n select_date.send_keys(Keys.ENTER)\n\n # area\n area = parameters['area']\n select_area = Select(self.driver.find_element_by_id('area'))\n select_area.select_by_visible_text(area)\n\n # building\n building = parameters['building']\n select_building = Select(self.driver.find_element_by_id('building'))\n select_building.select_by_visible_text(building)\n\n # min people\n min_people = parameters['min_people']\n if min_people:\n people_input_box = self.driver.find_element_by_id('size')\n people_input_box.send_keys(min_people)\n people_input_box.send_keys(Keys.ENTER)\n\n # press \"vis ledige rom\" button\n self.driver.find_element_by_id('preformsubmit').click()\n\n # UNCOMMENT LINE UNDER TO GET TEXT ELEMENT OF ALL ROOMS THAT OCCURS\n # available_rooms_text = self.driver.find_element_by_id('room_table').text\n\n # this fetches the right input str for the chosen room (see supported_settings.py)\n room_id = valid_room_settings[0][parameters['area']][parameters['building']][parameters['room_id']]\n\n # choose the room\n\n try:\n WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.ID, room_id))).click()\n # self.driver.find_element_by_id(room_id).click()\n fail = False\n except:\n print(f\"room: {room_id} not found. 
\\n trying to book a random room\")\n fail = True\n\n # booking the desired room failed; fall back to trying the first listed room\n if fail:\n try:\n self.driver.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div[2]/section/form/div/section[1]/fieldset/ul/li[1]/div[1]/input').click()\n except:\n print(\"first try failed\")\n try:\n self.driver.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div[2]/section/form/div/section[1]/fieldset/ul/li/div[1]/input').click()\n except:\n print('failed on second try. \\nCanceling')\n self.driver.close()\n return\n\n # order button\n self.driver.find_element_by_id('rb-bestill').click()\n\n # description\n description_text = parameters['description_text']\n # description_box = self.driver.find_element_by_id('name')\n description_box = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.ID, 'name')))\n description_box.send_keys(description_text)\n\n # confirm button\n self.driver.find_element_by_name('confirm').click()\n\n # send confirmation email\n self.driver.find_element_by_name('sendmail').click()\n\n print(\"\\nBooking complete! \\n-----------------------------------------------\")\n\n self.driver.quit()\n\n def tab(self, **action):\n \"\"\"currently supported kwargs: newtab, switch\"\"\"\n options = {\n 'newtab': False,\n 'switch': None\n }\n for key, value in options.items():\n if key in action.keys():\n options[key] = action[key]\n print(options)\n\n # open new tab and switch to it\n if options['newtab'] and options['switch'] is True:\n self.driver.execute_script(\"window.open('');\")\n self.driver.switch_to.window(self.driver.window_handles[-1])\n # if a url is specified go to that url\n if str(options['newtab'])[:4] == 'http':\n self.driver.get(options['newtab'])\n\n if type(options['switch']) is int:\n tab = options['switch']\n self.driver.switch_to.window(self.driver.window_handles[tab])\n\n\nif __name__ == '__main__':\n book = NTNU()\n book.login(False)\n book.book_room()\n","sub_path":"ntnu.py","file_name":"ntnu.py","file_ext":"py","file_size_in_byte":7592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"276607114","text":"#source:\n# http://news.cnstock.com/news/sns_yw/index.html\n# view-source:http://news.cnstock.com/news/sns_yw/index.html (HTML document)\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pprint import pprint #pprint.pprint pretty-prints dicts; source: (http://www.broadview.com.cn/article/194)\nfrom time import sleep,ctime\nimport os #OS-related standard-library module, e.g. os.path\nimport threading\nimport queue\n\nall_urls = []\nnews_data_title = []\n#for item in range(1,116):\n# pages_list = []\n# pages = 'http://news.cnstock.com/news/sns_yw/'+str(item)\n# for page in pages:\n# pages_list.append(page)\npage_url = ['{}{}'.format('http://news.cnstock.com/news/sns_yw/',page) for page in range(1,2)]#build the list-page urls for page 1 (the range can be widened to (1,100))\n\n\ndef get_links_and_title(): #collect the news titles and links from the index pages\n for one_page in page_url:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36'}\n html = requests.get(one_page, headers=headers)\n metadata = html.text\n soup = BeautifulSoup(metadata, 'html.parser')\n links_part = soup.find_all('ul',class_='new-list article-mini') #grab the markup of the news-list block here\n for links in links_part:\n a = links.find_all('a')\n for one in a:\n #print(one)\n href = one.attrs['href']\n all_urls.append(href)#append the link to the list\n news_title = one.get_text() #attrs['title'] #using attrs['title'] raised errors because some a tags lack a title attribute\n 
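# keep the title list aligned with all_urls so links and titles can be paired by index\n 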
news_data_title.append(news_title) #append the title to the list\n pprint('新闻链接:{}'.format(href))\n pprint('新闻标题:{}'.format(news_title))\n #print(len(all_urls)) # counts the news links across all pages\n\ndef get_url_text(): # fetch the article body of a single news page\n file_directory = r'C:\\Users\\马海斌\\Desktop\\文件\\programing\\Python\\爬取文件' # directory for the saved files\n news_index = 1 # initial index for the output files\n num_news = 0 # counter for the number of scraped articles\n for url in all_urls:\n num_news += 1\n if num_news == 1000:\n sleep(10) #throttle: sleep for 10s after every 1000 articles\n num_news = 0\n else:\n html = requests.get(url)\n metadata = html.text\n soup = BeautifulSoup(metadata,'html.parser')\n try:\n file_title = soup.find('h1').get_text()\n except AttributeError:\n file_title = 'untitled' # fallback so file_title is always bound on the first iteration\n\n file_title = file_title.replace('*','')\n file_title = file_title.replace('|','')\n file_title = file_title.replace('?', '')\n file_title = file_title.replace('\"', '')\n file_title = file_title.replace('"', '')\n file_title = file_title.replace('>', '')\n file_title = file_title.replace('<', '')\n file_title = file_title.replace(':', '')\n file_title = file_title.replace('\\\\', '')\n file_title = file_title.replace('/', '')\n file_title = file_title.replace('\\r\\n', '')\n\n t = '' # accumulator for the text to be written\n try:\n news_time = soup.find('span', class_='timer').get_text() # grab the publication time of the news page\n t += '<新闻发布时间:{}>'.format(news_time)\n except AttributeError:\n pass\n\n for text in soup.select('div.content > p'):\n t += text.get_text()\n t += '<新闻链接:{}>'.format(url) #append the url to the file text\n\n #print(t)\n with open(os.path.join(file_directory, str(file_title)) + '.txt', 'a',encoding='utf8') as file:\n file.write(t)\n news_index += 1\n #text = soup.find_all('p') # this approach was not precise enough\n #t = ''\n #for w in text:\n # t += w.get_text()\n #pprint(t)\n\n\nif __name__ == '__main__':\n\n get_links_and_title() # collect all the titles and links\n\n get_url_text() # fetch the article text for each link","sub_path":"spider of Python/stock_news.py","file_name":"stock_news.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"52436563","text":"from Kisok.backends import Backend\nimport sys, gtk, gobject\nimport braseroburn\n\nclass BurnBackend(Backend):\n\n def __init__(self, *args, **kwargs):\n self.iso = None\n\n def write(self, isoname):\n braseroburn.start()\n track = braseroburn.TrackImageCfg()\n track.set_source(isoname)\n session = braseroburn.SessionCfg()\n session.add_track(track, None)\n\n option_dialog = braseroburn.BurnOptions(session)\n response = option_dialog.run()\n option_dialog.destroy()\n if response != gtk.RESPONSE_OK:\n sys.exit(1)\n\n burn_dialog = braseroburn.BurnDialog()\n burn_dialog.show()\n burn_dialog.run(session)\n burn_dialog.destroy()\n braseroburn.stop()\n","sub_path":"Kisok/backends/burn.py","file_name":"burn.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"298376439","text":"class Iadc(object):\n def __init__(self, fpga, zdok=0):\n self.fpga = fpga\n self.zdok = zdok\n self.reg = 'iadc%d_controller' % self.zdok\n self._set_3wire(0, 0, 1, 0) # initial state\n\n def reg_reset(self):\n self._set_3wire(0, 0, 1, 0)\n self._set_3wire(1, 0, 1, 0)\n self._set_3wire(0, 0, 1, 0)\n \n\n def ddrb_reset(self):\n self.fpga.write_int(self.reg, 0, offset=1, blindwrite=True)\n self.fpga.write_int(self.reg, 1, offset=1, blindwrite=True)\n self.fpga.write_int(self.reg, 0, offset=1, blindwrite=True)\n\n def mmcm_reset(self):\n self.fpga.write_int(self.reg, 0, offset=2, blindwrite=True)\n self.fpga.write_int(self.reg, 1, offset=2, blindwrite=True)\n 
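# pulse the MMCM reset bit 0 -> 1 -> 0; blindwrite skips the readback check\n 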
self.fpga.write_int(self.reg, 0, offset=2, blindwrite=True)\n\n def _set_3wire(self, mode, clk, ldn, data):\n # bit mappings\n CLK = 0\n DATA = 1\n STROBE = 2\n MODE = 3\n v = (mode << MODE) + (ldn << STROBE) + (data << DATA) + (clk << CLK)\n #print mode, clk, ldn, data,\n #if clk:\n # print 'Clocked data', data\n #else:\n # print ''\n self.fpga.write_int(self.reg, v, blindwrite=True)\n \n def write_reg(self, addr, val):\n self._set_3wire(1, 0, 1, 0) # mode high\n self._set_3wire(1, 0, 1, 0) # strobe high\n self._set_3wire(1, 1, 1, 0) # clock tick\n self._set_3wire(1, 0, 1, 0) # \n self._set_3wire(1, 0, 0, 0) # strobe down\n for i in range(3)[::-1]:\n d = (addr >> i) & 0x1\n self._set_3wire(1, 0, 0, d) # set data bit\n self._set_3wire(1, 1, 0, d) # tick clock\n self._set_3wire(1, 0, 0, d) # \n for i in range(16)[::-1]:\n d = (val >> i) & 0x1\n self._set_3wire(1, 0, 0, d) # set data bit\n self._set_3wire(1, 1, 0, d) # tick clock\n self._set_3wire(1, 0, 0, d) # \n # tick clock once more\n self._set_3wire(1, 1, 0, 0) # tick clock\n self._set_3wire(1, 0, 0, 0) # \n # strobe\n self._set_3wire(1, 0, 1, 0) # tick clock\n self._set_3wire(1, 1, 1, 0) # tick clock\n self._set_3wire(1, 0, 1, 0) # \n\n def set_dual_input(self):\n #self.write_reg(0, 0b0111000010111100)\n self.write_reg(0, 0x7cbc)\n self.ddrb_reset()\n self.mmcm_reset()\n self.ddrb_reset()\n\n def set_single_input(self):\n self.write_reg(0, 0x7cac)\n self.ddrb_reset()\n self.mmcm_reset()\n self.ddrb_reset()\n\n def set_ramp_mode(self):\n self.write_reg(0b110, 0b11)\n self.ddrb_reset()\n self.mmcm_reset()\n self.ddrb_reset()\n\n def set_const_mode(self, const=0xaa):\n self.write_reg(0b110, (const<<2) + 0b01)\n self.ddrb_reset()\n self.mmcm_reset()\n self.ddrb_reset()\n\n def set_data_mode(self):\n self.write_reg(0b110, 0b00)\n self.ddrb_reset()\n self.mmcm_reset()\n self.ddrb_reset()\n\n\n","sub_path":"attic/quicklook/iadc.py","file_name":"iadc.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"399375539","text":"from random import randint\nfrom math import gcd\nimport csv\n\n\n# instead of quantum order finding\ndef order(a, N):\n for i in range(1, N):\n if pow(a, i, N) == 1:\n return i\n\n\ndata = []\n\nfor N in range(3, 100000):\n order_counts = 0\n while True:\n a = randint(2, N-1)\n if gcd(a, N) != 1:\n data.append([N, False, order_counts])\n break\n t = pow(a, (N-1)//2, N)\n if t != 1 and t != N - 1:\n data.append([N, False, order_counts])\n break\n elif t == 1:\n continue\n else:\n ord_a = order(a, N)\n order_counts += 1\n if ord_a == N - 1:\n data.append([N, True, order_counts])\n break\n\n\nwith open(\"output.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(data)\n","sub_path":"primary_test.py","file_name":"primary_test.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"443219517","text":"def fonk(a): # the function computes the divisors of the given number up to that number, then checks whether the sum of the divisors equals the number\n bölenler=[]\n for i in range(1,a):\n kalan=a%i\n if kalan==0:\n bölenler=bölenler+[i]\n topla=0\n for i in bölenler:\n topla+=i\n if topla==a:\n return print(a)\nwhile True:\n try:\n for i in range(1000):\n fonk(i)\n seçim=input(\"\\n*****Devam etmek için her hangi bir tuşa /// çıkmak için (q) tuşuna basınız..:\")\n if seçim==\"q\" or seçim ==\"Q\":\n break\n except ValueError:\n print(\"Lütfen bir tam sayı giriniz..:\")\n continue\n\n\n\n","sub_path":"8.hafta 3. ödev (mükemmel sayı tesbiti).py","file_name":"8.hafta 3. ödev (mükemmel sayı tesbiti).py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"76325987","text":"# Embedded file name: /usr/lib/python2.6/site-packages/awx/main/models/ad_hoc_commands.py\nimport hmac\nimport json\nimport logging\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.text import Truncator\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.exceptions import ValidationError\nfrom django.core.urlresolvers import reverse\nfrom jsonfield import JSONField\nfrom awx.main.models.base import *\nfrom awx.main.models.unified_jobs import *\nfrom awx.main.utils import decrypt_field\nlogger = logging.getLogger('awx.main.models.ad_hoc_commands')\n__all__ = ['AdHocCommand', 'AdHocCommandEvent']\n\nclass AdHocCommand(UnifiedJob):\n MODULE_NAME_CHOICES = [ (x, x) for x in settings.AD_HOC_COMMANDS ]\n MODULE_NAME_DEFAULT = 'command' if 'command' in settings.AD_HOC_COMMANDS else None\n\n class Meta(object):\n app_label = 'main'\n\n job_type = models.CharField(max_length=64, choices=JOB_TYPE_CHOICES, default='run')\n inventory = models.ForeignKey('Inventory', related_name='ad_hoc_commands', null=True, on_delete=models.SET_NULL)\n limit = models.CharField(max_length=1024, blank=True, default='')\n credential = models.ForeignKey('Credential', related_name='ad_hoc_commands', null=True, default=None, on_delete=models.SET_NULL)\n module_name = models.CharField(max_length=1024, default=MODULE_NAME_DEFAULT, choices=MODULE_NAME_CHOICES, blank=bool(MODULE_NAME_DEFAULT))\n module_args = models.TextField(blank=True, default='')\n forks = models.PositiveIntegerField(blank=True, default=0)\n verbosity = models.PositiveIntegerField(choices=VERBOSITY_CHOICES, blank=True, default=0)\n become_enabled = models.BooleanField(default=False)\n hosts = models.ManyToManyField('Host', related_name='ad_hoc_commands', editable=False, through='AdHocCommandEvent')\n\n def clean_credential(self):\n cred = self.credential\n if cred and cred.kind != 'ssh':\n raise ValidationError('You must provide a machine / SSH credential.')\n return cred\n\n def clean_limit(self):\n return self.limit\n\n def clean_module_name(self):\n if type(self.module_name) not in (str, unicode):\n raise ValidationError('Invalid type for ad hoc command')\n module_name = self.module_name.strip() or 'command'\n if module_name not in settings.AD_HOC_COMMANDS:\n raise ValidationError('Unsupported module for ad hoc commands.')\n return module_name\n\n def clean_module_args(self):\n if type(self.module_args) not in (str, unicode):\n raise ValidationError('Invalid type for ad hoc command')\n module_args = self.module_args\n if self.module_name in ('command', 'shell') and not module_args:\n raise ValidationError('No argument passed to %s module.' 
% self.module_name)\n return module_args\n\n @property\n def passwords_needed_to_start(self):\n \"\"\"Return list of password field names needed to start the job.\"\"\"\n if self.credential and self.credential.active:\n return self.credential.passwords_needed\n else:\n return []\n\n @classmethod\n def _get_parent_field_name(cls):\n return ''\n\n @classmethod\n def _get_task_class(cls):\n from awx.main.tasks import RunAdHocCommand\n return RunAdHocCommand\n\n def get_absolute_url(self):\n return reverse('api:ad_hoc_command_detail', args=(self.pk,))\n\n @property\n def task_auth_token(self):\n \"\"\"Return temporary auth token used for task requests via API.\"\"\"\n if self.status == 'running':\n h = hmac.new(settings.SECRET_KEY, self.created.isoformat())\n return '%d-%s' % (self.pk, h.hexdigest())\n\n def get_passwords_needed_to_start(self):\n return self.passwords_needed_to_start\n\n def is_blocked_by(self, obj):\n from awx.main.models import InventoryUpdate\n if type(obj) == InventoryUpdate:\n if self.inventory == obj.inventory_source.inventory:\n return True\n return False\n\n @property\n def task_impact(self):\n from awx.main.models.inventory import Host\n count_hosts = Host.objects.filter(active=True, enabled=True, inventory__ad_hoc_commands__pk=self.pk).count()\n return min(count_hosts, 5 if self.forks == 0 else self.forks) * 10\n\n def generate_dependencies(self, active_tasks):\n from awx.main.models import InventoryUpdate\n if not self.inventory:\n return []\n else:\n inventory_sources = self.inventory.inventory_sources.filter(active=True, update_on_launch=True)\n inventory_sources_found = []\n dependencies = []\n for obj in active_tasks:\n if type(obj) == InventoryUpdate:\n if obj.inventory_source in inventory_sources:\n inventory_sources_found.append(obj.inventory_source)\n\n try:\n start_args = json.loads(decrypt_field(self, 'start_args'))\n except Exception:\n start_args = None\n\n start_args = start_args or {}\n inventory_sources_already_updated = start_args.get('inventory_sources_already_updated', [])\n if inventory_sources_already_updated:\n for source in inventory_sources.filter(pk__in=inventory_sources_already_updated):\n if source not in inventory_sources_found:\n inventory_sources_found.append(source)\n\n if inventory_sources.count():\n for source in inventory_sources:\n if source not in inventory_sources_found and source.needs_update_on_launch:\n dependencies.append(source.create_inventory_update(launch_type='dependency'))\n\n return dependencies\n\n def copy(self):\n data = {}\n for field in ('job_type', 'inventory_id', 'limit', 'credential_id', 'module_name', 'module_args', 'forks', 'verbosity', 'become_enabled'):\n data[field] = getattr(self, field)\n\n return AdHocCommand.objects.create(**data)\n\n def save(self, *args, **kwargs):\n update_fields = kwargs.get('update_fields', [])\n if not self.name:\n self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)\n if 'name' not in update_fields:\n update_fields.append('name')\n super(AdHocCommand, self).save(*args, **kwargs)\n return\n\n\nclass AdHocCommandEvent(CreatedModifiedModel):\n \"\"\"\n An event/message logged from the ad hoc event callback for each host.\n \"\"\"\n EVENT_TYPES = [('runner_on_failed', _('Host Failed'), True), ('runner_on_ok', _('Host OK'), False), ('runner_on_unreachable', _('Host Unreachable'), True)]\n FAILED_EVENTS = [ x[0] for x in EVENT_TYPES if x[2] ]\n EVENT_CHOICES = [ (x[0], x[1]) for x in EVENT_TYPES ]\n\n class Meta:\n app_label = 'main'\n 
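# one event row per (command, host) pair\n 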
unique_together = [('ad_hoc_command', 'host_name')]\n ordering = ('-pk',)\n\n ad_hoc_command = models.ForeignKey('AdHocCommand', related_name='ad_hoc_command_events', on_delete=models.CASCADE, editable=False)\n host = models.ForeignKey('Host', related_name='ad_hoc_command_events', null=True, default=None, on_delete=models.SET_NULL, editable=False)\n host_name = models.CharField(max_length=1024, default='', editable=False)\n event = models.CharField(max_length=100, choices=EVENT_CHOICES)\n event_data = JSONField(blank=True, default={})\n failed = models.BooleanField(default=False, editable=False)\n changed = models.BooleanField(default=False, editable=False)\n counter = models.PositiveIntegerField(default=0)\n\n def get_absolute_url(self):\n return reverse('api:ad_hoc_command_event_detail', args=(self.pk,))\n\n def __unicode__(self):\n return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())\n\n def save(self, *args, **kwargs):\n from awx.main.models.inventory import Host\n update_fields = kwargs.get('update_fields', [])\n res = self.event_data.get('res', None)\n if self.event in self.FAILED_EVENTS:\n if not self.event_data.get('ignore_errors', False):\n self.failed = True\n if 'failed' not in update_fields:\n update_fields.append('failed')\n if isinstance(res, dict) and res.get('changed', False):\n self.changed = True\n if 'changed' not in update_fields:\n update_fields.append('changed')\n self.host_name = self.event_data.get('host', '').strip()\n if 'host_name' not in update_fields:\n update_fields.append('host_name')\n try:\n if not self.host_id and self.host_name:\n host_qs = Host.objects.filter(inventory__ad_hoc_commands__id=self.ad_hoc_command_id, name=self.host_name)\n host_id = host_qs.only('id').values_list('id', flat=True)\n if host_id.exists():\n self.host_id = host_id[0]\n if 'host_id' not in update_fields:\n update_fields.append('host_id')\n except (IndexError, AttributeError):\n pass\n\n super(AdHocCommandEvent, self).save(*args, **kwargs)\n return","sub_path":"usr/lib/python2.6/site-packages/awx/main/models/ad_hoc_commands.py","file_name":"ad_hoc_commands.py","file_ext":"py","file_size_in_byte":9301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"16898486","text":"from slackclient import SlackClient\nfrom bot.users import Users\nfrom bot.channels import Channels\nfrom bot.commands import CommandLoader\nimport time\n\nclass SlackBot:\n\n commands = None\n slack = None\n users = None\n channels = None\n\n def __init__(self):\n self.slack = SlackClient('xoxb-22358844560-K8mlKRpzJwzYqTCSpX1DWla6')\n self.users = Users(self.slack)\n self.channels = Channels(self.slack)\n self.commands = CommandLoader().load(self.slack, self.users, self.channels)\n\n def start(self):\n if self.slack.rtm_connect():\n while True:\n for message in self.slack.rtm_read():\n if message.get('type', None) == 'message':\n\n # Middleware\n \n\n # Command Triggers\n for command in self.commands:\n for trigger in command['triggers']:\n if message.get('text', None) != None and '+' + trigger == message.get('text', None).partition(\" \")[0]:\n command['instance'].execute(message)\n else:\n print('Failed To Connect')\n","sub_path":"le-bot/bot/slackbot.py","file_name":"slackbot.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"476509937","text":"'''\nCreated on 06.03.2020\n\n@author: JanB-4096\n'''\nfrom src import GameConfig\nimport 
pygame\nimport numpy as np\n\n\nclass NPCControl():\n \n def __init__(self, p1, p2, p1_difficulty, p2_difficulty):\n self.settings = {'p1': {'mode': p1, 'difficulty': p1_difficulty}, \\\n 'p2': {'mode': p2, 'difficulty': p2_difficulty}}\n\n def translate_keyboard(self, events, change_position_p1, change_position_p2):\n \n for event in events:\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_w:\n change_position_p1 = -1*GameConfig.change_bar_vertical\n elif event.key == pygame.K_s:\n change_position_p1 = 1*GameConfig.change_bar_vertical\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_w or event.key == pygame.K_s:\n change_position_p1 = 0 \n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n change_position_p2 = -1*GameConfig.change_bar_vertical\n elif event.key == pygame.K_DOWN:\n change_position_p2 = 1*GameConfig.change_bar_vertical\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n change_position_p2 = 0\n \n return change_position_p1, change_position_p2\n \n def calc_linear_npc(self, position_p, position_ball, change_ball, player):\n change_position_p = 0\n \n if self.settings[player]['difficulty'] == 'middle':\n if position_p + 2*GameConfig.bar_hight/5 >= position_ball[1]:\n change_position_p = -1*GameConfig.change_bar_vertical\n elif position_p + 3*GameConfig.bar_hight/5 <= position_ball[1]:\n change_position_p = GameConfig.change_bar_vertical\n elif self.settings[player]['difficulty'] == 'easy':\n if position_p >= position_ball[1]:\n change_position_p = -1*GameConfig.change_bar_vertical\n elif position_p + GameConfig.bar_hight <= position_ball[1]:\n change_position_p = GameConfig.change_bar_vertical\n elif self.settings[player]['difficulty'] == 'very_hard':\n timesteps_until_hit = 0\n \n if change_ball[0] > 0 and player == 'p2': #going right\n timesteps_until_hit = np.abs((position_ball[0] - GameConfig.startpoint_bar_p2[0]) / (change_ball[0]))\n elif change_ball[0] < 0 and player == 'p1':\n timesteps_until_hit = np.abs((position_ball[0] - GameConfig.startpoint_bar_p1[0] + GameConfig.bar_width) / (change_ball[0]))\n \n if timesteps_until_hit != 0: \n predicted_y_ball = position_ball[1] + change_ball[1]*timesteps_until_hit\n if predicted_y_ball > GameConfig.display_hight:\n predicted_y_ball = 2*GameConfig.display_hight - predicted_y_ball\n elif predicted_y_ball < 0:\n predicted_y_ball *= -1\n \n # TODO: remove factor 2 ... 
just implemented for improving enemy for neural net\n if position_p + GameConfig.bar_hight/5 >= predicted_y_ball:\n change_position_p = -2*GameConfig.change_bar_vertical\n elif position_p + 4*GameConfig.bar_hight/5 <= predicted_y_ball:\n change_position_p = 2*GameConfig.change_bar_vertical\n if change_ball[0] > 0 and player == 'p1' or change_ball[0] < 0 and player == 'p2': #go back to middle position if ball was hit\n distance_to_center = GameConfig.display_hight/2 - (position_p + GameConfig.bar_hight/2)\n change_position_p = np.sign(distance_to_center) * 2*GameConfig.change_bar_vertical * int((np.abs(distance_to_center) > GameConfig.change_bar_vertical))\n \n else: \n timesteps_until_hit = 0\n if change_ball[0] > 0 and player == 'p2': #going right\n timesteps_until_hit = np.abs((position_ball[0] - GameConfig.startpoint_bar_p2[0]) / (change_ball[0]))\n elif change_ball[0] < 0 and player == 'p1':\n timesteps_until_hit = np.abs((position_ball[0] - GameConfig.startpoint_bar_p1[0] + GameConfig.bar_width) / (change_ball[0]))\n if timesteps_until_hit != 0: \n predicted_y_ball = position_ball[1] + change_ball[1]*timesteps_until_hit\n if predicted_y_ball > GameConfig.display_hight:\n predicted_y_ball = 2*GameConfig.display_hight - predicted_y_ball\n elif predicted_y_ball < 0:\n predicted_y_ball *= -1\n if position_p + GameConfig.bar_hight/5 >= predicted_y_ball:\n change_position_p = -1*GameConfig.change_bar_vertical\n elif position_p + 4*GameConfig.bar_hight/5 <= predicted_y_ball:\n change_position_p = GameConfig.change_bar_vertical\n \n return change_position_p\n \n def calc_ai_p1(self, position_p, position_ball, change_ball, species):\n \n output_nn = species.calculate_output_to_input([position_p, position_ball[0], position_ball[1], change_ball[0], change_ball[1]])\n if output_nn[0] > output_nn[1]: #up\n return -1*GameConfig.change_bar_vertical\n else: #down\n return GameConfig.change_bar_vertical\n \n def calc_swarm_p1(self, position_p, position_ball, change_ball, speciesList):\n output_nn = []\n for species in speciesList:\n output_nn.append(self.calc_ai_p1(position_p, position_ball, change_ball, species))\n return GameConfig.change_bar_vertical * np.sign(np.average(output_nn))\n","sub_path":"src/NPCControl.py","file_name":"NPCControl.py","file_ext":"py","file_size_in_byte":5959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"607131800","text":"from dataclasses import dataclass\nfrom typing import Sequence, Tuple\n\nfrom django.contrib.admin.options import BaseModelAdmin\nfrom django.db import models\nfrom django.db.models import OuterRef, Subquery, functions\nfrom django.db.models.functions import Cast\nfrom django.urls import reverse\nfrom django.utils.html import format_html\n\nfrom .query import (\n BaseType,\n BooleanType,\n DateTimeType,\n DateType,\n HTMLType,\n MonthType,\n NumberType,\n StringType,\n WeekDayType,\n YearType,\n)\n\n_OPEN_IN_ADMIN = \"admin\"\n\n\n_AGG_MAP = {\n \"average\": lambda x: models.Avg(Cast(x, output_field=models.IntegerField())),\n \"count\": lambda x: models.Count(x, distinct=True),\n \"max\": models.Max,\n \"min\": models.Min,\n \"std_dev\": models.StdDev,\n \"sum\": lambda x: models.Sum(Cast(x, output_field=models.IntegerField())),\n \"variance\": models.Variance,\n}\n\n\n_AGGREGATES = {\n # NTS beware that Sum(type) -> type\n StringType: [\"count\"],\n NumberType: [\"average\", \"count\", \"max\", \"min\", \"std_dev\", \"sum\", \"variance\"],\n DateTimeType: [\"count\"], # average, min and max might 
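be nice here but sqlite\n    # (added note) lacks them natively; e.g. _AGG_MAP['count']('name') yields\n    # Count('name', distinct=True), and _AGG_MAP['sum'] casts to IntegerField first.\n    # average, min and max might 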
be nice here but sqlite\n DateType: [\"count\"], # average, min and max might be nice here but sqlite\n BooleanType: [\"average\", \"sum\"],\n}\n\n\n_FUNC_MAP = {\n \"year\": (functions.ExtractYear, YearType),\n \"quarter\": (functions.ExtractQuarter, NumberType),\n \"month\": (functions.ExtractMonth, MonthType),\n \"day\": (functions.ExtractDay, NumberType),\n \"week_day\": (functions.ExtractWeekDay, WeekDayType),\n \"hour\": (functions.ExtractHour, NumberType),\n \"minute\": (functions.ExtractMinute, NumberType),\n \"second\": (functions.ExtractSecond, NumberType),\n \"date\": (functions.TruncDate, DateType),\n}\n\n_FUNCTIONS = {\n DateTimeType: [\n \"year\",\n \"quarter\",\n \"month\",\n \"day\",\n \"week_day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"date\",\n ],\n DateType: [\"year\", \"quarter\", \"month\", \"day\", \"week_day\"],\n}\n\n\ndef s(path):\n return \"__\".join(path)\n\n\ndef get_model_name(model, sep=\".\"):\n return f\"{model._meta.app_label}{sep}{model.__name__}\"\n\n\n@dataclass\nclass OrmBoundField:\n field: \"OrmBaseField\"\n previous: \"OrmBoundField\"\n full_path: Sequence[str]\n pretty_path: Sequence[str]\n queryset_path: str = None\n aggregate_clause: Tuple[str, models.Func] = None\n filter_: bool = False\n having: bool = False\n model_name: str = None\n\n @property\n def path_str(self):\n return s(self.full_path)\n\n @property\n def group_by(self):\n return self.field.can_pivot\n\n def annotate(self, request, qs):\n return qs\n\n def __getattr__(self, name):\n return getattr(self.field, name)\n\n @classmethod\n def blank(cls):\n return cls(field=None, previous=None, full_path=[], pretty_path=[])\n\n\n@dataclass\nclass OrmModel:\n fields: dict\n admin: BaseModelAdmin = None\n\n @property\n def root(self):\n return bool(self.admin)\n\n\n@dataclass\nclass OrmBaseField:\n model_name: str\n name: str\n pretty_name: str\n type_: BaseType = None\n concrete: bool = False\n rel_name: str = None\n can_pivot: bool = False\n admin: object = None\n choices: Sequence[Tuple[str, str]] = ()\n\n def __post_init__(self):\n if not self.type_:\n assert self.rel_name\n if self.concrete or self.can_pivot:\n assert self.type_\n\n def format(self, value):\n return self.type_.format(value, self.choices)\n\n\nclass OrmFkField(OrmBaseField):\n def __init__(self, model_name, name, pretty_name, rel_name):\n super().__init__(model_name, name, pretty_name, rel_name=rel_name)\n\n def bind(self, previous):\n previous = previous or OrmBoundField.blank()\n full_path = previous.full_path + [self.name]\n return OrmBoundField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n )\n\n\nclass OrmConcreteField(OrmBaseField):\n def __init__(self, model_name, name, pretty_name, type_, choices=None):\n super().__init__(\n model_name,\n name,\n pretty_name,\n concrete=True,\n type_=type_,\n rel_name=(\n type_.name if type_ in _AGGREGATES or type_ in _FUNCTIONS else None\n ),\n can_pivot=True,\n choices=choices or (),\n )\n\n def bind(self, previous):\n previous = previous or OrmBoundField.blank()\n full_path = previous.full_path + [self.name]\n return OrmBoundField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=s(full_path),\n filter_=True,\n )\n\n\nclass OrmCalculatedField(OrmBaseField):\n def __init__(self, model_name, name, pretty_name, admin):\n super().__init__(\n model_name, name, pretty_name, type_=StringType, can_pivot=True, admin=admin\n )\n\n 
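# (added sketch) bind() chains bound fields left to right; with a hypothetical FK\n    # 'author' followed by this calculated field 'summary':\n    #   b = summary_field.bind(author_fk.bind(None))\n    #   b.full_path == ['author', 'summary'];  b.queryset_path == 'author__id'\n\n    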
def bind(self, previous):\n previous = previous or OrmBoundField.blank()\n full_path = previous.full_path + [self.name]\n return OrmBoundField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=s(previous.full_path + [\"id\"]),\n model_name=self.model_name,\n )\n\n def format(self, obj):\n if obj is None:\n return None\n\n if hasattr(self.admin, self.name):\n # admin callable\n func = getattr(self.admin, self.name)\n try:\n return func(obj)\n except Exception as e:\n return str(e)\n else:\n # model property or callable\n try:\n value = getattr(obj, self.name)\n return value() if callable(value) else value\n except Exception as e:\n return str(e)\n\n\nclass OrmBoundAnnotatedField(OrmBoundField):\n def annotate(self, request, qs):\n from .orm import admin_get_queryset\n\n return qs.annotate(\n **{\n self.queryset_path: Subquery(\n admin_get_queryset(self.admin, request, [self.name])\n .filter(pk=OuterRef(s(self.previous.full_path + [\"id\"])))\n .values(self.admin_order_field)[:1],\n output_field=self.field_type,\n )\n }\n )\n\n\nclass OrmAnnotatedField(OrmBaseField):\n def __init__(\n self,\n model_name,\n name,\n pretty_name,\n type_,\n field_type,\n admin,\n admin_order_field,\n choices=None,\n ):\n super().__init__(\n model_name,\n name,\n pretty_name,\n type_=type_,\n can_pivot=True,\n admin=admin,\n concrete=True,\n choices=choices or (),\n )\n self.field_type = field_type\n self.admin_order_field = admin_order_field\n\n def bind(self, previous):\n previous = previous or OrmBoundField.blank()\n\n full_path = previous.full_path + [self.name]\n return OrmBoundAnnotatedField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=f\"ddb_{s(full_path)}\",\n filter_=True,\n )\n\n\nclass OrmAdminField(OrmBaseField):\n def __init__(self, model_name):\n super().__init__(\n model_name, _OPEN_IN_ADMIN, _OPEN_IN_ADMIN, type_=HTMLType, can_pivot=True\n )\n\n def bind(self, previous):\n previous = previous or OrmBoundField.blank()\n full_path = previous.full_path + [self.name]\n return OrmBoundField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=s(previous.full_path + [\"id\"]),\n model_name=self.model_name,\n )\n\n def format(self, obj):\n if obj is None:\n return None\n\n model_name = get_model_name(obj.__class__, \"_\")\n url_name = f\"admin:{model_name}_change\".lower()\n url = reverse(url_name, args=[obj.pk])\n return f'{obj}'\n\n\nclass OrmFileField(OrmConcreteField):\n def __init__(self, model_name, name, pretty_name, url_func):\n super().__init__(model_name, name, pretty_name, type_=HTMLType)\n self.url_func = url_func\n\n def format(self, value):\n if not value:\n return None\n\n return format_html('{}', self.url_func(value), value)\n\n\nclass OrmAggregateField(OrmBaseField):\n def __init__(self, model_name, name):\n super().__init__(model_name, name, name, type_=NumberType, concrete=True)\n self.aggregate = name\n\n def bind(self, previous):\n assert previous\n full_path = previous.full_path + [self.name]\n agg = _AGG_MAP[self.aggregate](s(previous.full_path))\n return OrmBoundField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=s(full_path),\n aggregate_clause=(s(full_path), agg),\n having=True,\n )\n\n\nclass OrmBoundFunctionField(OrmBoundField):\n def 
annotate(self, request, qs):\n        return qs.annotate(\n            **{\n                self.queryset_path: _FUNC_MAP[self.function][0](\n                    s(self.previous.full_path)\n                )\n            }\n        )\n\n\nclass OrmFunctionField(OrmBaseField):\n    def __init__(self, model_name, name, type_):\n        super().__init__(\n            model_name, name, name, type_=type_, concrete=True, can_pivot=True\n        )\n        self.function = name\n\n    def bind(self, previous):\n        assert previous\n        full_path = previous.full_path + [self.name]\n        return OrmBoundFunctionField(\n            field=self,\n            previous=previous,\n            full_path=full_path,\n            pretty_path=previous.pretty_path + [self.pretty_name],\n            queryset_path=s(full_path),\n            filter_=True,\n        )\n","sub_path":"data_browser/orm_fields.py","file_name":"orm_fields.py","file_ext":"py","file_size_in_byte":10401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"131839778","text":"from django.core.cache import cache\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom django.views.decorators.cache import cache_page\n\n# @cache_page(100,key_prefix='py2002')\nfrom userapp.models import User\n\n\ndef query(request):\n    # Query the model data and pass it to the HTML template\n    users = User.objects.all()\n    return render(request, 'redisapp/query.html', {'users': users})\n\n\ndef change_age(request):\n    rst = request.GET.get('age')\n    user = User.objects.get(pk=1)\n    user.age = rst\n    user.save()\n    # Clear the cache by hand\n    # Option 1:\n    # caches = cache.keys('*py2002*')\n    # for c in caches:\n    #     cache.delete(c)\n    # Option 2:\n    # caches = cache.keys('*py2002*')\n    # cache.delete_many(caches)\n    # Option 3:\n    # cache.delete_pattern('*py2002*')\n    # Option 4:\n    # cache.clear()\n    return HttpResponse('Update successful')\n\n\ndef set_session(request):\n    request.session['python2002'] = True\n    return HttpResponse('Session set')\n","sub_path":"redisapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"510050052","text":"URL_GOOGLE = 'https://www.google.com/search?q=погода+{}+{}.{}'\r\nURL_SINOPTIK = 'https://ua.sinoptik.ua/погода-{}/{}'\r\nURL_POGODA33 = 'https://pogoda33.ua/погода-{}/тиждень'\r\nURL_METEOTREND = 'https://ua.meteotrend.com/forecast/ua/{}/'\r\n\r\nDAYS = ('понеділок','вівторок','середа','четвер','пʼятниця','субота','неділя')\r\nMONTHS = ('січня','лютого','березня','квітня','травня','червня','липня','серпня','вересня','жовтня','листопада','грудня')\r\nHEADERS = {\r\n\t'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',\r\n\t'accept':'*/*'\r\n}\r\n\r\nlocalization = {'й':'i','ц':'ts','у':'u','к':'k','е':'e','н':'n','г':'h',\r\n'ш':'sh','щ':'sch','з':'z','х':'kh','ї':'i','ф':'f',\r\n'і':'i','в':'v','а':'a','п':'p','р':'r','о':'o','л':'l','д':'d','ж':'zh','є':'ie',\r\n'ґ':'g','я':'ia','ч':'ch','с':'s','м':'m','и':'y','т':'t','ь':'','б':'b','ю':'iu'\r\n}\r\n","sub_path":"configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"156108818","text":"'''\r\nImport the pandas package with the alias pd.\r\nImport the file 'tweets.csv' using the pandas function read_csv().\r\nAssign the resulting DataFrame to df.\r\nComplete the for loop by iterating over col, the 'lang' column in the DataFrame df.\r\nComplete the bodies of the if-else statements in the for loop:\r\nif the key is in the dictionary langs_count, add 1 to its current value,\r\nelse add the key to langs_count and set its value to 1.\r\nUse the loop variable entry in your code.\r\n'''\r\n\r\n# Import pandas\r\nimport pandas as pd\r\n\r\n# Import Twitter data as DataFrame: df\r\ndf = pd.read_csv('tweets.csv')\r\n\r\n# Initialize an empty dictionary: langs_count\r\nlangs_count = {}\r\n\r\n# Extract column from DataFrame: col\r\ncol = df['lang']\r\n\r\n# Iterate over lang column in DataFrame\r\nfor entry in col:\r\n\r\n    # If the language is in langs_count, add 1\r\n    if entry in langs_count.keys():\r\n        langs_count[entry] += 1\r\n    # Else add the language to langs_count, set the value to 1\r\n    else:\r\n        langs_count[entry] = 1\r\n\r\n# Print the populated dictionary\r\nprint(langs_count)\r\n
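\r\n# Added sketch: collections.Counter collapses the whole loop above into one call\r\nfrom collections import Counter\r\nprint(dict(Counter(col)))  # same counts as langs_count\r\n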
","sub_path":"03-python-data-science-toolbox-1/01-writing-your-own-functions/bringing-it-all-together.py","file_name":"bringing-it-all-together.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"649345009","text":"from ComplexLib import *\r\nimport sympy\r\nimport numpy\r\nimport math\r\nimport cmath\r\ndef act(matriz, vector):\r\n    '''Applies the action of a matrix on a vector'''\r\n    v = [(0, 0) for i in range(len(matriz))]\r\n    for i in range(len(matriz)):\r\n        for j in range(len(vector)):\r\n            v[i] = sumaComplejos(v[i], productoComplejos(matriz[i][j], vector[j]))\r\n    return v\r\n\r\ndef modsquare(a):\r\n    '''Squares the components of a complex number (its squared modulus)'''\r\n    return a[0] ** 2 + a[1] ** 2\r\n\r\n\r\ndef magVector(v1):\r\n    '''Finds the magnitude of a vector'''\r\n    suma = 0\r\n    for i in range(len(v1)):\r\n        suma += modsquare(v1[i])\r\n    return math.sqrt(suma)\r\n\r\n\r\ndef innerP(v1, v2):\r\n    '''Computes the inner product of two vectors'''\r\n    suma = (0, 0)\r\n    for i in range(len(v2)):\r\n        v2[i] = conjugado(v2[i])\r\n    for i in range(len(v1)):\r\n        suma = sumaComplejos(suma, productoComplejos(v2[i], v1[i]))\r\n\r\n    return suma\r\n\r\n\r\ndef probability(vector, position):\r\n    '''Finds the probability (in %) of observing the particle at a position after measurement'''\r\n    suma = 0\r\n    for i in range(len(vector)):\r\n        suma += modsquare(vector[i])\r\n    a = modsquare(vector[position])\r\n    return round((a / suma) * 100, 2)\r\n
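\r\n# Added sketch: with equal amplitudes the two outcomes split evenly, e.g.\r\n# probability([(1, 0), (0, 1)], 0) -> 50.0\r\n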
\r\n\r\ndef normalized(v1):\r\n    '''Rescales a vector into a unit vector'''\r\n    a = magVector(v1)\r\n    for i in range(len(v1)):\r\n        v1[i] = divisionComplejos(v1[i], (a, 0))\r\n    return v1\r\n\r\n\r\ndef amplitudTransicion(v1, v2):\r\n    '''Finds the transition amplitude between two states after measurement'''\r\n    return innerP(normalized(v1), normalized(v2))\r\n\r\n\r\ndef expectedValue(obs, state):\r\n    '''Finds the expected value of an observable on an initial state'''\r\n    m1 = action2(obs, state)\r\n    return (innerP(m1, state))[0]\r\n\r\n\r\ndef identity(n, val):\r\n    '''Creates an n x n identity matrix scaled by the expected value val'''\r\n    matriz = [[(0,0) for j in range(n)] for i in range(n)]\r\n    for i in range(n):\r\n        matriz[i][i] = (val,0)\r\n    return matriz\r\n\r\ndef varianza(obs, state):\r\n    '''Finds the variance of an observable on an initial state'''\r\n    s1 = list(state)\r\n    m1 = restaMatrices(obs, identity(len(state), expectedValue(obs, state)))\r\n    m2 = productoMatrices(m1,m1)\r\n    m3 = innerP(action2(m2, s1), s1)\r\n    return m3[0]\r\n\r\n\r\ndef med_var(observator, state):\r\n    '''Prints the variance and the mean'''\r\n    if matrizHermitiana(observator):\r\n        print('MEAN', expectedValue(observator, state))\r\n        print('VARIANCE', varianza(observator, state))\r\n    else:\r\n        print('The observable is not a Hermitian matrix')\r\n\r\n\r\ndef eigenValues(eValues):\r\n    '''Finds the eigenvalues of a matrix'''\r\n    lst = []\r\n    min, max = -100, 100\r\n    for i in range(min, max):\r\n        if eValues.get(i) is not None:\r\n            lst += [i]\r\n    return lst\r\n\r\n\r\ndef eigenVectors(eVector):\r\n    '''Finds the eigenvectors of a matrix'''\r\n    lst = []\r\n    for i in range(len(eVector)):\r\n        n = complex(eVector[i])\r\n        x, y = int(n.real), int(n.imag)\r\n        lst += [(x, y)]\r\n    return lst\r\n\r\n\r\ndef convMatriz(m):\r\n    '''Converts a matrix of tuples into a matrix of complex numbers'''\r\n    m1 = [[0 for j in range(len(m[0]))] for i in range(len(m))]\r\n    for i in range(len(m)):\r\n        for j in range(len(m[0])):\r\n            m1[i][j] = complex(m[i][j][0], m[i][j][1])\r\n    return m1\r\n\r\n\r\ndef prob(matriz, state,):\r\n    '''Finds the probability that a state reaches an eigenvector'''\r\n    eValues, eVector = reviewObs(matriz, state)\r\n    if expectedValue(matriz, state) in eValues:\r\n        print('Probability of reaching an eigenvector: 100%')\r\n    else:\r\n        if type(eVector[0]) == list:\r\n            for i in range(len(eVector)):\r\n                return amplitudTransicion(state, eVector[i])\r\n        else:\r\n            return amplitudTransicion(state, eVector)\r\n\r\n\r\ndef reviewObs(m1, state):\r\n    '''Checks that the matrix is Hermitian and, if so, computes the observable's eigenvalues and eigenvectors for the given state.'''\r\n    a = convMatriz(m1)\r\n    a = sympy.Matrix(a)\r\n    eValues = a.eigenvals()\r\n    eValues = eigenValues(eValues)\r\n    x, v = a.eigenvects()[0][2][0], a.eigenvects()[0][0]\r\n    eVectors = x*v\r\n    eVector = eigenVectors(eVectors)\r\n    return eValues, eVector\r\n\r\ndef dynamic(n, matriz, state):\r\n    '''Computes the position of a particle after it passes through a series of unitary matrices'''\r\n    for i in range(n):\r\n        state = act(matriz, state)\r\n    return state\r\n","sub_path":"teoria_cuantica_basica/teoriaCuanticaBasica.py","file_name":"teoriaCuanticaBasica.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"541173885","text":"# -*- coding: utf-8 -*-\n\"\"\"\nScript to query the updated water use and allocation results.\n\"\"\"\n\nfrom pandas import merge, read_csv, DataFrame\nfrom query_use_allo_v01 import w_query\n\n#################################\n### Parameters\n\nseries_csv = 'C:/ecan/base_data/usage/usage_takes_series_sw_up2_with_cav.csv'\nallo_csv = 'C:/ecan/base_data/usage/takes_results2.csv'\n\nallo_cols = ['crc', 'wap', 'take_type', 'catchment', 'irr_area', 'gw_zone', 'sw_zone', 'use_type', 'catchment_num', 'cwms_zone']\ncwms_zone = ['Ashburton']\nyears = [2015]\nuse_type = ['stockwater']\ngrp_by = ['dates']\nallo_col = ['ann_allo_m3', 'up_allo_m3']\n\nexport_path = 'C:/ecan/Projects/requests/cwms/set2/ash_results_up_with_cav.csv'\n\n\n#################################\n### Read in allocation and usage data and merge data\n\nseries = read_csv(series_csv)\nallo = read_csv(allo_csv)[allo_cols]\n\nallo_use1 = merge(series, allo, on=['crc', 'wap'])\n\n### Read in input data to be used in the query\n\n#################################\n### Query data\n\nq1 = w_query(allo_use1, grp_by=grp_by, allo_col=allo_col, use_type=use_type, years=years, 
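\n             # (added note) the keyword filters above (use_type, years, cwms_zone) narrow the\n             # merged allocation/usage table before it is grouped by grp_by.\n             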
cwms_zone=cwms_zone, export_path=export_path, debug=True)\n\n\n\n\n","sub_path":"python_scripts/usage/requests/WUS_ROS_query_ashburton.py","file_name":"WUS_ROS_query_ashburton.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"529761585","text":"#!/usr/bin/env python\n\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nimport SocketServer\nimport json\nimport urlparse\nimport subprocess\nfrom ChatbotGet import ChatbotGet\n\nchatbotGet=ChatbotGet()\n\nclass S(BaseHTTPRequestHandler):\n\tdef _set_headers(self):\n\t\tself.send_response(200)\n\t\tself.send_header('Content-type', 'application/json')\n\t\tself.end_headers()\n\n\tdef do_GET(self):\n\t\tself._set_headers()\n\t\tparsed_path = urlparse.urlparse(self.path)\n\t\trequest_id = parsed_path.path[1:]\n\t\t# response = subprocess.check_output([\"python\", request_id+'.py'])\n\t\t# self.wfile.write(json.dumps(response))\n\t\tself.wfile.write(json.dumps(chatbotGet.return_message(request_id)))\n\n\tdef do_POST(self):\n\t\tself._set_headers()\n\t\tparsed_path = urlparse.urlparse(self.path)\n\t\trequest_id = parsed_path.path\n\t\tprint(\"request_id of post\")\n\t\tprint(request_id)\n\t\tresponse = subprocess.check_output([\"python\", request_id])\n\t\tself.wfile.write(json.dumps(response))\n\n\tdef do_HEAD(self):\n\t\tself._set_headers()\n\ndef run(server_class=HTTPServer, handler_class=S, port=8000):\n\tserver_address = ('', port)\n\thttpd = server_class(server_address, handler_class)\n\tprint('Starting httpd...')\n\thttpd.serve_forever()\n\nif __name__ == \"__main__\":\n\tfrom sys import argv\n\n\tif len(argv) == 2:\n\t\trun(port=int(argv[1]))\n\telse:\n\t\trun()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"284631479","text":"'''\nHand class\n'''\n\nclass Hand:\n\t'''\n\t@input cards: initial cards\n\t'''\n\tdef __init__(self):\n\t\tself.cards = []\n\n\t'''\n\t@input: tuple card (suit, rank)\n\t'''\n\tdef __str__(self):\n\t\tstr = \"\"\n\t\tfor card in self.cards:\n\t\t\tstr += f\"{card[1]}-of-{card[0]}\\n\"\n\t\treturn str\n\n\t'''\n\t@return score: check the score on current hand\n\t'''\n\tdef getscore(self):\n\t\tscore = 0\n\t\tnumbers_A = 0\n\t\tfor card in self.cards:\n\t\t\tif card[1] == \"King\" or card[1] == \"Queen\" or card[1] == \"Jack\" or card[1] == \"10\": \n\t\t\t\tscore += 10\n\t\t\telif card[1] == \"Ace\":\n\t\t\t\tscore += 11\n\t\t\t\tnumbers_A += 1\n\t\t\telse:\n\t\t\t\tscore += int(card[1])\n\n\t\tif score <= 21:\n\t\t\treturn score\n\t\telse:\n\t\t\tfor i in range(numbers_A):\n\t\t\t\tscore -= 10\n\t\t\t\tif score <= 21:\n\t\t\t\t\treturn score\n\t\t\treturn \"Bust\"\n","sub_path":"08-Milestone Project - 2/black_jack/module/hand.py","file_name":"hand.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"419306075","text":"import logging\nimport requests\nimport time\n\nfrom threading import Event\nfrom wrapt.decorators import synchronized\n\n\nclass ConnectionStorage:\n @property\n def connected(self) -> Event:\n return self.__connected\n\n @property\n def connection_timeout(self) -> int:\n return self.__connection_timeout\n\n @property\n @synchronized\n def session_id(self) -> str:\n if not self.__session_id:\n raise ConnectionError(\"Connection required.\")\n\n if 
self.is_timeout_expired():\n self.__connected.clear()\n raise TimeoutError(\"Connection has probably expired.\")\n\n return self.__session_id\n\n @session_id.setter\n @synchronized\n def session_id(self, session_id: str):\n if session_id:\n self.__session_id = session_id\n self.__connected.set()\n else:\n self.__session_id = session_id\n self.__connected.clear()\n\n def __init__(\n self,\n connection_timeout: int = 15,\n ):\n self.__connection_timeout = connection_timeout\n\n self.__connected = Event()\n self.__last_success = 0\n self.__logger = logging.getLogger(self.__module__)\n self.__session_id = \"\"\n\n @synchronized\n def is_timeout_expired(self):\n if not self.__last_success:\n return False\n\n return (time.monotonic() - self.__last_success) > self.__connection_timeout\n\n @synchronized\n def response_hook(self, response, *args, **kwargs):\n \"\"\"This hook will intercept all the \"requests.Response\".\"\"\"\n\n timestamp = time.monotonic()\n status_code = response.status_code\n\n if self.__last_success < timestamp and status_code == 200:\n self.__last_success = timestamp\n\n def setup_hooks(self, session: requests.Session):\n hooks = {\"response\": [self.response_hook]}\n session.hooks.update(hooks)\n","sub_path":"degiro_connector/quotecast/models/connection_storage.py","file_name":"connection_storage.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"310796351","text":"# USB VCP example.\n# This example shows how to use the USB VCP class to send an image to PC on demand.\n#\n# WARNING:\n# This script should NOT be run from the IDE or command line, it should be saved as main.py\n# Note the following commented script shows how to receive the image from the host side.\n#\n# #!/usr/bin/env python2.7\n# import sys, serial, struct\n# port = '/dev/ttyACM0'\n# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,\n# xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)\n# sp.setDTR(True) # dsrdtr is ignored on Windows.\n# sp.write(\"snap\")\n# sp.flush()\n# size = struct.unpack(' 1:\n cv2.putText(image,\"All good!\", \n bottomLeftCornerOfText, \n font, \n fontScale,\n goodFontColor,\n lineType)\n # print(\"All good!\")\n else:\n cv2.putText(image,\"I can't see your face\", \n bottomLeftCornerOfText, \n font, \n fontScale,\n goodFontColor,\n lineType)\n # print(\"I can't see your face\")\n\n \n cv2.line(image, tuple(point1), tuple(\n point2), color, line_width, cv2.LINE_AA)\n cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)\n cv2.line(image, tuple(point_2d[1]), tuple(\n point_2d[6]), color, line_width, cv2.LINE_AA)\n cv2.line(image, tuple(point_2d[2]), tuple(\n point_2d[7]), color, line_width, cv2.LINE_AA)\n cv2.line(image, tuple(point_2d[3]), tuple(\n point_2d[8]), color, line_width, cv2.LINE_AA)\n\n def get_pose_marks(self, marks):\n \"\"\"Get marks ready for pose estimation from 68 marks\"\"\"\n pose_marks = []\n pose_marks.append(marks[30]) # Nose tip\n pose_marks.append(marks[8]) # Chin\n pose_marks.append(marks[36]) # Left eye left corner\n pose_marks.append(marks[45]) # Right eye right corner\n pose_marks.append(marks[48]) # Left Mouth corner\n pose_marks.append(marks[54]) # Right mouth corner\n return 
np.array(pose_marks)\n","sub_path":"pose_estimator.py","file_name":"pose_estimator.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"8075204","text":"import tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nfrom tornado.options import define, options\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\nclass IndexHandler(tornado.web.RequestHandler):\n    def get(self,input):\n        self.write(input[::-1])\n    def write_error(self,status_code,**kwargs):\n        self.write(\"Gosh Darnit %d error\" % status_code)\nclass WrapHandler(tornado.web.RequestHandler):\n    def post(self):\n        text1 = self.get_argument('text')\n        self.write(text1)\nif __name__ == \"__main__\":\n    tornado.options.parse_command_line()\n    app = tornado.web.Application(handlers = [(r\"/in/(\\w+)\",IndexHandler),\n                                              (r\"/wrap\",WrapHandler)])\n    httpserverp = tornado.httpserver.HTTPServer(app)\n    httpserverp.listen(options.port)\n    tornado.ioloop.IOLoop.instance().start()\n","sub_path":"hellowtornado.py","file_name":"hellowtornado.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"10059226","text":"\"\"\"\nHuffman binary tree\n\nGiven n weights for n leaf nodes, build a binary tree that minimises the weighted path length; such a tree is called an optimal binary tree, or Huffman tree. A Huffman tree has the shortest weighted path length, and nodes with larger weights sit closer to the root.\n\nConstruction algorithm\n\nHuffman gave an algorithm that builds the corresponding Huffman tree from any set of reals:\n    1. The input is a set of reals W = {w0, w1, ..., w(m-1)}.\n    2. The construction maintains a set F of k binary trees. Initially k = m and F = {T0, T1, ..., T(m-1)}, where each Ti is a single-node tree whose root carries weight w(i).\n    3. Repeat the following two steps until only one tree remains in F:\n       a. Build a new binary tree whose left and right subtrees are the two trees of smallest weight taken from F; the weight of its root is the sum of the weights of those two roots.\n       b. Remove the two chosen trees from F and add the newly built tree. Every round shrinks F by one tree, which guarantees termination.\n\nImplementation\n\nThe construction keeps a group of binary trees and must know the weight of each one, so the binary tree node class can be reused, recording each tree's weight in its root node.\n\nSince the algorithm repeatedly extracts the two trees of smallest weight and builds a new tree from them, the natural container is a priority queue holding the trees, ordered by root weight from small to large.\n\nThe algorithm starts by building a group of single-node trees, enqueued with the weight as the priority key, and then repeats:\n    1. Pop the two smallest elements (two binary trees) from the priority queue.\n    2. Build a new tree from them, with the sum of the two subtree weights as its weight, and push it back into the queue.\n\nTwo details remain:\n    1. The trees need an ordering: a tree with a smaller weight comes first.\n    2. The number of elements in the queue must be checkable so the loop can stop when one tree is left. Both are handled by extending the previously defined classes.\n\nApplication: Huffman coding\n\"\"\"\n\nfrom binary_tree import BinTreeNode\nfrom priority_queue_list import PriorityQueue\n\n# Subclass the binary tree node class to get a node dedicated to building\n# Huffman trees; the only addition is a less-than comparison operator.\nclass HaffmanNode(BinTreeNode):\n    def __lt__(self, othernode):\n        return self.data < othernode.data\n\n# A priority queue tailored to the Huffman algorithm, with an extra method\n# that reports how many elements the queue holds.\nclass HuffmanPriorityQueue(PriorityQueue):\n    def number(self):\n        return len(self._elems)\n\n# Build the Huffman tree\ndef HuffmanTree(weights):\n    trees = HuffmanPriorityQueue()  # instantiate the priority queue\n\n    for w in weights:  # wrap each weight in a single-node tree and enqueue them all\n        each_tree = HaffmanNode(w)\n        trees.enqueue(each_tree)\n\n    while trees.number() > 1:  # stop when one tree is left: that is the Huffman tree\n        t1 = trees.dequeue()\n        t2 = trees.dequeue()  # take out the two smallest trees\n        x = t1.data + t2.data  # the new tree's weight\n        new_tree = HaffmanNode(x, t1, t2)  # build the new tree\n        trees.enqueue(new_tree)  # add the new tree back to the queue\n\n    return trees.dequeue()\n
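\n# Added usage sketch: weights [2, 3, 7] merge as (2 + 3) + 7, so the root\n# weight is 12 and the two smallest leaves end up deepest in the tree:\n#   tree = HuffmanTree([2, 3, 7])\n#   tree.data  # -> 12\n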
be unchanged\nprint(middle_list) \n","sub_path":"Python txt book exercise/8.1.py","file_name":"8.1.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"369459412","text":"import argparse\nimport numpy as np\n\nimport utils\nimport os\nimport time\nimport sys\nfrom concurrent.futures import (\n ProcessPoolExecutor,\n as_completed\n)\n\ndef get_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--inString',\n '-i',\n type=str,\n help=' factorization target')\n parser.add_argument('--K', '-K', type=int, help='Number of process')\n\n return parser.parse_args()\n\n\ndef factor_opt(M, search_range):\n\n for i in search_range:\n if M % i == 0:\n return str(i)\n return 'no'\n\n\n\ndef main(args):\n\n M = int(args.inString)\n M_prime = int(np.sqrt(M))\n search_range = np.array([i + 2 for i in range(M_prime - 2)])\n splitted = np.array_split(search_range, args.K)\n\n res = 'no'\n\n with ProcessPoolExecutor() as e:\n futures = set([e.submit(factor_opt, M, split_range)\n for split_range in splitted])\n\n\n for future in as_completed(futures):\n temp = future.result()\n if temp !='no':\n res = temp\n\n return res\n\n # すべてnoだった場合はこちらを返す\n return res\n\n\nif __name__ == '__main__':\n\n arg = get_arguments()\n res = main(arg)\n print(res)\n","sub_path":"problemsets/chapter8/factor_multiprocess.py","file_name":"factor_multiprocess.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"401876263","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 5 17:00:59 2021\n\n@author: 孔湘涵\n\"\"\"\nimport numpy as np\nimport time\nimport random\nimport matplotlib.pyplot as plt\n\n# =============================================================================\n# Spectral clustering/ Normalized cut functions\n# =============================================================================\n\ndef spectral_cluster_slow(data):\n '''\n The classic spetral clustering algorithm.\n arguments:\n - data: np.ndarray of shape [no_data, no_dimensions]\n input data points\n returns:\n - U: top k eigenvectors, np.ndarray of shape [no_data, k] \n '''\n time1 = time.time()\n number = data.shape[0]\n #Step 1: compute matrix W\n w1 = np.broadcast_to(data, (number,number)) #w1[:,0,:] is same\n w2 = w1.T\n w = np.float32(w1)-np.float32(w2)\n W = np.exp(-w**2)\n time2 = time.time()\n print('Time of compute matrix W is ',time2-time1) \n \n #Step2: compute matrix D\n d = W.sum(axis = 0)\n D = np.diag(d)\n time3 = time.time()\n print('Time of compute matrix D is ',time3-time2) \n \n #Step3: compute Graph Laplacian matrix\n L = np.linalg.inv(D)**0.5 @ (D - W) @ np.linalg.inv(D)**0.5\n time4 = time.time()\n print('Time of compute matrix L is ',time4-time3)\n \n #Step4: do eigenvalue decomposition of L\n values, vectors = np.linalg.eigh(L)\n time5 = time.time()\n print('Time of compute eigendecomposition is ',time5-time4)\n\n #Step5: find k smallest eigenvalues\n gap = np.zeros(len(values)-1)\n for i in range(gap.shape[0]):\n gap[i] = values[i+1] - values[i]\n k = np.argmax(gap)+1\n U = vectors[:,:k]\n print('Total time is ',time.time()-time1)\n \n return U\n\ndef spectral_cluster_nystrom(A, B, sample_indices, remain_indices):\n '''\n The fast spetral clustering algorithm using Nystrom method.\n arguments:\n - A: similarity sub-matrix shape [no_samples, no_samples]\n - B: similarity sub-matrix shape [no_samples, 
no_remaining_points]\n -sample_indices: np.ndarray of shape [no_samples]\n -remain_indices: np.ndarray of shape [no_remaining_points]\n \n returns:\n - V: top k eigenvectors, np.ndarray of shape [no_data, k] \n '''\n num_points = A.shape[1] + B.shape[1]\n num_samples = sample_indices.shape[0]\n \n #1. compute row sums of w which is d, and reset the samples location at front\n d1 = np.sum(A,axis=1) + np.sum(B,axis=1)\n d2 = np.sum(B,axis=0) + np.dot(B.T,np.dot(np.linalg.pinv(A),np.sum(B,axis=1)))\n dhat = np.reshape(np.sqrt(1/np.concatenate([d1,d2])),[num_points,1])\n \n #2. get new A & B\n A = A * np.dot(dhat[0:num_samples],dhat[0:num_samples].T)\n B = B * np.dot(dhat[0:num_samples],dhat[num_samples:].T)\n \n #3.compute s and diagonalize it\n Asi = np.linalg.pinv(A**0.5)\n BBT = np.dot(B,B.T)\n S = A + np.dot(Asi,np.dot(BBT,Asi))\n us,gammas,_ = np.linalg.svd(S)\n gammas = np.diag(gammas)\n\n #4, choose the first k singular vectors\n k = 8\n ABT = np.zeros((num_points,num_samples))\n ABT[sample_indices,:] = A\n ABT[remain_indices,:] = B.T\n V = ABT @ Asi @ us[:,1:k] @ np.linalg.pinv(gammas[1:k,1:k]**0.5) \n v = V / np.broadcast_to(np.linalg.norm(V,axis=1).reshape(-1,1), (V.shape)) #data normalization\n return v\n\n# =============================================================================\n# Similarity matrix construction functions\n# =============================================================================\n\ndef sample(row,col,num_sample):\n '''\n Produce some samples.\n arguments:\n - row,col: image size.\n - num_sample: no_samples, number of samples\n \n returns:\n - sample_indices: np.ndarray of shape [no_samples]\n - remain_indices: np.ndarray of shape [no_remaining_points]\n '''\n sample_indices = np.array(random.sample(range(row*col), num_sample))\n remain_indices = np.delete(range(row*col), sample_indices)\n return sample_indices, remain_indices\n\ndef similarity(data, sample_indices, method='fully'):\n '''\n Compute similarity sub-matrix A & B.\n arguments:\n - data: np.ndarray of shape [no_data, no_dimensions]\n - sample_indices np.ndarray of shape [no_samples]\n - method choose the type of similaritymatrix, default is 'fully connected graph',\n if want to use 'ε- neighborhood graph', set method as other value\n \n returns:\n - A similarity sub-matrix shape [no_samples, no_samples]\n - B similarity sub-matrix shape [no_samples, no_remaining_points]\n \n Hint: data needs normalized to 1, in case AB=0.\n '''\n data=np.float32(data/data.max())\n length = data.shape[0]\n AB = np.zeros((len(sample_indices),length)) \n samples = data[sample_indices,:]\n sigma=1 \n for i in range(len(sample_indices)):\n # use Gaussian kernel to define the similarity\n AB[i,:] = np.exp((-np.linalg.norm((samples[i,:] - data), axis = 1)**2)/sigma) #fully connected sigma=1 \n if method != 'fully':\n AB = AB[AB>np.exp(-0.8)**2].reshape((len(sample_indices),length)) #ε- neighborhood default is 0.8\n print('ε- neighborhood graph.')\n else:\n print('fully connected graph.')\n A = AB[:,sample_indices]\n B = AB[:,np.delete(range(length), sample_indices)]\n return A,B\n\n# =============================================================================\n# K-means functions\n# =============================================================================\n\ndef k_means_1d(X, centroids, n_iterations):\n '''\n standard k-means algorithm\n arguments:\n - X: np.ndarray of shape [no_data]\n input data points\n - centroids: np.ndarray of shape [k]\n centres of initial custers\n - n_iterations: integer, number of 
iterations to run k-means for\n returns:\n - which_component: np.ndarray of shape [no_data] and integer data\n type, contains values in [0, k-1] indicating which\n cluster each data point belongs to\n - centroids: np.ndarray of shape [k], centres of \n final custers, ordered in such way as indexed by\n `which_component`\n '''\n k = centroids.shape[0]\n for _ in range(n_iterations):\n # reassign data points to components\n distances = np.linalg.norm(np.expand_dims(X, axis=1) - centroids, axis=-1, ord=2)\n \n which_component = np.argmin(distances, axis=-1)\n # calcuate centroid for each component\n centroids = np.stack(list( X[which_component==i].mean(axis=0) for i in range(k) ), axis=0)\n\n return which_component, centroids\n\ndef k_means_pp_1d(X, k):\n '''\n Compute initial custer for k-means\n arguments:\n - X: np.ndarray of shape [no_data]\n input data points\n returns:\n - centroids: np.ndarray of shape [k]\n centres of initial custers\n '''\n channels = 1\n num_data = X.shape[0] \n\n #step1: get a random point as the first center\n index1 = int(np.random.random_sample()*num_data) \n centroids = np.zeros((k,channels))\n centroids[0] = X[index1] \n index = np.zeros(k) #the index of centers in dataset\n index[0] = index1\n for i in range(1,k):\n #step2: compute every point's distance to the nearest existing centroid\n distance = np.ones(num_data) \n for j in range(num_data): #for all data\n dis = np.ones(i+1)\n for m in range(0,i+1):\n #each distance between center m and every point\n dis[m] = np.linalg.norm(X[j] - centroids[m])\n #assign each point to the nearest center with minimum distance\n distance[j] = dis.min() \n if distance[j] == 0:\n distance[j] += 1e-5\n #step3: choose one point as the centre of a new cluster with probability proportional to distance**2\n index[i] = np.argmax(distance) \n centroids[i] = X[int(index[i])]\n return centroids\n\ndef k_means(X, centroids, n_iterations):\n '''\n standard k-means algorithm\n arguments:\n - X: np.ndarray of shape [no_data, no_dimensions]\n input data points\n - centroids: np.ndarray of shape [k, no_dimensions]\n centres of initial custers\n - n_iterations: integer, number of iterations to run k-means for\n returns:\n - which_component: np.ndarray of shape [no_data] and integer data\n type, contains values in [0, k-1] indicating which\n cluster each data point belongs to\n - centroids: np.ndarray of shape [k, no_dimensions], centres of \n final custers, ordered in such way as indexed by\n `which_component`\n '''\n k = centroids.shape[0]\n for _ in range(n_iterations):\n # reassign data points to components\n distances = np.linalg.norm(np.expand_dims(X, axis=1) - centroids, axis=-1, ord=2)\n which_component = np.argmin(distances, axis=-1)\n # calcuate centroid for each component\n centroids = np.stack(list( X[which_component==i].mean(axis=0) for i in range(k) ), axis=0)\n\n return which_component, centroids\ndef k_means_pp(X, k):\n '''\n Compute initial custer for k-means\n arguments:\n - X: np.ndarray of shape [no_data, no_dimensions]\n input data points\n returns:\n - centroids: np.ndarray of shape [k, no_dimensions]\n centres of initial custers\n '''\n num_data, channels = X.shape \n\n #step1: get a random point as the first center\n index1 = int(np.random.random_sample()*num_data) \n centroids = np.zeros((k,channels))\n centroids[0,:] = X[index1,:] \n index = np.zeros(k) #the index of centers in dataset\n index[0] = index1\n for i in range(1,k):\n #step2: compute every point's distance to the nearest existing centroid\n distance = 
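np.ones(num_data) \n        # (added note) step 3 below picks argmax(distance), i.e. deterministic\n        # farthest-point seeding, rather than sampling a point with probability\n        # proportional to distance**2 as classic k-means++ does.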
\n        for j in range(num_data): #for all data\n            dis = np.ones(i+1)\n            for m in range(0,i+1):\n                #each distance between center m and every point\n                dis[m] = np.linalg.norm(X[j,:] - centroids[m,:])\n            #assign each point to the nearest center with minimum distance\n            distance[j] = dis.min() \n            if distance[j] == 0:\n                distance[j] += 1e-5\n        #step3: choose one point as the centre of a new cluster with probability proportional to distance**2\n        index[i] = np.argmax(distance) \n        centroids[i,:] = X[int(index[i]),:]\n    return centroids\n\n# =============================================================================\n# Display functions\n# =============================================================================\n\ndef display_clusters(img, which_component, k=-1):\n    '''\n    Display the clustering result by painting each cluster with its mean colour.\n    \n    Param:\n        img                colour RGB image, row*col*channels\n        which_component    1d, size = row*col; says which cluster each pixel belongs to.\n        (k)                number of clusters; inferred from which_component by default\n    '''\n    row,col = img.shape[:2]\n    which_component = which_component.astype(np.int64)\n    if k==-1:\n        k=which_component.max()+1\n    else:\n        pass\n    center_value = np.zeros((k,3))\n    result = np.zeros(img.shape)\n    #calculate the mean value of each cluster\n    for n in range(k):\n        mask = np.array([which_component==n]).reshape((row,col))\n        number = mask.sum()\n        center_value[n,0] = (mask*img[:,:,0]).sum()/number\n        center_value[n,1] = (mask*img[:,:,1]).sum()/number\n        center_value[n,2] = (mask*img[:,:,2]).sum()/number\n        result[:,:,0] += mask*center_value[n,0]\n        result[:,:,1] += mask*center_value[n,1]\n        result[:,:,2] += mask*center_value[n,2]\n    plt.figure()\n    plt.imshow(np.uint8(result)) \n#    plt.title('Clustering result (RGB & fully).')\n    return np.uint8(result)\n\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":12251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"587567591","text":"import time\nplayer_life=130\nplayer_attack=15\n\nenemy_life=150\nenemy_attack=10\n\nwhile player_life>0 and enemy_life>0:\n    player_life-=enemy_attack\n    enemy_life-=player_attack\n    print('After the enemy attacks, the player has ' + str(player_life) + ' HP left')\n    print('After the player attacks, the enemy has ' + str(enemy_life) + ' HP left')\n    time.sleep(1.5)\nif player_life>0 and enemy_life<=0:\n    print('The player wins')\nelse:\n    print('The enemy wins')\n","sub_path":"first_step/a_game.py","file_name":"a_game.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"537253643","text":"# Copy List with Random Pointer\n\n# A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.\n\n# Return a deep copy of the list.\n\n# The Linked List is represented in the input/output as a list of n nodes. 
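\n#\n# (Added note) The solution below keeps a dict from each original node to its copy: two\n# passes over the list, O(n) time, O(n) extra space. An O(1)-space variant interleaves\n# the copied nodes into the original list instead.\n#\n# 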
Each node is represented as a pair of [val, random_index] where:\n\n# val: an integer representing Node.val\n# random_index: the index of the node (range from 0 to n-1) where random pointer points to, or null if it does not point to any node.\n\n# Example 1:\n# Input: head = [[7,null],[13,0],[11,4],[10,2],[1,0]]\n# Output: [[7,null],[13,0],[11,4],[10,2],[1,0]]\n\n# Example 2:\n# Input: head = [[1,1],[2,1]]\n# Output: [[1,1],[2,1]]\n\n# Example 3:\n# Input: head = [[3,null],[3,0],[3,null]]\n# Output: [[3,null],[3,0],[3,null]]\n\n# Example 4:\n# Input: head = []\n# Output: []\n# Explanation: Given linked list is empty (null pointer), so return null.\n\n# Constraints:\n\n# -10000 <= Node.val <= 10000\n# Node.random is null or pointing to a node in the linked list.\n# Number of Nodes will not exceed 1000.\n\n\"\"\"\n# Definition for a Node.\nclass Node(object):\n def __init__(self, val, next, random):\n self.val = val\n self.next = next\n self.random = random\n\"\"\"\nclass Solution(object):\n def copyRandomList(self, head):\n \"\"\"\n :type head: Node\n :rtype: Node\n \"\"\"\n nodeDict = dict()\n dummy = Node(0, None, None)\n nodeDict[head] = dummy\n newHead, pointer = dummy, head\n while pointer:\n node = Node(pointer.val, pointer.next, None)\n nodeDict[pointer] = node\n newHead.next = node\n newHead, pointer = newHead.next, pointer.next\n pointer = head\n while pointer:\n if pointer.random:\n nodeDict[pointer].random = nodeDict[pointer.random]\n pointer = pointer.next\n return dummy.next\n","sub_path":"138.py","file_name":"138.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"375063585","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nPE_0357\n\nPrime generating integers\n\nCreated on Thu Nov 3 11:31:19 2016\n@author: mbh\n\"\"\"\nimport numpy as np\nimport time\n\ndef p357(limit):\n t=time.clock()\n \n primes=np.ones(limit+1,dtype=bool) \n for i in range(2, int((limit+1)**0.5+1)):\n if primes[i]:\n primes[2*i::i]=False\n\n sf=np.ones(limit+1,dtype=bool) \n for i in range(2, int((limit+1)**0.5+1)):\n if sf[i]:\n sf[i**2::i**2]=False\n \n nsum=1 \n for n in range(2,limit,4):\n if primes[n+1] and primes[n//2+2] and sf[n] and all(primes[d+n//d] for d in range(3,int(n**.5)+1) if not n%d):\n nsum+=n\n\n print(nsum,time.clock()-t)\n \n\n","sub_path":"PE_0357/PE_0357.py","file_name":"PE_0357.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"449040498","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport json\nimport locale\nimport sys\nimport random\n\nfrom flask_login import current_user\n\nfrom dash import Dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nimport dash_table\nimport plotly.graph_objs as go\nfrom plotly.subplots import make_subplots\n\nfrom dashboards.dash_functions import apply_layout_with_auth, create_toolip\nfrom dashboards.dash_configs import layout, engine, colors_le, tooltip_text\n\nif sys.platform == 'win32':\n locale.setlocale(locale.LC_ALL, 'rus_rus')\nelse:\n locale.setlocale(locale.LC_ALL, 'ru_RU.UTF-8')\n\nurl_base = '/dash/dashfte/'\n\n\ndef input_to_list(input_value):\n if not isinstance(input_value, list):\n return [input_value]\n else:\n return input_value\n\n\ndates_list = sorted(list(\n pd.read_sql('SELECT 
DISTINCT month_start FROM hc_data_main',\n con=engine, parse_dates=['month_start'])['month_start'].dropna()))\ndates_marks = {i: \"\" for i in range(len(dates_list))}\ndates_slider = dcc.RangeSlider(\n id='dates_slider',\n value=[len(dates_list)-13, len(dates_list)-1],\n min=0,\n max=len(dates_list),\n className=\"dcc_control\"\n)\n\nn_function_show_slider = dcc.Slider(\n id='n_functions_show_slider',\n value=10,\n min=0,\n max=40,\n className='dcc_control'\n)\n\nle_list = list(pd.read_sql('''\nSELECT DISTINCT legal_entity_short\nFROM hc_data_main\n''', con=engine)['legal_entity_short'])\nle_options = [{'label': str(dept), 'value': str(dept)} for dept in le_list]\nle_checklist = dcc.Checklist(\n id='le_checklist',\n options=le_options,\n value='ГРС',\n className='dcc_control',\n labelStyle={'display': 'block'}\n )\n\nle_dict = pd.read_sql('SELECT DISTINCT legal_entity_short_eng, legal_entity_short FROM hc_data_main',\n con=engine).set_index('legal_entity_short_eng').to_dict()['legal_entity_short']\nle_options_dict = [{'label': str(value), 'value': str(key)} for key, value in le_dict.items()]\n\nle_dropdown = dcc.Dropdown(\n id='le_dropdown',\n options=le_options_dict,\n multi=True,\n value=['grs', 'medcorp', 'inrosmed', 'renprime', 'holdingrs', 'renconsult', 'renfinance'],\n className='dcc_control'\n)\n\nle_radio = dcc.RadioItems(\n id='le_radio',\n options=[\n {'label': 'Все', 'value': 'all'},\n {'label': 'Все активные', 'value': 'active'},\n {'label': 'Только ГРС', 'value': 'grs_only'},\n {'label': 'Только РЗ', 'value': 'rz_only'},\n ],\n value='all',\n labelStyle={'display': 'inline-block'},\n className='dcc_control'\n )\n\n\nfunctions_list = sorted(list(\n pd.read_sql('SELECT DISTINCT function FROM hc_data_main',\n con=engine)['function'].dropna()))\nfunctions_options = [{'label': str(item), 'value': str(item)} for item in functions_list]\nfunctions_dropdown = dcc.Dropdown(\n id='functions_dropdown',\n options=functions_options,\n multi=True,\n value=functions_list[0],\n className='dcc_control'\n)\n\nwf_type_radio = dcc.RadioItems(\n id='wf_type_radio',\n options=[\n {'label': 'Топ N изменений', 'value': 'top_n'},\n {'label': 'По выбранным функциям', 'value': 'selected_functions'},\n ],\n value='top_n',\n labelStyle={'display': 'inline-block'},\n className='dcc_control'\n)\n\ninclude_maternity_checkbox = dcc.Checklist(\n id='include_maternity_checkbox',\n options=[\n {'label': 'Включить декретниц', 'value': 'include_maternity'},\n ],\n value=[]\n)\n\ninclude_all_function_selected = dcc.Checklist(\n id='include_all_function_selected',\n options=[\n {'label': 'Суммировать выбранные функции', 'value': 'include_all_function_selected'},\n ],\n value=[]\n)\n\nlayout = html.Div([\n\n dcc.Tabs([\n dcc.Tab(label='Динамика по Юр. Лицам.',\n className='custom-tab',\n selected_className='custom-tab--selected',\n children=[\n html.Div([\n html.Div([\n create_toolip('fte_main_controls', tooltip_text['fte_main_controls']),\n html.Div(id='selected_dates_text', className=\"control_label\"),\n dates_slider,\n html.Div(\"Отобрать по Юр. 
лицам\", className=\"control_label\"),\n le_radio,\n le_dropdown,\n ],\n className='pretty_container six columns'),\n html.Div([\n create_toolip('current_fte_card', tooltip_text['current_fte_card']),\n html.H6(id='current_fte_value'),\n html.P(id='current_fte_text')],\n id='current_fte_container',\n className=\"mini_container three columns\",\n ),\n\n html.Div(\n [\n create_toolip('changes_fte_card', tooltip_text['changes_fte_card']),\n html.H6(id='change_fte_value'),\n html.P(id='change_fte_text')],\n id='change_fte_container',\n className=\"mini_container three columns\",\n )],\n id=\"info-container\",\n className=\"row flex_display\",\n ),\n html.Div([\n dcc.Graph(id='total_fte_graph'),\n create_toolip('main_fte_graph', tooltip_text['main_fte_graph']),\n ], className='pretty_container'),\n html.Div(\n id='fte_table_container',\n className='pretty_container'),\n html.Div([\n create_toolip('main_hc_graph', tooltip_text['main_hc_graph']),\n include_maternity_checkbox,\n dcc.Graph(id='total_hc_graph')\n ], className='pretty_container'),\n ]),\n dcc.Tab(label='Изменения по функциям',\n className='custom-tab',\n selected_className='custom-tab--selected',\n children=[\n html.Div([\n html.Div([\n wf_type_radio,\n html.Div(\n id='selected_n_functions_text',\n className=\"control_label\"),\n n_function_show_slider,\n functions_dropdown,\n create_toolip('fte_functions_controls', tooltip_text['fte_functions_controls']),\n ], className='pretty_container three columns'),\n html.Div([\n dcc.Graph(id='change_fte_wf'),\n create_toolip('fte_waterfall', tooltip_text['fte_waterfall']),\n ], className='pretty_container nine columns'),\n ], className=\"row flex_display\"),\n html.Div([\n html.Div([\n dcc.Graph(id='function_fte_graph'),\n create_toolip('fte_graph_function', tooltip_text['fte_graph_function'])\n ], className='pretty_container four columns'),\n html.Div([\n html.Div(id='detailed_function_table_container'),\n create_toolip('fte_table_detailed', tooltip_text['fte_table_detailed'])\n ], className='pretty_container eight columns'),\n ], className=\"row flex_display\"),\n html.Div(\n id='detailed_people_table_container',\n className='pretty_container'),\n ]),\n ])\n])\n\n\ndef register_dash(server):\n external_stylesheets = [\n {\n 'href': '../../static/build/css/dash_styles.css',\n 'rel': 'stylesheet'\n },\n {\n 'href': '../../static/vendors/font-awesome/css/font-awesome.min.css',\n 'rel': 'stylesheet'\n },\n ]\n app = Dash(server=server, url_base_pathname=url_base, external_stylesheets=external_stylesheets)\n apply_layout_with_auth(app, layout)\n\n @app.callback(\n Output(\"le_dropdown\", \"value\"),\n [Input(\"le_radio\", \"value\")]\n )\n def selection_drop(selected_radio):\n if selected_radio == \"all\":\n return list(le_dict.keys())\n elif selected_radio == 'active':\n return [x for x in le_dict.keys() if x not in ['bos', 'intouch', 'welbi']]\n elif selected_radio == 'grs_only':\n return ['grs']\n elif selected_radio == 'rz_only':\n return ['rz']\n else:\n return list(le_dict.keys())\n\n # Панелька с датаслайдером\n @app.callback(Output('selected_dates_text', 'children'),\n [Input('dates_slider', 'value')])\n def get_selected_dates_text(dates_range):\n start_date = dates_list[dates_range[0] - 1]\n end_date = dates_list[dates_range[1] - 1]\n text_string = 'Данные за период: {}'.format(\n str(datetime.strftime(start_date, '%b %Y')) +\n \" - \" +\n str(datetime.strftime(end_date, '%b %Y')))\n return text_string\n\n # Панель с текущей численностью\n @app.callback(Output('current_fte_value', 
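\n                  # (added note) this callback recomputes the headline FTE from the database on\n                  # every slider or dropdown change; caching would help if the table grows.\n                  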
'children'),\n [Input('dates_slider', 'value'),\n Input('le_dropdown', 'value')])\n def get_current_fte_value(dates_range, selected_le):\n current_date = dates_list[dates_range[1] - 1]\n period = datetime.strftime(current_date, '%Y_%m')\n df = pd.read_sql('''\n SELECT legal_entity_short_eng, fte\n FROM hc_data_main\n WHERE period = \"{}\"\n '''.format(period), con=engine)\n dff = df[df['legal_entity_short_eng'].isin(input_to_list(selected_le))]\n current_fte = round(dff['fte'].sum(), 1)\n return current_fte\n\n # Текст в панельке с текущей численностью (какой месяц)\n @app.callback(Output('current_fte_text', 'children'),\n [Input('dates_slider', 'value')])\n def get_current_fte_text(dates_range):\n current_date = dates_list[dates_range[1] - 1]\n current_period = datetime.strftime(current_date, '%Y_%m')\n date_for_card = pd.read_sql('SELECT DISTINCT month_end FROM hc_data_main WHERE period = \"{}\"'.format(\n current_period), con=engine\n )['month_end'][0]\n period_text = \"Численность на \" + datetime.strftime(date_for_card, '%d.%m.%Y')\n return period_text\n\n # Панель с изменением численности в процентах (значение)\n @app.callback(Output('change_fte_value', 'children'),\n [Input('dates_slider', 'value'),\n Input('le_dropdown', 'value')])\n def get_change_fte_value(dates_range, selected_le):\n start_date = dates_list[dates_range[0] - 1]\n start_period = datetime.strftime(start_date, '%Y_%m')\n end_date = dates_list[dates_range[1] - 1]\n end_period = datetime.strftime(end_date, '%Y_%m')\n df = pd.read_sql('''\n SELECT period, legal_entity_short_eng, SUM(fte) AS fte\n FROM hc_data_main\n WHERE period = \"{}\" OR period = \"{}\"\n GROUP BY period, legal_entity_short_eng\n '''.format(start_period, end_period), con=engine)\n df = df[df['legal_entity_short_eng'].isin(input_to_list(selected_le))]\n start_fte = df[df['period'] == start_period]['fte'].sum()\n end_fte = df[df['period'] == end_period]['fte'].sum()\n fte_change_percent = \"{0:+.1%}\".format(end_fte / start_fte - 1)\n fte_change_absolute = \"{0:+}\".format(round(end_fte - start_fte, 1))\n fte_change = \"{} ({} FTE)\".format(fte_change_percent, fte_change_absolute)\n return fte_change\n\n @app.callback(Output('change_fte_text', 'children'),\n [Input('dates_slider', 'value')])\n def get_change_fte_text(dates_range):\n start_date = dates_list[dates_range[0] - 1]\n end_date = dates_list[dates_range[1] - 1]\n text_string = 'Изменение FTE с {}'.format(\n str(datetime.strftime(start_date, '%B %Y')) +\n \" по \" +\n str(datetime.strftime(end_date, '%B %Y')))\n return text_string\n\n @app.callback(\n Output('total_fte_graph', 'figure'),\n [Input('dates_slider', 'value'),\n Input('le_dropdown', 'value')])\n def get_total_fte_graph(dates_range, selected_le):\n df = pd.read_sql('''\n SELECT month_start, legal_entity_short_eng, legal_entity_group, SUM(fte) AS fte\n FROM hc_data_main\n GROUP BY month_start, legal_entity_group, legal_entity_short_eng\n ''', con=engine, parse_dates=['month_start'])\n df = df[df['legal_entity_short_eng'].isin(input_to_list(selected_le))]\n df = df.groupby(['month_start', 'legal_entity_group']).agg(\n fte=('fte', 'sum')\n ).reset_index()\n\n start_date = dates_list[dates_range[0] - 1]\n end_date = dates_list[dates_range[1] - 1]\n traces = []\n if 'ГРС' in df['legal_entity_group'].unique():\n le_for_graph = ['ГРС'] + [le for le in df['legal_entity_group'].unique() if le != 'ГРС']\n else:\n le_for_graph = df['legal_entity_group'].unique()\n for le in le_for_graph:\n colors = []\n for clr in dates_list:\n if start_date <= 
clr <= end_date:\n colors.append(colors_le[le][0])\n else:\n colors.append(colors_le[le][1])\n df_le = df[df['legal_entity_group'] == le]\n trace = go.Bar(\n x=df_le['month_start'],\n y=df_le['fte'],\n name=le,\n marker={\n 'color': colors,\n },\n hovertemplate='' + le + ': %{y:,.0f}',\n )\n traces.append(trace)\n df_total = df.groupby('month_start').agg(fte=('fte', 'sum')).reset_index()\n totals_trace = go.Scatter(\n x=df_total['month_start'],\n y=df_total['fte'].round(0),\n name='Всего',\n text=df_total['fte'].round(0),\n textposition='top center',\n mode='text',\n hoverinfo='skip',\n hovertemplate='Всего: %{y:,.0f}'\n )\n traces.append(totals_trace)\n xaxis_range = [dates_list[-36] + timedelta(days=15),\n dates_list[-1] + timedelta(days=15)]\n fte_graph_layout = go.Layout(\n title_text=\"Динамика FTE группы компаний\",\n autosize=True,\n margin=dict(l=30, r=30, b=20, t=40),\n plot_bgcolor=\"#EDEDED\",\n paper_bgcolor=\"#EDEDED\",\n hovermode='x',\n barmode='stack',\n legend=dict(font=dict(size=10), orientation=\"h\"),\n xaxis=dict(range=xaxis_range, tickformat='%m.%Y', nticks=12),\n\n )\n figure = {'data': traces, 'layout': fte_graph_layout}\n return figure\n\n @app.callback(\n Output('fte_table_container', 'children'),\n [Input('le_dropdown', 'value')])\n def get_fte_table(le_selected):\n # dates_selected = dates_list[dates_range[0]:dates_range[1]]\n le_selected = input_to_list(le_selected)\n print(le_selected)\n df = pd.read_sql('''\n SELECT legal_entity_short_eng, month, year, SUM(fte) AS fte\n FROM hc_data_main\n GROUP BY year, month, legal_entity_short_eng\n ''', con=engine)\n df = df[df['legal_entity_short_eng'].isin(le_selected)]\n df = df.groupby(['year', 'month']).agg(fte=('fte', 'sum')).reset_index()\n df['rounded_fte'] = df['fte'].round(1)\n df['month'] = df['month'].astype('int')\n dff = pd.pivot_table(\n df,\n index='year',\n columns='month',\n values='rounded_fte',\n aggfunc=np.sum,\n fill_value='-'\n ).reset_index()\n dff_t = dff.round(1)\n data = dff.to_dict('records')\n result_table = dash_table.DataTable(\n data=data,\n id='total_fte_table',\n style_as_list_view=True,\n columns=[\n {'name': 'Год', 'id': 'year'},\n {'name': 'Янв', 'id': '1'},\n {'name': 'Фев', 'id': '2'},\n {'name': 'Мар', 'id': '3'},\n {'name': 'Апр', 'id': '4'},\n {'name': 'Май', 'id': '5'},\n {'name': 'Июн', 'id': '6'},\n {'name': 'Июл', 'id': '7'},\n {'name': 'Авг', 'id': '8'},\n {'name': 'Сен', 'id': '9'},\n {'name': 'Окт', 'id': '10'},\n {'name': 'Ноя', 'id': '11'},\n {'name': 'Дек', 'id': '12'},\n ],\n style_cell={\n 'backgroundColor': '#EDEDED',\n 'textOverflow': 'ellipsis',\n }\n )\n\n return result_table\n\n @app.callback(\n Output('total_hc_graph', 'figure'),\n [Input('dates_slider', 'value'),\n Input('le_dropdown', 'value'),\n Input('include_maternity_checkbox', 'value')])\n def get_total_hc_graph(dates_range, selected_le, include_maternity):\n df = pd.read_sql('''\n SELECT\n month_start,\n legal_entity_short_eng,\n legal_entity_group,\n state_maternity_month_end,\n SUM(main_employee_entry) as headcount\n FROM\n hc_data_main\n WHERE\n headcount_month_end_raw = 1\n GROUP BY \n month_start,\n legal_entity_group,\n legal_entity_short_eng,\n state_maternity_month_end\n ''', con=engine, parse_dates=['month_start'])\n df = df[df['legal_entity_short_eng'].isin(input_to_list(selected_le))]\n if 'include_maternity' not in include_maternity:\n df = df[df['state_maternity_month_end'] != 1]\n df = df.groupby(['month_start', 'legal_entity_group']).agg(\n headcount=('headcount', 'sum')).reset_index()\n 
start_date = dates_list[dates_range[0] - 1]\n end_date = dates_list[dates_range[1] - 1]\n traces = []\n if 'ГРС' in df['legal_entity_group'].unique():\n le_for_graph = ['ГРС'] + [le for le in df['legal_entity_group'].unique() if le != 'ГРС']\n else:\n le_for_graph = df['legal_entity_group'].unique()\n for le in le_for_graph:\n colors = []\n for clr in dates_list:\n if start_date <= clr <= end_date:\n colors.append(colors_le[le][0])\n else:\n colors.append(colors_le[le][1])\n df_le = df[df['legal_entity_group'] == le]\n trace = go.Bar(\n x=df_le['month_start'],\n y=df_le['headcount'],\n name=le,\n marker={\n 'color': colors,\n },\n hovertemplate='' + le + ': %{y:,.0f}',\n )\n traces.append(trace)\n df_total = df.groupby('month_start').agg(headcount=('headcount', 'sum')).reset_index()\n totals_trace = go.Scatter(\n x=df_total['month_start'],\n y=df_total['headcount'].round(0),\n name='Всего',\n text=df_total['headcount'].round(0),\n textposition='top center',\n mode='text',\n hoverinfo='skip',\n hovertemplate='Всего: %{y:,.0f}'\n )\n traces.append(totals_trace)\n xaxis_range = [dates_list[-36] + timedelta(days=15),\n dates_list[-1] + timedelta(days=15)]\n fte_graph_layout = go.Layout(\n title_text=\"Динамика численности группы компаний\",\n autosize=True,\n margin=dict(l=30, r=30, b=20, t=40),\n plot_bgcolor=\"#EDEDED\",\n paper_bgcolor=\"#EDEDED\",\n hovermode='x',\n barmode='stack',\n legend=dict(font=dict(size=10), orientation=\"h\"),\n xaxis=dict(range=xaxis_range, tickformat='%m.%Y', nticks=12),\n )\n figure = {'data': traces, 'layout': fte_graph_layout}\n return figure\n\n @app.callback(\n Output('selected_n_functions_text', 'children'),\n [Input('n_functions_show_slider', 'value')])\n def get_selected_n_functions_text(n_functions_selected):\n text_string = 'Показать детализацию по {} функциям'.format(n_functions_selected)\n return text_string\n\n @app.callback(\n Output('change_fte_wf', 'figure'),\n [Input('dates_slider', 'value'),\n Input('wf_type_radio', 'value'),\n Input('n_functions_show_slider', 'value'),\n Input('functions_dropdown', 'value'),\n Input('le_dropdown', 'value')])\n def get_change_fte_wf(dates_range, wf_type, n_functions_selected, functions_selected, le_selected):\n if not isinstance(le_selected, list):\n le_selected = [le_selected]\n if not isinstance(functions_selected, list):\n functions_selected = [functions_selected]\n start_date = dates_list[dates_range[0] - 1]\n start_period = datetime.strftime(start_date, '%Y_%m')\n end_date = dates_list[dates_range[1] - 1]\n end_period = datetime.strftime(end_date, '%Y_%m')\n n_cases = n_functions_selected\n df_periods_total = pd.read_sql('''\n SELECT period, legal_entity_short_eng, function, SUM(fte) as fte \n FROM hc_data_main\n WHERE period = \"{}\" OR period = \"{}\" \n GROUP BY month_start, legal_entity_short_eng, function\n '''.format(start_period, end_period), con=engine)\n df_start = df_periods_total[df_periods_total['legal_entity_short_eng'].isin(le_selected) &\n (df_periods_total['period'] == start_period)]\n df_start = df_start.groupby('period').agg(fte=('fte', 'sum')).reset_index()\n df_start['title'] = 'FTE на ' + datetime.strftime(start_date, '%B %Y')\n df_start['measure'] = 'absolute'\n df_end = df_periods_total[df_periods_total['legal_entity_short_eng'].isin(le_selected) &\n (df_periods_total['period'] == end_period)]\n df_end = df_end.groupby('period').agg(fte=('fte', 'sum')).reset_index()\n df_end['title'] = 'FTE на ' + datetime.strftime(end_date, '%B %Y')\n df_end['measure'] = 'absolute'\n\n df_change = 
df_periods_total[df_periods_total['legal_entity_short_eng'].isin(le_selected)].copy()\n        df_change['function'].fillna('Не опознаны', inplace=True)\n        dff = pd.pivot_table(\n            df_change,\n            index='function',\n            columns='period',\n            values='fte',\n            aggfunc='sum',\n            fill_value=0\n        ).reset_index()\n\n        dff.rename(columns={start_period: 'start_fte', end_period: 'end_fte'},\n                   inplace=True)\n        dff['change'] = dff['end_fte'] - dff['start_fte']\n        dff['rank'] = dff['change'].abs().rank(method='first', ascending=False)\n        if wf_type == 'top_n':\n            dff.loc[dff['rank'] > n_cases, 'function'] = 'Другие'\n        else:\n            dff.loc[~dff['function'].isin(functions_selected), 'function'] = 'Другие'\n\n        dff = dff.groupby(['function']).agg(change=('change', 'sum')).reset_index()\n        dff['measure'] = 'relative'\n        dff.sort_values(by='change', ascending=False, inplace=True)\n        dff.loc[dff['function'] == 'Другие', 'sorter'] = 1\n        dff.loc[dff['function'] != 'Другие', 'sorter'] = 0\n        dff.sort_values(by='sorter', ascending=True, inplace=True)\n        dff.rename(columns={'function': 'title', 'change': 'fte'}, inplace=True)\n        df_result = pd.concat([\n            df_start[['title', 'fte', 'measure']],\n            dff[['title', 'fte', 'measure']],\n            df_end[['title', 'fte', 'measure']],\n        ]).round({'fte': 1})\n        fig = go.Figure(\n            go.Waterfall(\n                orientation=\"v\",\n                measure=df_result['measure'],\n                x=df_result['title'],\n                textposition=\"outside\",\n                text=df_result['fte'],\n                y=df_result['fte'],\n                hovertemplate='%{x}: %{text}',\n                decreasing={\"marker\": {\"color\": 'rgba(211, 94, 96, 1)'}},\n                increasing={\"marker\": {\"color\": 'rgba(135, 186, 91, 1)'}},\n                totals={\"marker\": {\"color\": 'rgba(114, 147, 203, 1)'}}\n            ))\n        title_string = 'Изменения по функциям в рамках общего изменения численности с {}'.format(\n            str(datetime.strftime(start_date, '%b %Y')) +\n            \" по \" +\n            str(datetime.strftime(end_date, '%b %Y')))\n        fig.update_layout(\n            title_text=title_string,\n            autosize=True,\n            margin=dict(l=30, r=30, b=100, t=60),\n            plot_bgcolor=\"#EDEDED\",\n            paper_bgcolor=\"#EDEDED\",\n            hovermode='x',\n            legend=dict(font=dict(size=10), orientation=\"h\")\n        )\n        max_value = df_result['fte'].max()\n        fig.update_yaxes(range=[0, max_value * 1.3])\n\n        return fig\n\n    @app.callback(\n        Output('function_fte_graph', 'figure'),\n        [Input('change_fte_wf', 'clickData'),\n         Input('dates_slider', 'value'),\n         Input('functions_dropdown', 'value')])\n    def get_functions_fte(clickData, dates_range, selected_functions):\n        functions = list(pd.read_sql('''\n            SELECT DISTINCT function\n            FROM hc_data_main''', con=engine)['function'].dropna())\n        try:\n            function = clickData['points'][0]['x']\n        except TypeError:\n            function = None\n        if function is None or function not in functions:\n            function = input_to_list(selected_functions)[0]\n        start_date = dates_list[dates_range[0] - 1]\n        start_period = datetime.strftime(start_date, '%Y_%m')\n        end_date = dates_list[dates_range[1] - 1]\n        end_period = datetime.strftime(end_date, '%Y_%m')\n        df = pd.read_sql('''\n            SELECT month_start, legal_entity_group, function, SUM(fte) AS fte\n            FROM hc_data_main\n            WHERE function = \"{}\"\n            GROUP BY month_start, legal_entity_group\n            '''.format(function), con=engine, parse_dates=['month_start'])\n        df = df[(df['month_start'] >= start_date) & (df['month_start'] <= end_date)]\n        traces = []\n        for le in df['legal_entity_group'].unique():\n            df_le = df[df['legal_entity_group'] == le]\n            trace = go.Bar(\n                x=df_le['month_start'],\n                y=df_le['fte'],\n                name=le,\n                marker={\n                    'color': colors_le[le][0],\n                }\n            )\n            traces.append(trace)\n\n        fte_graph_layout = 
go.Layout(\n title_text=\"Динамика FTE \" + function,\n autosize=True,\n margin=dict(l=30, r=30, b=20, t=40),\n plot_bgcolor=\"#EDEDED\",\n paper_bgcolor=\"#EDEDED\",\n hovermode='x',\n barmode='stack',\n legend=dict(font=dict(size=10), orientation=\"h\"),\n xaxis=dict(tickformat='%m.%Y', nticks=5),\n )\n figure = {'data': traces, 'layout': fte_graph_layout}\n return figure\n\n @app.callback(\n Output('detailed_function_table_container', 'children'),\n [Input('change_fte_wf', 'clickData'),\n Input('dates_slider', 'value'),\n Input('functions_dropdown', 'value')])\n def get_detailed_functions_table(clickData, dates_range, selected_functions):\n functions = list(pd.read_sql('''\n SELECT DISTINCT function\n FROM hc_data_main''', con=engine)['function'].dropna())\n try:\n function = clickData['points'][0]['x']\n except TypeError:\n function = None\n if function is None or function not in functions:\n function = input_to_list(selected_functions)[0]\n\n start_date = dates_list[dates_range[0] - 1]\n start_period = datetime.strftime(start_date, '%Y_%m')\n end_date = dates_list[dates_range[1] - 1]\n end_period = datetime.strftime(end_date, '%Y_%m')\n df = pd.read_sql('''\n SELECT\n hc.period,\n hc.cost_center,\n SUM(hc.fte) AS fte,\n ccf.function_detailed\n FROM hc_data_main hc\n LEFT JOIN ref_cost_center_functions ccf\n ON hc.cost_center = ccf.cost_center\n WHERE \n hc.function = \"{}\" AND\n (hc.period = \"{}\" OR hc.period = \"{}\")\n GROUP BY hc.period, hc.cost_center, ccf.function_detailed\n '''.format(function, start_period, end_period), con=engine)\n df['fte'] = df['fte'].round(1)\n dff = pd.pivot_table(\n df,\n index='function_detailed',\n columns='period',\n values='fte',\n aggfunc='sum',\n fill_value=0\n ).reset_index()\n dff.rename(columns={start_period: 'start_period', end_period: 'end_period'},\n inplace=True)\n dff['change'] = (dff['end_period'] - dff['start_period']).round(1)\n dff.sort_values(by=['change'], ascending=False, inplace=True)\n data = dff.to_dict('records')\n result_table = dash_table.DataTable(\n data=data,\n id='detailed_function_table',\n style_as_list_view=True,\n columns=[\n {'name': 'Функция', 'id': 'function_detailed'},\n {'name': 'Было', 'id': 'start_period'},\n {'name': 'Стало', 'id': 'end_period'},\n {'name': 'Изменение', 'id': 'change'},\n ],\n fixed_rows={'headers': True, 'data': 0},\n style_cell_conditional=[\n {'if': {'column_id': 'start_period'},\n 'width': '50px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'end_period'},\n 'width': '50px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'change'},\n 'width': '50px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'function_detailed'},\n 'width': '35%',\n 'textAlign': 'left'}\n ],\n style_cell={\n 'backgroundColor': '#EDEDED',\n 'textOverflow': 'ellipsis',\n 'font-size': '0.8rem',\n },\n style_table={\n 'width': '98%',\n 'maxHeight': '450px'\n },\n sort_action=\"native\",\n sort_mode=\"multi\",\n )\n\n return result_table\n\n\n @app.callback(\n Output('detailed_people_table_container', 'children'),\n [Input('change_fte_wf', 'clickData'),\n Input('dates_slider', 'value'),\n Input('functions_dropdown', 'value')])\n def get_detailed_people_table(clickData, dates_range, selected_functions):\n functions = list(pd.read_sql('''\n SELECT DISTINCT function\n FROM hc_data_main''', con=engine)['function'].dropna())\n try:\n function = clickData['points'][0]['x']\n except TypeError:\n function = None\n if function is None or function not in functions:\n function = input_to_list(selected_functions)[0]\n\n start_date 
= dates_list[dates_range[0] - 1]\n        start_period = datetime.strftime(start_date, '%Y_%m')\n        end_date = dates_list[dates_range[1] - 1]\n        end_period = datetime.strftime(end_date, '%Y_%m')\n        df = pd.read_sql('''\n            SELECT\n                hc.period,\n                hc.cost_center,\n                hc.city,\n                hc.employee_id,\n                hc.employee_name,\n                hc.position,\n                hc.hire_date,\n                hc.exit_date,\n                hc.fte,\n                ccf.function_detailed\n            FROM hc_data_main hc\n            LEFT JOIN ref_cost_center_functions ccf\n                ON hc.cost_center = ccf.cost_center\n            WHERE \n                hc.function = \"{}\" AND\n                (hc.period = \"{}\" OR hc.period = \"{}\")\n            '''.format(function, start_period, end_period), con=engine)\n        df['fte'] = df['fte'].round(1)\n\n        if current_user.role_id not in [1, 2, 3]:\n            accesses = current_user.accesses\n            test_list = [f'{x}' for x in accesses]\n            df_ac = pd.DataFrame({\n                'access': accesses\n            })\n            df_ac['access'] = df_ac['access'].astype('str')\n            df_ac = df_ac['access'].str.split(': ', expand=True)\n            df_ac.columns = ['user_id', 'cost_center', 'city']\n            available_records = list(df_ac['cost_center'] + '_' + df_ac['city'])\n            df['check'] = df['cost_center'] + '_' + df['city']\n            df = df[df['check'].isin(available_records)]\n            df = df.drop(columns=['check'])  # assign back; bare drop() is a no-op\n            if df.shape[0] == 0:\n                return ''\n\n        fill_date = pd.Timestamp(2099, 1, 1)\n        df['exit_date'].fillna(fill_date, inplace=True)\n\n        dff = pd.pivot_table(\n            df,\n            index=['function_detailed',\n                   'employee_id',\n                   'employee_name',\n                   'position',\n                   'hire_date',\n                   'exit_date',\n                   ],\n            columns='period',\n            values='fte',\n            aggfunc='sum',\n            fill_value=0\n        ).reset_index()\n        dff.rename(columns={start_period: 'start_period', end_period: 'end_period'},\n                   inplace=True)\n        dff['change'] = (dff['end_period'] - dff['start_period']).round(1)\n        hire_check_date = start_date + pd.DateOffset(months=1)  # months=1 shifts a month forward; month=1 would pin January\n        exit_check_date = end_date + pd.offsets.MonthEnd(0)\n        dff.loc[dff['change'] == 0, 'type'] = 'Без изменений'\n        dff.loc[(dff['change'] > 0) &\n                (dff['hire_date'] >= hire_check_date) &\n                (dff['hire_date'] <= exit_check_date),\n                'type'] = 'Прием'\n        dff.loc[(dff['change'] < 0) &\n                (dff['exit_date'] >= (hire_check_date - pd.Timedelta(days=1))) &\n                (dff['exit_date'] < exit_check_date),\n                'type'] = 'Увольнение'\n        dff['type'].fillna('Перевод/Декрет', inplace=True)  # todo check with olga if other cases\n        dff['hire_date'] = dff['hire_date'].dt.strftime('%d.%m.%Y')\n        dff['exit_date'] = dff['exit_date'].dt.strftime('%d.%m.%Y')\n        dff['exit_date'] = dff['exit_date'].replace({'01.01.2099': '-'})\n        dff.drop(columns=['employee_id'], inplace=True)\n        data = dff.to_dict('records')\n        result_table = dash_table.DataTable(\n            data=data,\n            id='detailed_people_table',\n            style_as_list_view=True,\n            columns=[\n                {'name': 'Функция', 'id': 'function_detailed'},\n                {'name': 'Сотрудник', 'id': 'employee_name'},\n                {'name': 'Должность', 'id': 'position'},\n                {'name': 'Дата приема', 'id': 'hire_date'},\n                {'name': 'Дата увольнения', 'id': 'exit_date'},\n                {'name': 'Было', 'id': 'start_period'},\n                {'name': 'Стало', 'id': 'end_period'},\n                {'name': 'Изменение', 'id': 'change'},\n                {'name': 'Причина', 'id': 'type'},\n            ],\n            fixed_rows={'headers': True, 'data': 0},\n            style_cell_conditional=[\n                {'if': {'column_id': 'start_period'}, 'width': '80px',\n                 'textAlign': 'left'},\n                {'if': {'column_id': 'end_period'}, 'width': '80px',\n                 'textAlign': 'left'},\n                {'if': {'column_id': 'change'}, 'width': '80px',\n                 'textAlign': 'left'},\n                {'if': {'column_id': 'hire_date'}, 'width': '100px',\n                 'textAlign': 'left'},\n                {'if': {'column_id': 'exit_date'}, 'width': '100px',\n                 'textAlign': 'left'},\n                {'if': {'column_id': 
'type'},\n 'width': '150px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'function_detailed'},\n 'width': '15%',\n 'textAlign': 'left'},\n {'if': {'column_id': 'employee_name'},\n 'width': '16%',\n 'textAlign': 'left'},\n {'if': {'column_id': 'position'},\n 'width': '7%',\n 'textAlign': 'left'}\n ],\n style_cell={\n 'backgroundColor': '#EDEDED',\n 'textOverflow': 'ellipsis',\n },\n style_table={\n 'width': '98%',\n 'maxHeight': '450px',\n 'font-size': '0.8rem',\n 'text-align': 'left'\n },\n filter_action=\"native\",\n sort_action=\"native\",\n sort_mode=\"multi\",\n )\n return result_table\n\n return app.server\n","sub_path":"dashboards/dash_fte.py","file_name":"dash_fte.py","file_ext":"py","file_size_in_byte":38583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"401756935","text":"#!/usr/bin/env python\n\n# File bloodPressure.py\n\nimport datetime\nimport json\nimport sys\nsys.path.append('./healthFact')\nimport measurements\nimport datastoreDAO\n\nif __name__ == '__main__':\n# bp = measurements.BloodPressure(120,80)\n\n datastoreDAO.makeBPEntry(datetime.datetime.today(), 120, 80, 60)\n datastoreDAO.makeWeightEntry(datetime.datetime.today(), 172, 'lbm', 14.2)\n\n w = measurements.Weight(172.7, 'lbm')\n\n try:\n w1=measurements.Weight(172.7, 'g')\n except AttributeError as ae:\n print (ae.message)\n \n try:\n print(w.convert(1.0, 'kg', 'lbm'))\n print(w.convert(1.0, 'lbm', 'kg'))\n print(w.convert(1.0, 'g', 'kg'))\n except KeyError as ke:\n print(ke.message)\n \n print(json.dumps(w.toEntity()))\n\n b = measurements.BodyFat(15.5)\n print(json.dumps(b.toEntity()))\n\n (lbm, bmr) = measurements.calcLBM(w,b)\n print (lbm, bmr)\n \n","sub_path":"measuresTest.py","file_name":"measuresTest.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"68685299","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\nimport datetime\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('pollsnew', '0008_auto_20160124_1500'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='choice',\n name='addedBy',\n field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='category',\n name='date_created',\n field=models.DateTimeField(default=datetime.datetime(2016, 1, 24, 10, 7, 44, 275000, tzinfo=utc), verbose_name=b'date created'),\n ),\n migrations.AlterField(\n model_name='question',\n name='pub_date',\n field=models.DateTimeField(default=datetime.datetime(2016, 1, 24, 10, 7, 44, 275000, tzinfo=utc), verbose_name=b'date published'),\n ),\n ]\n","sub_path":"mysite-project/pollsnew/migrations/0009_auto_20160124_1537.py","file_name":"0009_auto_20160124_1537.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"568932735","text":"\"\"\"\r\nplot confusion_matrix of PublicTest and PrivateTest\r\n\"\"\"\r\nimport itertools\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport os\r\nimport argparse\r\n#from utils import dataloader\r\nfrom utils.dataloader import DataLoader\r\nfrom torch.autograd import Variable\r\nimport 
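The dash_fte.py callbacks above splice periods and function names into SQL with str.format. A hedged sketch of the same period lookup using bound parameters instead; it assumes, as the snippet does, a SQLAlchemy `engine` and the `hc_data_main` table, and the helper name `fte_for_period` is invented:

```python
# Sketch only, not the app's code: bound parameters instead of str.format.
import pandas as pd
from sqlalchemy import text

def fte_for_period(engine, period, legal_entities):
    # :period is bound by the driver, so quoting is handled for us.
    query = text('SELECT legal_entity_short_eng, fte '
                 'FROM hc_data_main WHERE period = :period')
    df = pd.read_sql(query, con=engine, params={'period': period})
    return round(df[df['legal_entity_short_eng'].isin(legal_entities)]['fte'].sum(), 1)
```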
torchvision\r\nimport transforms as transforms\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom models import *\r\nfrom models.resnet_cut import *\r\nfrom models.peleenet import *\r\n\r\nparser = argparse.ArgumentParser(description='plot_rgb_confusion_matrix')\r\nparser.add_argument('--model_path', help='input model path', type=str)\r\nparser.add_argument('--input_shape', help='data type', default=128, type=int)\r\nparser.add_argument('--split', type=str, default='PrivateTest', help='split')\r\nopt = parser.parse_args()\r\n\r\ninput_shape = opt.input_shape\r\ntransform_test = transforms.Compose([\r\n transforms.CenterCrop(input_shape),\r\n transforms.ToTensor(),\r\n #normalize,\r\n])\r\ndef plot_confusion_matrix(cm, classes,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n \"\"\"\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title, fontsize=16)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n\r\n plt.ylabel('True label', fontsize=18)\r\n plt.xlabel('Predicted label', fontsize=18)\r\n plt.tight_layout()\r\n\r\nclass_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']\r\n\r\n# Model\r\ndevice_ids=[0]\r\n#net = ResNet18_cut()\r\nnet = Peleenet()\r\nnet = torch.nn.DataParallel(net, device_ids=device_ids)\r\ncheckpoint = torch.load(opt.model_path)\r\nnet.load_state_dict(checkpoint['net'])\r\nnet.cuda()\r\nnet.eval()\r\nPrivateTestset = DataLoader(split = opt.split, transform=transform_test)\r\nTestloader = torch.utils.data.DataLoader(PrivateTestset, batch_size=16, shuffle=False)\r\ncorrect = 0\r\ntotal = 0\r\nall_target = []\r\nfor batch_idx, (inputs, targets) in enumerate(Testloader):\r\n use_cuda = torch.cuda.is_available()\r\n if use_cuda:\r\n inputs, targets = inputs.cuda(device=device_ids[0]), targets.cuda(device=device_ids[0])\r\n inputs, targets = Variable(inputs), Variable(targets)\r\n outputs = net(inputs)\r\n score = F.softmax(outputs)\r\n #print(score)\r\n _, predicted = torch.max(outputs, 1)\r\n total += targets.size(0)\r\n correct += predicted.eq(targets.data).cpu().sum()\r\n # inputs, targets = Variable(inputs), Variable(targets)\r\n # outputs = net(inputs)\r\n # _, predicted = torch.max(outputs.data, 1)\r\n\r\n # total += targets.size(0)\r\n # correct += predicted.eq(targets.data).cpu().sum()\r\n if batch_idx == 0:\r\n all_predicted = predicted\r\n all_targets = targets\r\n else:\r\n all_predicted = torch.cat((all_predicted, predicted),0)\r\n all_targets = torch.cat((all_targets, targets),0)\r\n\r\nacc = 100. 
* float(correct) / total\r\nprint(\"accuracy: %0.3f\" % acc)\r\n\r\n# Compute confusion matrix\r\nmatrix = confusion_matrix(all_targets.data.cpu().numpy(), all_predicted.cpu().numpy())\r\nnp.set_printoptions(precision=2)\r\n\r\n# Plot normalized confusion matrix\r\nplt.figure(figsize=(10, 8))\r\nplot_confusion_matrix(matrix, classes=class_names, normalize=True,\r\n title= opt.split+' Confusion Matrix (Accuracy: %0.3f%%)' %acc)\r\nplt.savefig(os.path.join('./output_results/', opt.split + '_cm.png'))\r\nplt.close()","sub_path":"plot_rgb_confusion_matrix.py","file_name":"plot_rgb_confusion_matrix.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"93690215","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n'''\nHow to call this function:\n\nIn the header of your program, type:\n\nfrom speccounts import *\n\nusing the code, type into your program:\n\nspecin_countsout(x_array, y_array)\n\nNOTE: the inputs given to w_f_in do not literally have to be 'wavez' and 'fluxz'.\nThey are just place holder variables for the code below.\n\nMachinery of this code is similar to spectrophot_v2, except without the prompt asking you for a supernova.\nNeed the all the filter curve .txt in the same directory to run successfully\n\nThe variables within the function only exist within the defined function. They can not be called outside of the\nfunction. What this function does is the desired spectrophotometry and spits out the calculated magnitudes\nin order w2,m2,w1,u,b,v. So I just recommend running the function with inputs and copy and paste the result into\na new array if you plan to do anything else with the magnitudes.\n\n'''\n\n\n\n#Vega for reference#\n\nvega_wave,vega_flux = np.loadtxt('../spectra/vega.dat',dtype=float,usecols=(0,1),unpack=True)\n\n# input vega_wave and vega_flux into w_f_in to test #\n\n#####################\n\ndef specin_countsout(wavez,fluxz):\n\n h = 6.6260755e-27\n c = 2.99792458e18\n hc = h*c\n\n\n files = ['filters/UVW2_2010.txt','filters/UVM2_2010.txt','filters/UVW1_2010.txt','filters/U_UVOT.txt','filters/B_UVOT.txt', 'filters/V_UVOT.txt']\n\n filter_WL = []\n filter_A = []\n\n for item in files:\n #Necessary to have \"../\" when running in /python/ directory\n f = open(\"../\" + item,'r')\n\n#\tprint(item)\n\n filter_lambda = []\n filter_area = []\n for line in f:\n \tline = line.rstrip()\n column = line.split()\n#\t\tprint(column)\n wavelen = column[0]\n area = column[1]\n filter_lambda.append(float(wavelen))\n filter_area.append(float(area))\n\n filter_lambda = np.asarray(filter_lambda,dtype=float)\n filter_area = np.asarray(filter_area,dtype=float)\n\n nonzero = np.where(filter_area > 0.0)\n\n filter_lambda = filter_lambda[nonzero]\n filter_area = filter_area[nonzero]\n\n filter_WL.append(filter_lambda)\n filter_A.append(filter_area)\n\n f.close()\n\n\n\n ##########################################\n\n\n filtercurves = ['UVW2_2010','UVM2_2010','UVW1_2010','U_UVOT','B_UVOT','V_UVOT'] ### STRING LIST\n\n zeropoints = [17.38, 16.85, 17.44, 18.34, 19.11, 17.89] ### PHOTOMETRIC ZEROPOINTS BASED ON VEGA\n\n\n filtereffwavelength=[2030,2231,2634,3501,4329,5402] ### EFFECTIVE VEGA WAVELENGTH FOR EACH FILTER (IN SAME ORDER)\n\n mag_array = np.zeros(len(filtercurves))\n\n counts_array = np.zeros(len(filtercurves))\n\n\n filter_array = np.array([filter_A[0],filter_A[1],filter_A[2],filter_A[3],filter_A[4],filter_A[5]])\n\n filter_wave = 
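plot_confusion_matrix above row-normalizes the matrix before drawing it; the same arithmetic as a self-contained check with invented counts:

```python
# Row-normalization exactly as in plot_confusion_matrix(normalize=True).
import numpy as np

cm = np.array([[8, 2],
               [3, 7]])                       # invented 2-class counts
cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
assert np.allclose(cm_norm.sum(axis=1), 1.0)  # each true-label row sums to 1
print(cm_norm)
```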
np.array([filter_WL[0],filter_WL[1],filter_WL[2],filter_WL[3],filter_WL[4],filter_WL[5]])\n\n\n\n for x in range(len(filtercurves)):\n\n sp_ea = np.interp(wavez,filter_wave[x],filter_array[x]) ### spectrum effective area\n\n counts_array[x] = np.trapz(sp_ea*fluxz*wavez/hc,wavez) ### Integrating under the curve using numpy\n\n mag_array[x] = -2.5*np.log10(counts_array[x])+zeropoints[x] ### Calculated magnitudes\n\n return counts_array, mag_array\n\n\n\n'''\nNOTE on mag_array: mag_array has 6 components, one for each filter used. This means that the first\ncomponent is the calculated w2 magnitude, the second component is the m2 calculated magnitude, all\nthe way to v band calculated magnitude. The order of the magnitude reflects the order of filtercurves.\n'''\n\n","sub_path":"python/speccounts.py","file_name":"speccounts.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"429658139","text":"#!/usr/bin/python3\n\nfrom time import sleep\nimport sys, os\nfrom ev3dev.ev3 import *\n\n#connect motors\nrightMotor = LargeMotor(OUTPUT_A)\nassert rightMotor.connected\nleftMotor = LargeMotor(OUTPUT_D)\nassert leftMotor.connected\n\nprint(\"Motors connected\")\n\n#connect gyro\ngs = GyroSensor()\nassert gs.connected\nprint(\"Gyro connected\")\ngs.mode = 'GYRO-RATE'\t# Changing the mode resets the gyro\ngs.mode = 'GYRO-ANG'\n\n#connect servo\nservo = Motor(OUTPUT_C)\nassert servo.connected\nservo.reset()\nservo.stop()\n\nprint(\"Servo connected\")\n\n#connect ultrasonic\nus = UltrasonicSensor()\nassert us.connected\n\nprint(\"Ultrasonic Connected\")\n\n#all connected\nSound.speak('Get Ready... Go!').wait()\nprint(\"Everything connected\")\n\n#DEFINE GLOBAL VARIABLES\n\n\n#FUNCTION DECLARATIONS\n\ndef stop():\n # Brake the motors of the robot.\n leftMotor.stop(stop_action='brake')\n rightMotor.stop(stop_action='brake')\n\ndef drive_square():\n #drive only a certain time\n rightMotor.run_timed(time_sp=3000, speed_sp=100)\n leftMotor.run_timed(time_sp=3000, speed_sp=100)\n print(\"moving forward 1 square\")\n\n#def turn(clockwise):\n\ndef scan(destination):\n servo.run_to_abs_pos(position_sp=destination, speed_sp=75, ramp_down_sp=90)\n #print(\"destination angle \", destination)\n\n'''\n the main loop of this program will\n move forward a certain distance\n scan in 3 directions\n store each direction results\n print out scan results\n'''\n\ndef print_array(input):\n detection_distance = 60\n\n output_string = \"\"\n\n #left\n if input[0] <= detection_distance:\n output_string += \"left clear\"\n else:\n output_string += \"left blocked\"\n output_string += str(input[0])\n\n #center\n if input[1] <= detection_distance:\n output_string += \" center clear\"\n else:\n output_string += \" center blocked\"\n output_string += str(input[1])\n\n #right\n if input[2] <= detection_distance:\n output_string += \" right clear\"\n else:\n output_string += \" right blocked\"\n output_string += str(input[2])\n\n print(output_string)\n\n\n\ndef main():\n # Left Center Right Array\n LCR = [0,0,0]\n\n while True:\n drive_square()\n # It will return to the main area while the robot moves\n scan(0)\n sleep(5)\n #set array front value\n LCR[1] = us.value()\n\n\n scan(90)\n sleep(5)\n # set array left value\n LCR[0] = us.value()\n\n scan(-90)\n sleep(5)\n # set array front value\n LCR[2] = us.value()\n\n print_array(LCR)\n #reset array\n LCR = [0,0,0]\n\n print(\"It has slept for 5 
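specin_countsout above integrates the effective area times the photon flux, A(λ)·F(λ)·λ/(hc), over each bandpass and converts the count rate to a Vega magnitude via mag = -2.5·log10(counts) + ZP. The same arithmetic on a toy triangular bandpass (all numbers below are invented except the constants and the UVW2 zeropoint taken from the snippet):

```python
# Toy version of the count-rate integral in speccounts.py above.
import numpy as np

h, c = 6.6260755e-27, 2.99792458e18                 # erg*s and Angstrom/s, as above
wave = np.linspace(2000.0, 3000.0, 201)             # invented wavelength grid (Angstrom)
flux = np.full_like(wave, 1e-14)                    # invented flat F_lambda
area = np.interp(wave, [2000.0, 2500.0, 3000.0], [0.0, 50.0, 0.0])  # toy bandpass

counts = np.trapz(area * flux * wave / (h * c), wave)
mag = -2.5 * np.log10(counts) + 17.38               # 17.38 = UVW2 zeropoint above
print(round(mag, 2))
```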
seconds\")\n\n\n\nmain()\n","sub_path":"Lyall-s_Files/basic_maze_nav.py","file_name":"basic_maze_nav.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"218411425","text":"from threading import Thread\nfrom queue import Queue\nimport re\nimport json\nimport lxml.html as lxhtml\nimport time\nfrom components import Attr, Template, Getter\n\n\nclass ParseWorker(Thread):\n '''\n A generic parser that executes the functions specified in the\n self.css variable. For use without parent Thread supply keyword\n arguments:\n name = str,\n domain = str,\n next_q = queue.Queue(),\n store_q = queue.Queue(),\n\n The ParseWorker expects the following tuple to be present in the queue:\n (url_meta[dict], html[str], url[str])\n '''\n def __init__(self, parent=None, objects=dict, raw_html=dict,\n next_q=Queue(), **kwargs):\n super(ParseWorker, self).__init__()\n if parent or kwargs and next_q:\n self.parent = parent\n self.name = parent.name\n self.domain = parent.domain\n # self.templates = templates\n self.raw_html = raw_html\n self.get_q = Queue()\n self.next_q = parent.get_q\n self.output_q = parent.output_q\n self.seen = set()\n self.forward = set()\n self.average = []\n self.parsed = 0\n\n else:\n raise Exception('Not enough specified, please read the docstring')\n\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n def run(self):\n while True:\n item = self.get_q.get()\n if item is None:\n break\n\n getter = item\n self.seen.add(getter.url)\n\n html = lxhtml.fromstring(getter.got)\n html.make_links_absolute(self.domain)\n\n start = time.time()\n for template in self.templates:\n to_store = template.store\n selected = self._get_selected(html, template)\n\n if template.store:\n to_store.objects = self.make_objects(template,\n selected, getter)\n\n if not to_store.objects and template.required:\n print('nothing found')\n self._handle_empty()\n self.output_q.put(to_store)\n else:\n self.make_objects(template, selected, getter)\n took = time.time() - start\n self.average.append(took)\n self.get_q.task_done()\n\n def _get_selected(self, html, template):\n if not template.js_regex:\n selected = template.selector(html) if template.selector else [html]\n else:\n regex = re.compile(template.js_regex)\n selected = []\n # Find all the scripts that match the regex.\n scripts = (regex.findall(s.text_content())[0] for s in\n html.cssselect('script')\n if regex.search(s.text_content()))\n\n # Set selected to the scripts\n for script in scripts:\n selected.extend(json.loads(script))\n return selected\n\n def make_objects(self, template, selected, getter):\n objects = []\n # print('aantal links', len(selected))\n for sel in selected:\n objct = Template(name=template.name)\n objct.url = getter.url\n\n # Set predefined attributes from the getter.\n #print('aantal attrs', len(getter.attrs))\n for attr in getter.attrs:\n objct.attrs.append(attr.duplicate())\n\n # Set the attribute values\n for temp_attr in template.attrs:\n parsed = temp_attr.func(sel, temp_attr.selector,\n **temp_attr.kws)\n attr = Attr(name=temp_attr.name, value=parsed)\n objct.attrs.append(attr)\n\n # Create a request from the attribute if desirable\n if temp_attr.getter and parsed:\n if type(parsed) != list:\n parsed = [parsed]\n\n for value in parsed:\n new_getter = Getter(**temp_attr.getter)\n new_getter.url = value\n self._handle_getter(new_getter)\n\n if template.getter:\n self._handle_object_getter(objct)\n objects.append(objct)\n 
return objects\n\n    def _handle_object_getter(self, objct):\n        getter = objct.getter\n        url_params = {attr.name: attr.value for attr in objct.attrs}\n\n        if getter.method == 'post':\n            getter.data = url_params\n        else:\n            getter.params = url_params\n        self._handle_getter(objct.getter)  # _handle_getter takes only the getter; the params are attached above\n\n    def _handle_getter(self, getter):\n        if getter.url and getter.url not in self.seen:\n            if getter.active:\n                self.next_q.put(getter)\n            else:\n                self.forward.add(getter)\n\n            self.seen.add(getter.url)\n\n    def _handle_empty(self):\n        '''\n        Graceful shutdown if no more objects are found.\n        with self.next_q.mutex:\n            print('clearing')\n            self.next_q.queue.clear()\n            self.get_q.queue.clear()\n\n        for _ in self.parent.get_workers:\n            self.next_q.put(None)\n            self.get_q.put(None)\n        '''\n\n        while not self.next_q.empty():\n            try:\n                self.next_q.get(False)\n            except Exception:  # queue.Empty; the bare name Empty is never imported in this module\n                continue\n            self.next_q.task_done()\n","sub_path":"modelscraper/workers/parse_worker.py","file_name":"parse_worker.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"457040424","text":"class LightState(object):\n    on = False\n    brightness = 0\n    hue = 0\n    saturation = 0\n    xy = []\n    ct = 0\n    alert = 'none'\n    effect = 'none'\n    colormode = 'hs' # hs,xy,ct\n    reachable = True # currently always true\n\n    def __init__(self, data):\n        if data is None: return\n        self.bulkset(data)\n\n    def set(self, stateKey, stateValue):\n        if stateKey in ['bri', 'hue', 'sat', 'ct']:\n            stateValue = int(stateValue)\n\n        setattr(self, stateKey, stateValue)\n\n    def bulkset(self, data):\n        if 'on' in data:\n            self.on = data['on']\n\n        if 'bri' in data:\n            self.brightness = int(data['bri'])\n\n        if 'hue' in data:\n            self.hue = int(data['hue'])\n\n        if 'sat' in data:\n            self.saturation = int(data['sat'])\n\n        if 'xy' in data:\n            self.xy = data['xy']\n\n        if 'ct' in data:\n            self.ct = int(data['ct'])\n\n        if 'alert' in data:\n            self.alert = data['alert']\n\n        if 'effect' in data:\n            self.effect = data['effect']\n\n        if 'colormode' in data:\n            self.colormode = data['colormode']\n\n        if 'reachable' in data:\n            self.reachable = data['reachable']","sub_path":"pyhueapi/light_state.py","file_name":"light_state.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"534013172","text":"from django.contrib import admin\r\n\r\nfrom titles.models import Title\r\n\r\n\r\nclass TitleAdmin(admin.ModelAdmin):\r\n    list_display = (\"id\", \"name\", 'rating')\r\n    search_fields = (\"text\",)\r\n    empty_value_display = '-пусто-'\r\n\r\n\r\nadmin.site.register(Title, TitleAdmin)\r\n","sub_path":"titles/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"301166716","text":"\"\"\"\nnpy2categories_utils.py: link the obtained categories to the already existing npy_sentence files\n\nnpy2categories_utils.py path_to_npy_file path_to_sentence_file path_to_target_folder\n\npath_to_npy_file: set path to the .npy file containing all the train, val or test data\npath_to_sentence_file: set path to transformed (cleaned, processed) .txt-file containing all sentences\ne.g. how2sign.train.id_transformed.txt\n - e.g. 
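_handle_empty above drains next_q with get(False), but the Empty exception it means to catch is never imported in that module (only Queue is). The drain pattern in isolation, with the import it needs:

```python
# Self-contained version of the queue-draining pattern in _handle_empty.
from queue import Empty, Queue

def drain(q):
    while True:
        try:
            q.get_nowait()      # equivalent to q.get(False)
        except Empty:
            break
        q.task_done()

q = Queue()
q.put(1); q.put(2)
drain(q)
assert q.empty()
```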
a line in the file: ad4_GWc5XRo_10 one two three\npath_to_target_folder where the new file should be saved to\n\nDIFFERENCE to npy2sentences:\n vid_speaker = kp[:11]\n\n Just compare the first 11 characters instead of adding the part to it\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport sys\nfrom pathlib import Path\n\n\nclass CategoriesToNpy:\n\n def __init__(self, path_to_numpy_file, path_to_csv, path_to_target):\n self.path_to_numpy_file = Path(path_to_numpy_file)\n self.path_to_csv = Path(path_to_csv)\n self.path_to_target = Path(path_to_target)\n old = np.load\n np.load = lambda *a, **k: old(*a, **k, allow_pickle=True)\n\n def main(self):\n self.categories2sentence()\n\n def categories2sentence(self):\n \"\"\" load from .npy file \"\"\"\n kp_files = np.load(self.path_to_numpy_file).item()\n df_kp = pd.DataFrame(kp_files.keys(), columns=[\"keypoints\"])\n kp2sentence = []\n\n d = {'keypoints': [], 'text': []}\n with open(self.path_to_csv) as f:\n for line in f:\n d['keypoints'].append(line.split(\" \")[0])\n d['text'].append(\" \".join(line.split()[1:]))\n df_text = pd.DataFrame(d)\n\n speaker = []\n counter = 0\n for kp in df_kp[\"keypoints\"]:\n vid_speaker = kp[:11]\n speaker.append(vid_speaker)\n for idx in range(len(df_text['keypoints'])):\n if vid_speaker in df_text['keypoints'][idx]:\n kp2sentence.append([kp, df_text['text'][idx]])\n break\n\n if counter % 250 == 0:\n print(\"Folder %d of %d\" % (counter, len(df_kp[\"keypoints\"])))\n counter += 1\n df_kp_text_train = pd.DataFrame(kp2sentence, columns=[\"keypoints\", \"text\"])\n df_kp_text_train.to_csv(self.path_to_target / str(str(self.path_to_csv.name) + \"_2npy.txt\"), index=False)\n\n\nif __name__ == '__main__':\n # file with sentences\n if len(sys.argv) > 1:\n path_to_numpy_file = sys.argv[1]\n else:\n print(\"Set path to npy file\")\n sys.exit()\n\n # sentences file\n if len(sys.argv) > 2:\n path_to_csv = sys.argv[2]\n else:\n print(\"Set path to transformed file containing categories\")\n sys.exit()\n\n # target folder\n if len(sys.argv) > 3:\n path_to_target = sys.argv[3]\n else:\n print(\"Set path to target folder\")\n sys.exit()\n\n npy = CategoriesToNpy(path_to_numpy_file, path_to_csv, path_to_target)\n npy.main()\n","sub_path":"utils/npy2categories_utils.py","file_name":"npy2categories_utils.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"281319463","text":"import sys\nsys.path.insert(0, '/tensorflowvgg')\nimport os\nimport pickle\nfrom os.path import isfile, isdir\nimport numpy as np\nimport tensorflow as tf\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.model_selection import train_test_split, cross_val_score, ShuffleSplit, cross_val_predict, RepeatedKFold\nfrom sklearn.utils import shuffle\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.manifold import TSNE\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score, confusion_matrix, auc, roc_curve, mean_absolute_error\nimport 
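categories2sentence above rescans the whole sentence table for every keypoint file, which is quadratic. If the 11-character video id really is a shared prefix (as the `kp[:11]` slice assumes), a single dict makes the match linear; a sketch with toy stand-in data:

```python
# Sketch with invented data: index sentences once by the 11-char video id.
d = {'keypoints': ['ad4_GWc5XRo_10'], 'text': ['one two three']}  # toy sentence table
kp_names = ['ad4_GWc5XRo-keypoints']                              # toy .npy keys

text_by_vid = {key[:11]: txt for key, txt in zip(d['keypoints'], d['text'])}
kp2sentence = [[kp, text_by_vid[kp[:11]]] for kp in kp_names if kp[:11] in text_by_vid]
print(kp2sentence)   # [['ad4_GWc5XRo-keypoints', 'one two three']]
```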
argparse\nimport tensorflowvgg.vgg19 as vgg19\nimport utility_functions\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport scipy\nimport json\n\ncodes_path = './codes'\nlabels_path = './labels'\nnames_path = './names'\nradio_input_classify, radio_input_confidence = utility_functions.loadRadiologistData(\"../RadiologistData/radiologistInput.csv\", 1, 0)\n\n\nimages_normal_train, labels_normal_train, names_normal_train = utility_functions.loadImagesFromDir((\"../Images/CherryPickedWithRadiologistInputAllWhite/NormalTrain\",), (0,))\nimages_normal_test, labels_normal_test, names_normal_test = utility_functions.loadImagesFromDir((\"../Images/CherryPickedWithRadiologistInputAllWhite/NormalTest\",), (0,))\nimages_abnormal_train, labels_abnormal_train, names_abnormal_train = utility_functions.loadImagesFromDir((\"../Images/CherryPickedWithRadiologistInputAllWhite/AbnormalTrain\",), (1,))\nimages_abnormal_test, labels_abnormal_test, names_abnormal_test = utility_functions.loadImagesFromDir((\"../Images/CherryPickedWithRadiologistInputAllWhite/AbnormalTest\",), (1,))\nimages_contralateral_test, labels_contralateral_test, names_contralateral_test = utility_functions.loadImagesFromDir((\"../Images/CherryPickedWithRadiologistInputAllWhite/ContralateralTest\",), (0,))\nnames_all = np.append(np.append(np.append(names_normal_train, names_normal_test, axis=0), names_abnormal_train, axis=0), names_abnormal_test, axis=0)\nlabels_all = np.append(np.append(np.append(labels_normal_train, labels_normal_test, axis=0), labels_abnormal_train, axis=0), labels_abnormal_test, axis=0)\n\nsess = tf.Session()\nprint(\"Session start\")\n\nvgg = vgg19.Vgg19()\ninput_ = tf.placeholder(tf.float32, [None, 224, 224, 3])\nwith tf.name_scope(\"content_vgg\"):\n vgg.build(input_)\n# Get the values from the relu6 layer of the VGG network\nfeed_dict_normal_train = {input_: images_normal_train}\nfeed_dict_normal_test = {input_: images_normal_test}\nfeed_dict_cancer_train = {input_: images_abnormal_train}\nfeed_dict_cancer_test = {input_: images_abnormal_test}\nfeed_dict_contralateral = {input_: images_contralateral_test}\n\ncodes_normal_train = sess.run(vgg.relu6, feed_dict=feed_dict_normal_train)\ncodes_normal_test = sess.run(vgg.relu6, feed_dict=feed_dict_normal_test)\ncodes_cancer_train = sess.run(vgg.relu6, feed_dict=feed_dict_cancer_train)\ncodes_cancer_test = sess.run(vgg.relu6, feed_dict=feed_dict_cancer_test)\ncodes_contralateral = sess.run(vgg.relu6, feed_dict=feed_dict_contralateral)\nsess.close()\n\n\"\"\" next block is for TSNE plot \"\"\"\n\ncodes_all = np.append(np.append(np.append(codes_normal_train, codes_normal_test, axis=0), codes_cancer_train, axis=0), codes_cancer_test, axis=0)\n#codes_all = PCA(n_components=50).fit_transform(codes_all)\ntsne_embedding = TSNE(n_components=2, perplexity=5).fit_transform(codes_all)\njson_dict = {}\ni=0\nfor name in names_all:\n json_dict[name] = {}\n json_dict[name][\"position\"] = tsne_embedding[i].tolist()\n json_dict[name][\"label\"] = str(labels_all[i])\n i = i + 1\n\"\"\"\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nax.scatter(tsne_embedding[0:len(codes_normal_train)+len(codes_normal_test),0], tsne_embedding[0:len(codes_normal_train)+len(codes_normal_test),1], edgecolors='none', c=\"blue\", label=\"normal\")\nax.scatter(tsne_embedding[len(codes_normal_train)+len(codes_normal_test):,0], tsne_embedding[len(codes_normal_train)+len(codes_normal_test):,1], edgecolors='none', c=\"red\", label=\"cancer\")\nplt.legend(loc='lower right', 
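CherryPickedClassifier.py above freezes VGG-19 and treats its relu6 activations (the `codes_*` arrays) as plain feature vectors for a low-C linear SVM. The classifier half of that pipeline, with synthetic stand-ins for the 4096-dimensional codes:

```python
# Sketch: linear SVM over fixed deep-feature codes (features invented here).
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
codes = rng.normal(size=(40, 4096))      # stand-in for VGG relu6 activations
labels = np.tile([0, 1], 20)             # balanced toy labels

clf = LinearSVC(C=0.0001).fit(codes[:30], labels[:30])
print(roc_auc_score(labels[30:], clf.decision_function(codes[30:])))
```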
fontsize='x-large')\nplt.title(\"t-sne embedding\")\nplt.xlim([min(tsne_embedding[:,0]-1), max(tsne_embedding[:,0]+1)])\nplt.ylim([min(tsne_embedding[:,1]-1), max(tsne_embedding[:,1]+1)])\nplt.show()\n\"\"\"\n\nclf = LinearSVC(C=0.0001)\n\nX_train = np.append(codes_normal_train, codes_cancer_train, axis=0)\nX_test = np.append(codes_normal_test, codes_cancer_test, axis=0)\n\ny_train = np.append(labels_normal_train, labels_abnormal_train, axis=0)\ny_test = np.append(labels_normal_test, labels_abnormal_test, axis=0)\n\nnames_train = np.append(names_normal_train, names_abnormal_train, axis=0)\nnames_test = np.append(names_normal_test, names_abnormal_test, axis=0)\n\"\"\"\nC_values = [1000000, 500000, 100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001, 0.00005, 0.00001, 0.000005, 0.000001]\nC_value_scores = []\nfor spot in range(len(C_values)):\n clf = LinearSVC(C=C_values[spot])\n kFolds = 5\n iterations = 50\n random_state = 4597834\n i = 0\n averageScore = 0\n rollingAverage = 0\n rkf = RepeatedKFold(n_splits=kFolds, n_repeats=iterations, random_state=random_state)\n\n for train_index, test_index in rkf.split(X_train):\n X_train_CV, X_test_CV = X_train[train_index], X_train[test_index]\n y_train_CV, y_test_CV = y_train[train_index], y_train[test_index]\n clf.fit(X_train_CV, y_train_CV)\n score = clf.score(X_test_CV, y_test_CV)\n averageScore = averageScore + score\n rollingAverage = rollingAverage + score\n i = i + 1\n if i % kFolds == 0:\n print(\"Average for \" + str(kFolds) + \"-split \" + str(i / kFolds) + \": \" + str (rollingAverage / kFolds))\n rollingAverage = 0\n\n averageScore = averageScore / i\n\n print(\"Average score: \" + str(averageScore))\n C_value_scores.append(averageScore)\nprint(C_value_scores)\n\"\"\"\nclf.fit(X_train, y_train)\nscore = clf.score(X_test, y_test)\nfpr, tpr, thresholds = roc_curve(y_test, clf.decision_function(X_test))\nroc_auc = auc(fpr, tpr)\nprint(\"AUC for model: \" + str(roc_auc))\nprint(\"Stdev for AUC: \" + str())\nprint(\"Final accuracy for model: \" + str(score))\n#print(\"Overall score: \" + str(clf.score(np.append(X_train, X_test, axis=0), np.append(y_train, y_test, axis=0))))\n\n\nmodel_confidence = {}\nmodel_classification = {}\nmodel_classification_contralateral = {}\nmodel_confidence_contralateral = {}\n\nconfidence_values = clf.decision_function(X_test)\nscaler = MinMaxScaler(feature_range=(-1, 1))\nconfidence_values = scaler.fit_transform(np.array(confidence_values).reshape(-1, 1)).reshape(-1)\ni = 0\nfor item in confidence_values:\n model_confidence[names_test[i]] = abs(item)\n i = i + 1\n\npredictions = clf.predict(X_test)\ni = 0\nfor item in predictions:\n model_classification[names_test[i]] = item\n i = i + 1\n\npredictions_contralateral = clf.predict(codes_contralateral)\ni = 0\nfor item in predictions_contralateral:\n model_classification_contralateral[names_contralateral_test[i]] = item\n i = i + 1\n\nconfidence_values_contralateral = clf.decision_function(codes_contralateral)\ni = 0\nfor item in confidence_values_contralateral:\n model_confidence_contralateral[names_contralateral_test[i]] = abs(item)\n i = i + 1\n\nmodel_confidence_all = {}\nmodel_classification_all = {}\n\nconfidence_values_all = clf.decision_function(np.append(X_train, X_test, axis=0))\ni = 0\nfor item in confidence_values_all:\n model_confidence_all[names_all[i]] = abs(item)\n i = i + 1\n\npredictions_all = clf.predict(np.append(X_train, X_test, axis=0))\ni = 0\nfor item in predictions_all:\n 
model_classification_all[names_all[i]] = item\n i = i + 1\n\nfor name in names_all:\n if name in model_confidence_all.keys():\n json_dict[name][\"model_confidence\"] = str(model_confidence_all[name])\n if name in model_classification_all.keys():\n json_dict[name][\"model_classification\"] = str(model_classification_all[name])\n if name in radio_input_classify.keys():\n json_dict[name][\"radiologist_classification\"] = str(radio_input_classify[name])\n else:\n json_dict[name][\"radiologist_classification\"] = \"N/A\"\n if name in radio_input_confidence.keys():\n json_dict[name][\"radiologist_confidence\"] = str(radio_input_confidence[name])\n else:\n json_dict[name][\"radiologist_confidence\"] = \"N/A\"\n i = i + 1\nwith open('js/VisualizationInformation.txt', 'w') as json_file:\n json.dump(json_dict, json_file)\n\n#utility_functions.printListInOrder(y_test)\n#print(\"break\")\n#utility_functions.printDictionaryInOrder(names_test, radio_input_classify)\n#print(\"break\")\n#utility_functions.printDictionaryInOrder(names_test, radio_input_confidence)\n\n\nradio_confidence = []\nfor name in names_test:\n radio_confidence.append(radio_input_classify[name])\nfpr, tpr, thresholds = roc_curve(y_test, radio_confidence)\nroc_auc = auc(fpr, tpr)\nprint(\"AUC for radiologists: \" + str(roc_auc))\n\nconfidence_values_model = []\nconfidence_values_radiologist = []\nfor i in range(len(names_test)):\n if names_test[i] in radio_input_confidence.keys():\n if model_classification[names_test[i]] == 1:\n confidence_values_model.append(-model_confidence[names_test[i]])\n else: \n confidence_values_model.append(model_confidence[names_test[i]])\n if radio_input_confidence[names_test[i]] == 1:\n confidence_values_radiologist.append(-radio_input_confidence[names_test[i]])\n else:\n confidence_values_radiologist.append(radio_input_confidence[names_test[i]])\nscaler = MinMaxScaler(feature_range=(0, 1))\nconfidence_values_model = scaler.fit_transform(np.array(confidence_values_model).reshape(-1, 1)).reshape(-1)\nr, p = scipy.stats.pearsonr(confidence_values_model, confidence_values_radiologist)\n#print(\"Pearson r: \" + str(r) + \", p-value: \" + str(p))\n\nplt.plot(fpr, tpr, 'darkorange',\n label='AUC = %0.2f'% roc_auc)\nplt.legend(loc='lower right', fontsize='x-large')\nplt.title(\"ROC Curve - Linear SVM\")\nplt.plot([0, 1], [0, 1], color='#67809f', linestyle='--')\nplt.xlim([-0.1, 1.0])\nplt.ylim([-0.1, 1.0])\nplt.ylabel('True Positive Rate', fontsize=14)\nplt.xlabel('False Positive Rate', fontsize=14)\nplt.show()\n\n\n\"\"\"\n\nThe following code is to add a voting system in hopes to increase accuracy\n\n\"\"\"\nfor i in range(len(names_test)):\n name = names_test[i]\n radio_score = radio_input_confidence[name]\n model_score = model_confidence[name]\n if radio_score > model_score:\n predictions[i] = radio_input_classify[name]\n confidence_values[i] = radio_score\n else:\n predictions[i] = model_classification[name]\n confidence_values[i] = model_score\n if predictions[i] == 0:\n confidence_values[i] = -confidence_values[i]\n\nnumCorrect = 0\nfor i in range(len(names_test)):\n if predictions[i] == y_test[i]:\n numCorrect = numCorrect + 1\nnewAccuracy = float(numCorrect) / len(names_test)\nprint(\"Voting system accuracy: \" + str(newAccuracy))\nfpr, tpr, thresholds = roc_curve(y_test, confidence_values)\nroc_auc = auc(fpr, tpr)\nprint(\"Voting system AUC: \"+str(roc_auc))\nplt.plot(fpr, tpr, 'darkorange',\n label='AUC = %0.2f'% roc_auc)\nplt.legend(loc='lower right', fontsize='x-large')\nplt.title(\"ROC Curve - 
Voting System\")\nplt.plot([0, 1], [0, 1], color='#67809f', linestyle='--')\nplt.xlim([-0.1, 1.0])\nplt.ylim([-0.1, 1.0])\nplt.ylabel('True Positive Rate', fontsize=14)\nplt.xlabel('False Positive Rate', fontsize=14)\nplt.show()\n\n\nutility_functions.printListInOrder(predictions)\nprint(\"break\")\nutility_functions.printDictionaryInOrder(names_test, model_confidence)\nprint(\"break\")\nutility_functions.printDictionaryInOrder(names_test, model_classification)\n","sub_path":"Solution3/Code/CherryPickedClassifier.py","file_name":"CherryPickedClassifier.py","file_ext":"py","file_size_in_byte":12150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"408542523","text":"import unittest\nfrom sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Table\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_testing import TestCase\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom flask import Flask\nfrom openpatch_core.database.elastic_query import ElasticQuery\nfrom openpatch_core.models import Base\nfrom openpatch_core.database import db\nfrom sqlalchemy.dialects import mysql\n\n\nclass City(Base):\n __tablename__ = \"city\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n\n users = relationship(\"User\", back_populates=\"city\")\n\n def __repr__(self):\n return str(self.id)\n\n\nclass UserColor(Base):\n __tablename__ = \"user_color\"\n\n user_id = Column(Integer, ForeignKey(\"users.id\"), primary_key=True)\n color_id = Column(Integer, ForeignKey(\"colors.id\"), primary_key=True)\n\n user = relationship(\"User\", back_populates=\"user_colors\")\n color = relationship(\"Color\", back_populates=\"user_colors\")\n\n\nclass User(Base):\n __tablename__ = \"users\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n lastname = Column(String)\n uid = Column(Integer)\n city_id = Column(Integer, ForeignKey(City.id))\n city = relationship(City, back_populates=\"users\")\n\n user_colors = relationship(\"UserColor\", back_populates=\"user\")\n\n def __repr__(self):\n return str(self.id)\n\n\nassociation_art_color = Table(\n \"art_color\",\n Base.metadata,\n db.Column(\"art_id\", Integer, ForeignKey(\"arts.id\")),\n db.Column(\"color_id\", Integer, ForeignKey(\"colors.id\")),\n)\n\n\nclass Color(Base):\n __tablename__ = \"colors\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n\n user_colors = relationship(\"UserColor\", back_populates=\"color\")\n arts = relationship(\"Art\", secondary=association_art_color, back_populates=\"colors\")\n\n\nclass Art(Base):\n __tablename__ = \"arts\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n\n colors = relationship(\n \"Color\", secondary=association_art_color, back_populates=\"arts\"\n )\n\n\nclass ElasticQueryTest(TestCase):\n def create_app(self):\n app = Flask(__name__)\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///:memory:\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n db.init_app(app)\n return app\n\n def setUp(self):\n db.create_all()\n db.session.add_all(\n [\n User(name=\"Jhon\", lastname=\"Galt\", uid=\"19571957\", city_id=1),\n User(name=\"Steve\", lastname=\"Jobs\", uid=\"20092009\", city_id=2),\n User(name=\"Iron\", lastname=\"Man\", uid=\"19571957\", city_id=1),\n City(name=\"Cordoba\"),\n City(name=\"New York\"),\n Color(name=\"red\"),\n Color(name=\"blue\"),\n UserColor(user_id=1, color_id=1),\n UserColor(user_id=1, 
color_id=2),\n UserColor(user_id=2, color_id=2),\n Art(name=\"Starry Night\"),\n Art(name=\"Mona Lisa\"),\n ]\n )\n db.session.commit()\n\n art1 = Art.query.get(1)\n art1.colors.append(Color.query.get(1))\n art2 = Art.query.get(2)\n art2.colors.append(Color.query.get(2))\n\n db.session.commit()\n\n def test_setup_is_ok(self):\n \"\"\" Demo test \"\"\"\n assert User.query.count() == 3\n\n def test_simple_query(self):\n \"\"\" test simple query \"\"\"\n query_string = '{\"filter\" : {\"uid\" : {\"like\" : \"%1957%\"} } }'\n query, count, page = User.elastic_query(query_string)\n assert query.count() == 2\n assert count == 2\n query_string = (\n '{\"filter\" : {\"name\" : {\"like\" : \"%Jho%\"}, \"lastname\" : \"Galt\" } }'\n )\n query, count, page = User.elastic_query(query_string)\n assert query.count() == 1\n assert count == 1\n\n def test_limit_operator(self):\n query_string = '{\"limit\": 2}'\n query, count, page = User.elastic_query(query_string)\n query = query = page()\n assert query.count() == 2\n assert count == User.query.count()\n\n def test_offset_operator(self):\n query_string = '{\"offset\": 2}'\n query, count, page = User.elastic_query(query_string)\n query = page()\n assert query.count() == 1\n assert count == User.query.count()\n\n query_string = '{\"limit\": 2, \"offset\": 2}'\n query, count, page = User.elastic_query(query_string)\n query = page()\n assert query.count() == 1\n assert count == User.query.count()\n\n def test_and_operator(self):\n \"\"\" test and operator \"\"\"\n query_string = '{\"filter\" : {\"and\" : {\"name\" : {\"like\" : \"%Jho%\"}, \"lastname\" : \"Galt\", \"uid\" : {\"like\" : \"%1957%\"} } } }'\n query, count, page = User.elastic_query(query_string)\n assert query.count() == 1\n assert count == 1\n\n def test_or_operator(self):\n \"\"\" test or operator \"\"\"\n query_string = '{\"filter\" : {\"or\" : { \"name\" : \"Jobs\", \"lastname\" : \"Man\", \"uid\" : \"19571957\" } } }'\n query, count, page = User.elastic_query(query_string)\n assert query.count() == 2\n assert count == 2\n\n def test_or_and_operator(self):\n \"\"\" test or and operator \"\"\"\n query_string = '{\"filter\" : {\"or\" : { \"name\" : \"Jhon\", \"lastname\" : \"Galt\" }, \"and\" : { \"uid\" : \"19571957\" } } }'\n query, count, page = User.elastic_query(query_string)\n assert query.count() == 1\n assert count == 1\n\n def test_sorting(self):\n \"\"\" test operator levels \"\"\"\n query_string = '{\"filter\" : {\"or\" : { \"name\" : \"Jhon\", \"lastname\" : \"Man\" } }, \"sort\": { \"name\" : \"asc\" } }'\n results = User.elastic_query(query_string)[0].all()\n assert results[0].name == \"Iron\"\n\n def test_in_operator(self):\n \"\"\" test operator in \"\"\"\n query_string = '{\"filter\" : {\"name\" : {\"in\" : [\"Jhon\", \"Peter\", \"Iron\"] } } }'\n assert User.elastic_query(query_string)[0].count() == 2\n\n query_string = '{\"filter\" : {\"name\" : {\"in\" :[\"Jhon\", \"Peter\", \"Iron\"]}, \"lastname\" : \"Galt\" } }'\n assert User.elastic_query(query_string)[0].count() == 1\n\n def test_allow_fields_option(self):\n \"\"\" test allow_fields option \"\"\"\n query_string = '{\"filter\" : {\"or\" : { \"name\" : \"Jhon\", \"lastname\" : \"Man\" } }, \"sort\": { \"name\" : \"asc\" } }'\n enabled_fields = [\"name\"]\n results = User.elastic_query(query_string, enabled_fields=enabled_fields)[\n 0\n ].all()\n assert results[0].name == \"Jhon\"\n\n def test_search_for_levels(self):\n \"\"\" test search for levels \"\"\"\n query_string = '{\"filter\" : {\"or\" : { \"city.name\" : \"New 
York\", \"lastname\" : \"Man\" } }, \"sort\": { \"name\" : \"asc\" } }'\n results = User.elastic_query(query_string)[0].all()\n assert results[0].name == \"Iron\"\n\n query_string = '{\"filter\" : { \"city.name\" : \"New York\" } }'\n results = User.elastic_query(query_string)[0].all()\n assert results[0].name == \"Steve\"\n\n query_string = '{\"filter\" : { \"city.name\" : {\"like\" : \"%New%\"} } }'\n query = User.elastic_query(query_string)\n results = query[0].all()\n assert results[0].name == \"Steve\"\n\n def test_many_to_many_relationship(self):\n query_string = '{\"filter\" : { \"user_colors.color.name\" : \"red\" } }'\n query = User.elastic_query(query_string)\n results = query[0].all()\n\n assert len(results) == 1\n\n query_string = '{\"filter\" : { \"user_colors.color.name\" : \"blue\" } }'\n query = User.elastic_query(query_string)\n results = query[0].all()\n\n assert len(results) == 2\n\n query_string = '{\"filter\": { \"colors.name\": \"blue\" }}'\n query = Art.elastic_query(query_string)\n\n results = query[0].all()\n assert len(results) == 1\n\n def test_many_to_many_relationship_deep(self):\n query_string = (\n '{\"filter\" : { \"user_colors.color.arts.name\" : \"Starry Night\" } }'\n )\n query = User.elastic_query(query_string)\n results = query[0].all()\n\n assert len(results) == 1\n assert results[0].name == \"Jhon\"\n\n def test_one_to_many_relationship(self):\n query_string = '{\"filter\" : { \"users.name\" : \"Jhon\" } }'\n query = City.elastic_query(query_string)\n results = query[0].all()\n\n assert len(results) == 1\n\n\ndef print_query(query):\n print(\n query[0].statement.compile(\n dialect=mysql.dialect(), compile_kwargs={\"literal_binds\": True}\n )\n )\n","sub_path":"tests/database/test_elastic_query.py","file_name":"test_elastic_query.py","file_ext":"py","file_size_in_byte":8633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"230265906","text":"import ffmpeg\nimport cv2\nimport subprocess\n\nppath = \"C:\\\\Users\\\\CrymeAriver\\\\PycharmProjects\\\\tomwaitforitmy_v_scr\\\\\"\nfile_name = 'qwe.flv'\n\nprobe = ffmpeg.probe(ppath+file_name)\nvideo_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')\nwidth = int(video_info['width']/20)\nheight = int(video_info['height']/8)\n\ncap = cv2.VideoCapture(ppath+file_name)\n\ndef cutf(path, name, startframe, fps, endframe, outname):\n command = [\"ffmpeg\", '-i', path+name, '-ss', str(startframe/fps), '-t', str((endframe-startframe)/fps),\n '-c:v', 'libx264',\n '-c:a', 'aac',\n path+outname]\n return command\n\nind = 0\nchanged = False\ncounter = 0\narr = []\ns_counter = 0\n\nwhile(cap.isOpened()):\n start_pos = 0\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n # hist = cv2.calcHist([gray], [0], mask, [256], [0, 256])\n # print(len(hist))\n video = gray[0:height]\n tt = []\n for i in video:\n tt.append(i[0:width])\n val = 0\n for i in tt:\n for o in i:\n val += o[2]\n x = val / (width * height)\n\n if x < 15 and not changed:\n print(x, ind)\n changed = True\n arr.append(ind)\n print(arr)\n if len(arr) == 2:\n a = cutf(ppath, file_name, arr[0], 30, arr[1], 'out' + str(ind) + '.flv')\n arr = []\n process = subprocess.Popen(a, stdout=subprocess.PIPE)\n output, error = process.communicate()\n arr = []\n\n if x > 72 and changed:\n print(x, ind)\n arr.append(ind)\n print(arr)\n changed = False\n\n ind += 1\n # cv2.imshow('frame', frame)\n # print(gray)\n # break\n if cv2.waitKey(1) & 0xFF == ord('q'):\n 
","sub_path":"lfs_cutter.py","file_name":"lfs_cutter.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"94508094","text":"from tkinter import *\nmaster = Tk()\nmaster.geometry(\"700x600\")\n#==================Label\nl_show= Label(master, text=\"三酷猫:\")\nphoto=PhotoImage(file=\"kwsupicon1.gif\")\nl_show1= Label(master,image=photo)\nl_show.pack(side=\"left\")\nl_show1.pack(side=\"left\")\n#===================Entry\ne_show=Entry(master,width=10)\ne_show.pack(side=\"left\")\n#===================Text\nt_show=Text(master,width=10,height=4)\nt_show.pack(side=\"bottom\")\n#===================Checkbutton\nvar = StringVar()\nc_show=Checkbutton(master,text=\"蓝猫\", variable=var,\n                   onvalue=\"RGB\", offvalue=\"L\",fg=\"blue\")\nc_show.pack(side=\"top\")\n#=====================Radiobutton\nv = IntVar()\nr_show=Radiobutton(master,text=\"One\",variable=v,value=1)\nr_show.pack(anchor=W)\n#=====================Frame\nf_show=Frame(master,height=200,width=200,bd=1,bg='white',relief=SUNKEN)\nf_show.pack(anchor=\"center\")\n#======================LabelFrame\nlf_show=LabelFrame(master, text=\"Group\",padx=5, pady=5)\nlf_show.pack(padx=10, pady=10,expand=\"yes\")\ne1=Entry(lf_show,width=10)\ne1.pack()\ne2=Entry(lf_show,width=10)\ne2.pack()\n#======================Listbox\nlb_show=Listbox(master,bg=\"yellow\",height=5,width=20)\nlb_show.pack(side=\"top\")\nfor item in [\"one\",\"two\",\"three\",\"four\"]:\n    lb_show.insert(END, item)\n#=======================Scrollbar\ns_show=Scrollbar(master)\ns_show.pack(side=RIGHT, fill=Y)\nlb_show1=Listbox(master,fg=\"red\",height=5,width=20)\nlb_show1['yscrollcommand']=s_show.set\nlb_show1.pack(side=\"right\")\nfor item in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\"]:\n    lb_show1.insert(END, item)\n# the scrollbar must drive the listbox whose yscrollcommand is wired to it\ns_show.config(command=lb_show1.yview)\n#========================Scale\nsc_show= Scale(master,from_=0,to=100)\nsc_show.pack(side=\"right\")\n#========================Message and Button\ndef showMessage(event):\n    m1=Message(master,text=\"非常好!\",width=60)\n    m1.pack()\nb_show=Button(master,text=\"确认\",fg=\"black\")\nb_show.bind(\"<Button-1>\",showMessage)\nb_show.pack(side=\"left\")\n#========================Spinbox\nsb_show=Spinbox(master,from_=0,to=10)\nsb_show.pack(side=\"left\")\n#========================Toplevel\ntL_show=Toplevel(master)\ntL_show.wm_attributes(\"-topmost\",1)\ntL_show.title(\"OK!\")\nt1_show=Text(tL_show,width=10,height=4)\nt2_show=Text(tL_show,width=10,height=4)\nt1_show.pack()\nt2_show.pack()\n#========================PanedWindow\npw=PanedWindow(orient=VERTICAL,bg=\"green\")\npw.pack(fill=BOTH,expand=1)\nfor w in [Label,Button,Checkbutton,Radiobutton]:\n    pw.add(w(pw,text = 'hello'))\nmainloop()\n\n","sub_path":"第11章/base_easy.py","file_name":"base_easy.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"312497899","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pypeerassets/pavoteproto_pb2.py\n# Compiled at: 2018-10-13 10:32:19\n# Size of source mod 2**32: 5042 bytes\nimport sys\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom 
google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n_sym_db = _symbol_database.Default()\nDESCRIPTOR = _descriptor.FileDescriptor(name='0005-on-chain-voting-transaction-specification.proto',\n package='',\n syntax='proto3',\n serialized_pb=(_b('\\n40005-on-chain-voting-transaction-specification.proto\"Ý\\x01\\n\\x04Vote\\x12\\x0f\\n\\x07version\\x18\\x01 \\x01(\\r\\x12\\x13\\n\\x0bdescription\\x18\\x02 \\x01(\\t\\x12\\x13\\n\\x0bstart_block\\x18\\x03 \\x01(\\r\\x12\\x11\\n\\tend_block\\x18\\x04 \\x01(\\r\\x12\\x12\\n\\ncount_mode\\x18\\x05 \\x01(\\r\\x12\\x0f\\n\\x07choices\\x18\\x06 \\x03(\\t\\x12\\x15\\n\\rvote_metainfo\\x18\\x07 \\x01(\\x0c\"K\\n\\x04MODE\\x12\\x08\\n\\x04NONE\\x10\\x00\\x12\\n\\n\\x06SIMPLE\\x10\\x01\\x12\\x17\\n\\x13WEIGHT_CARD_BALANCE\\x10\\x03\\x12\\x14\\n\\x10WEIGHT_CARD_DAYS\\x10\\x07b\\x06proto3')))\n_VOTE_MODE = _descriptor.EnumDescriptor(name='MODE',\n full_name='Vote.MODE',\n filename=None,\n file=DESCRIPTOR,\n values=[\n _descriptor.EnumValueDescriptor(name='NONE',\n index=0,\n number=0,\n options=None,\n type=None),\n _descriptor.EnumValueDescriptor(name='SIMPLE',\n index=1,\n number=1,\n options=None,\n type=None),\n _descriptor.EnumValueDescriptor(name='WEIGHT_CARD_BALANCE',\n index=2,\n number=3,\n options=None,\n type=None),\n _descriptor.EnumValueDescriptor(name='WEIGHT_CARD_DAYS',\n index=3,\n number=7,\n options=None,\n type=None)],\n containing_type=None,\n options=None,\n serialized_start=203,\n serialized_end=278)\n_sym_db.RegisterEnumDescriptor(_VOTE_MODE)\n_VOTE = _descriptor.Descriptor(name='Vote',\n full_name='Vote',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(name='version',\n full_name='Vote.version',\n index=0,\n number=1,\n type=13,\n cpp_type=3,\n label=1,\n has_default_value=False,\n default_value=0,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='description',\n full_name='Vote.description',\n index=1,\n number=2,\n type=9,\n cpp_type=9,\n label=1,\n has_default_value=False,\n default_value=(_b('').decode('utf-8')),\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='start_block',\n full_name='Vote.start_block',\n index=2,\n number=3,\n type=13,\n cpp_type=3,\n label=1,\n has_default_value=False,\n default_value=0,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='end_block',\n full_name='Vote.end_block',\n index=3,\n number=4,\n type=13,\n cpp_type=3,\n label=1,\n has_default_value=False,\n default_value=0,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='count_mode',\n full_name='Vote.count_mode',\n index=4,\n number=5,\n type=13,\n cpp_type=3,\n label=1,\n has_default_value=False,\n default_value=0,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='choices',\n full_name='Vote.choices',\n index=5,\n number=6,\n type=9,\n cpp_type=9,\n label=3,\n has_default_value=False,\n default_value=[],\n message_type=None,\n 
enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='vote_metainfo',\n full_name='Vote.vote_metainfo',\n index=6,\n number=7,\n type=12,\n cpp_type=9,\n label=1,\n has_default_value=False,\n default_value=(_b('')),\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None)],\n extensions=[],\n nested_types=[],\n enum_types=[\n _VOTE_MODE],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[],\n serialized_start=57,\n serialized_end=278)\n_VOTE_MODE.containing_type = _VOTE\nDESCRIPTOR.message_types_by_name['Vote'] = _VOTE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nVote = _reflection.GeneratedProtocolMessageType('Vote', (_message.Message,), dict(DESCRIPTOR=_VOTE,\n __module__='0005_on_chain_voting_transaction_specification_pb2'))\n_sym_db.RegisterMessage(Vote)","sub_path":"pycfiles/pypeflow-0.0.1-py3-none-any/pavoteproto_pb2.cpython-36.py","file_name":"pavoteproto_pb2.cpython-36.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"393881411","text":"# -*- encoding: utf-8 -*-\nfrom PyQt4 import QtGui\n\n\nVERSION_NUMBER = 5\n\nMRAY_URI = \"http://mRay.org\"\nMRAY_VERSION_FILE = MRAY_URI + \"/currentVersion.txt\"\nMRAY_ONLINE_HELP = MRAY_URI + \"/documentation/index.html\"\nMRAY_WEBSITE = MRAY_URI + \"/index.html\"\nGITHUB_SITE = \"https://github.com/hmcontroller/microRay\"\n\n\nAVAILABLE_FRAMEWORKS = [\n # {\"macroName\": \"MBED_2_UDP\", \"displayName\": u\"Mbed 2 UDP\", \"interface\": \"UDP\", \"template\": \"mbed_2_udp.c\"},\n # {\"macroName\": \"MBED_2_SERIAL\", \"displayName\": u\"Mbed 2 Serial\", \"interface\": \"SERIAL\", \"template\": \"mbed_2_serial.c\"},\n {\"macroName\": \"MBED_OS_UDP\", \"displayName\": u\"Mbed OS UDP\", \"interface\": \"UDP\", \"template\": \"mbed_os_udp.c\"},\n {\"macroName\": \"MBED_OS_SERIAL\", \"displayName\": u\"Mbed OS Serial\", \"interface\": \"SERIAL\", \"template\": \"mbed_os_serial.c\"},\n {\"macroName\": \"ARDUINO_UDP\", \"displayName\": u\"Arduino UDP\", \"interface\": \"UDP\", \"template\": \"arduino_udp.c\"},\n {\"macroName\": \"ARDUINO_SERIAL\", \"displayName\": u\"Arduino Serial\", \"interface\": \"SERIAL\", \"template\": \"arduino_serial.c\"},\n {\"macroName\": \"CUBE_IDE_UDP\", \"displayName\": u\"Cube IDE UDP\", \"interface\": \"UDP\", \"template\": \"cube_ide_udp.c\"},\n {\"macroName\": \"CUBE_IDE_SERIAL\", \"displayName\": u\"Cube IDE Serial\", \"interface\": \"SERIAL\", \"template\": \"cube_ide_serial.c\"}\n]\n\nRELATIVE_PATH_TO_APPLICATION_SETTINGS = \"applicationSettings.json\"\n\n\nCHECK_BOX_FONT = QtGui.QFont()\nCHECK_BOX_FONT.setPointSize(8)\n\nUSER_INPUT_WARNING_COLOR = QtGui.QColor(255, 165, 0)\nCONFIRMATION_TIMEOUT_WARNING_COLOR = QtGui.QColor(210, 0, 0)\n# NEGATIVE_CONFIRMATION_WARNING_COLOR = QtGui.QColor(50, 200, 50)\nNEGATIVE_CONFIRMATION_WARNING_COLOR = QtGui.QColor(210, 30, 0)\n\nHOVER_COLOR = QtGui.QColor(200, 200, 200)\nMOUSE_DOWN_COLOR = QtGui.QColor(150, 150, 150)\n\nPENDING_VALUE_COLOR = QtGui.QColor(210, 0, 0)\n\nCABLE_PEN = QtGui.QPen()\nCABLE_PEN.setColor(QtGui.QColor(0, 0, 0))\nCABLE_PEN.setCosmetic(True)\nCABLE_PEN.setWidth(2)\n\n","sub_path":"gui/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"373671889","text":"from __future__ import division\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nimport torch.nn.init as weight_init\nimport torch.nn.functional as F\nimport numpy as np\nfrom cuda_IndRNN_onlyrecurrent import IndRNN_onlyrecurrent as IndRNN\n#if no cuda, then use the following line\n#from IndRNN_onlyrecurrent import IndRNN_onlyrecurrent as IndRNN \n\n\nfrom __main__ import parser,args,U_bound\nMAG=args.MAG\n#U_bound=np.power(10,(np.log10(MAG)/args.seq_len))\nU_lowbound=np.power(10,(np.log10(1.0/MAG)/args.seq_len)) \nfrom utils import Batch_norm_overtime,Linear_overtime_module,Dropout_overtime\nBN=Batch_norm_overtime\nLinear_overtime=Linear_overtime_module\ndropout_overtime=Dropout_overtime.apply\n\n\nclass IndRNNwithBN(nn.Sequential):\n def __init__(self, hidden_size, seq_len,bn_location='bn_before'):\n super(IndRNNwithBN, self).__init__()\n # print(bn_location)\n # if bn_location==\"bn_before\":\n # self.add_module('norm1', BN(hidden_size, args.seq_len))\n # self.add_module('indrnn1', IndRNN(hidden_size))\n # if bn_location==\"bn_after\":\n # self.add_module('norm1', BN(hidden_size, args.seq_len))\n # if (bn_location!='bn_before') and (bn_location!='bn_after'):\n # print('Please select a batch normalization mode.')\n # assert 2==3\n self.add_module('norm1', BN(hidden_size, args.seq_len))\n\nclass stackedIndRNN_encoder(nn.Module):\n def __init__(self, input_size, outputclass):\n super(stackedIndRNN_encoder, self).__init__() \n hidden_size=args.hidden_size\n\n self.DIs=nn.ModuleList()\n denseinput=Linear_overtime(input_size, hidden_size)\n self.DIs.append(denseinput)\n for x in range(args.num_layers - 1):\n denseinput = Linear_overtime(hidden_size, hidden_size)\n self.DIs.append(denseinput)\n\n self.RNNs = nn.ModuleList()\n for x in range(args.num_layers):\n rnn = IndRNNwithBN(hidden_size=hidden_size, seq_len=args.seq_len,bn_location=args.bn_location) #IndRNN\n self.RNNs.append(rnn) \n \n self.classifier = nn.Linear(hidden_size, outputclass, bias=True)\n self.init_weights()\n\n def init_weights(self):\n for name, param in self.named_parameters():\n if 'weight_hh' in name:\n param.data.uniform_(0,U_bound) \n if args.u_lastlayer_ini and 'RNNs.'+str(args.num_layers-1)+'.weight_hh' in name:\n param.data.uniform_(U_lowbound,U_bound) \n if ('fc' in name) and 'weight' in name:#'denselayer' in name and \n nn.init.kaiming_uniform_(param, a=8, mode='fan_in')#\n if 'classifier' in name and 'weight' in name:\n nn.init.kaiming_normal_(param.data)\n if ('norm' in name or 'Norm' in name) and 'weight' in name:\n param.data.fill_(1)\n if 'bias' in name:\n param.data.fill_(0.0)\n\n\n def forward(self, input):\n rnnoutputs={} \n rnnoutputs['outlayer-1']=input\n for x in range(len(self.RNNs)):\n rnnoutputs['dilayer%d'%x]=self.DIs[x](rnnoutputs['outlayer%d'%(x-1)])\n rnnoutputs['outlayer%d'%x]= self.RNNs[x](rnnoutputs['dilayer%d'%x]) \n if args.dropout>0:\n rnnoutputs['outlayer%d'%x]= dropout_overtime(rnnoutputs['outlayer%d'%x],args.dropout,self.training)\n temp=rnnoutputs['outlayer%d'%(len(self.RNNs)-1)][-1]\n output = self.classifier(temp)\n # output=F.softmax(output,dim=1)\n return output \n \n \n","sub_path":"Indrnn_plainnet.py","file_name":"Indrnn_plainnet.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"402613799","text":"# rospy 
\n","sub_path":"Indrnn_plainnet.py","file_name":"Indrnn_plainnet.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"402613799","text":"# rospy for the subscriber\nimport rospy\nimport threading\nimport json\nimport urllib2\n# ROS Image message\nfrom sensor_msgs.msg import Image\n# ROS Image message -> OpenCV2 image converter\nfrom cv_bridge import CvBridge, CvBridgeError\n# OpenCV2 for saving an image\nimport cv2\nimport base64\nimport requests\n# Instantiate CvBridge\n# from config import cpp_server\nbridge = CvBridge()\n\nlock = threading.Lock()\ncv = threading.Condition(lock)\nbuffer = {}\n\nfrom ws4py.client.threadedclient import WebSocketClient\nclass CameraSocket(WebSocketClient):\n    def opened(self):\n        print(\"Connection opened\")\n\n# soc = CameraSocket('ws://localhost:9980')\n\n\ndef image_callback(msg):\n    #print(\"Received an image!\")\n    # convert the ROS Image message to OpenCV2, downscale and PNG-encode it\n    image = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')\n    image = cv2.resize(image, (50, 50))\n    _, image = cv2.imencode('.png', image)\n    lock.acquire()\n    buffer['image'] = base64.b64encode(image)\n    cv.notifyAll()\n    lock.release()\n\n\ndef depth_callback(msg):\n    #print(\"Received an image!\")\n    # Convert your ROS Image message to OpenCV2\n    depth = bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')\n    depth = cv2.resize(depth, (50, 50))\n    depth = cv2.imencode('.png', depth)[1]\n\n    lock.acquire()\n    buffer['depth'] = base64.b64encode(depth)\n    cv.notifyAll()\n    lock.release()\n\ndef socket_writer():\n    print('bonjour!')\n\n    while True:\n        cv.acquire()\n        while not len(buffer.keys()) == 2:\n            cv.wait()\n\n        message = json.dumps(buffer)\n        kek = json.loads(message)\n\n        print('depth len', len(buffer['depth']))\n        print('image len', len(buffer['image']))\n        print('len of depth', len(kek['depth']))\n        print('len of image', len(kek['image']))\n\n        r = requests.post('http://psi:9980', data=message)\n\n        buffer.clear()\n        cv.release()\n        break  # single-shot writer: posts one image/depth pair, then exits\n\ndef main():\n    rospy.init_node('image_listener')\n    # Define your image topic\n    image_topic = \"/camera/color/image_raw\"\n    depth_topic = \"/camera/depth/image_raw\"\n\n    # run the writer on a joinable background thread\n    thr1 = threading.Thread(target=socket_writer)\n    thr1.start()\n\n    # Set up your subscriber and define its callback\n    rospy.Subscriber(image_topic, Image, image_callback, queue_size=1)\n    rospy.Subscriber(depth_topic, Image, depth_callback, queue_size=1)\n\n    # Spin until ctrl + c\n    rospy.spin()\n    thr1.join()\n\nif __name__ == '__main__':\n    main()","sub_path":"frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"452156509","text":"import hashlib, time, json, requests, node, threading, os\n\nHASHALGO = hashlib.blake2s\nENCTYPE = 'utf-8'\n\nFORCE_MINING_THRESHOLD = 10\n\ndef validateChain(chain):\n\tif isinstance(chain, Blockchain):\n\t\tchain = chain.chain\n\telif isinstance(chain, dict):\n\t\tchain = chain['chain']\n\telif isinstance(chain, list):\n\t\tpass\n\telse:\n\t\traise TypeError('Expected type of chain: either instance of Blockchain or a Blockchain object\\'s dict')\n\tfor i in range(1, len(chain)):\n\t\tif chain[i]['prev hash'] != hash(chain[i - 1]):\n\t\t\treturn False\n\t\tif not validateProof(chain[i]['proof'], chain[i - 1]['proof'], hash(chain[i - 1])):\n\t\t\treturn False\n\treturn True\n\t\ndef hash(block):\n\tblockString = json.dumps(block, sort_keys = True).encode(ENCTYPE)\n\treturn HASHALGO(blockString).hexdigest()\n\t\ndef validateProof(proof, lastProof, prevHash):\n\tguessString = '{}{}{}'.format(prevHash, lastProof, proof).encode(ENCTYPE)\n\tguessHash = HASHALGO(guessString).hexdigest()\n\treturn 
guessHash[:4] == '0000'\n\t\ndef proofOfWork(lastProof, prevHash):\n\tproof = 0\n\twhile not validateProof(proof, lastProof, prevHash):\n\t\tproof += 1\n\treturn proof\n\t\nclass Blockchain:\n\tdef __init__(self, dictionary = None):\n\t\tif dictionary:\n\t\t\tif isinstance(dictionary, dict):\n\t\t\t\tself.__dict__ = dictionary\n\t\t\telse:\n\t\t\t\traise TypeError('dictionary is required as a arg, given: {}'.format(type(dictionary)))\n\t\telse:\n\t\t\tself.chain = [\n\t\t\t\t{\n\t\t\t\t\t'index': 1,\n\t\t\t\t\t'timestamp': time.time(),\n\t\t\t\t\t'proof': 1,\n\t\t\t\t\t'transactions': {'msg': 'Genesis'},\n\t\t\t\t\t'total transactions': 1,\n\t\t\t\t\t'prev hash': '1',\n\t\t\t\t}\n\t\t\t]\n\t\t\tself.pendingTransactions = {}\n\t\t\tself.mineableTransactions = {}\n\t\t\tself.totalTransactionsInChain = 1\n\t\tself.mineableTransactionsMutex = threading.BoundedSemaphore()\n\t\tself.pendingTransactionsMutex = threading.BoundedSemaphore()\n\t\n\tdef __len__(self):\n\t\treturn len(self.chain)\n\t\t\n\tdef isMineable(self):\n\t\tif len(self.pendingTransactions) > 0 or len(self.mineableTransactions) > 0:\n\t\t\tself.pendingTransactionsMutex.acquire()\n\t\t\tfor k, v in self.pendingTransactions.items():\n\t\t\t\tif k not in self.mineableTransactions:\n\t\t\t\t\tself.mineableTransactionsMutex.acquire()\n\t\t\t\t\tself.mineableTransactions[k] = v\n\t\t\t\t\tself.mineableTransactionsMutex.release()\n\t\t\tself.pendingTransactions = {}\n\t\t\tself.pendingTransactionsMutex.release()\n\t\t\treturn True\n\t\treturn False\n\t\n\tdef gottaMine(self):\n\t\tself.mineableTransactionsMutex.acquire()\n\t\tl = len(self.mineableTransactions) >= FORCE_MINING_THRESHOLD\n\t\tself.mineableTransactionsMutex.release()\n\t\treturn l\n\t\t\n\tdef __repr__(self):\n\t\tA = '[\\n'\n\t\tfor i in self.chain:\n\t\t\tA += '\\t{\\n'\n\t\t\tfor k, v in sorted(i.items()):\n\t\t\t\tA += '\\t\\t{}: {}\\n'.format(k, v)\n\t\t\tA += '\\t}\\n'\n\t\tA += ']\\n'\n\t\treturn A\n\t\n\tdef printChain(self):\n\t\tprint('[')\n\t\tfor i in self.chain:\n\t\t\tprint('\\t{')\n\t\t\tfor k, v in sorted(i.items()):\n\t\t\t\tprint('\\t\\t{}: {}'.format(k, v))\n\t\t\tprint('\\t}')\n\t\tprint(']')\n\t\t\n\t@property\n\tdef lastBlock(self):\n\t\tif len(self) != 0:\n\t\t\treturn self.chain[-1]\n\t\t\t\n\tdef validateTransaction(self, key, transaction):\n\t\tself.pendingTransactionsMutex.acquire()\n\t\tif key not in self.pendingTransactions:\n\t\t\tself.pendingTransactionsMutex.release()\n\t\t\tself.mineableTransactionsMutex.acquire()\n\t\t\tif key not in self.mineableTransactions:\n\t\t\t\tself.mineableTransactionsMutex.release()\n\t\t\t\tfor block in self.chain:\n\t\t\t\t\tif key in block['transactions']:\n\t\t\t\t\t\treturn False\n\t\t\t\treturn True\n\t\t\tself.mineableTransactionsMutex.release()\n\t\t\treturn False\n\t\tself.pendingTransactionsMutex.release()\n\t\treturn False\n\t\t\n\tdef addTransaction(self, key, transaction):\n\t\tif self.validateTransaction(key, transaction):\n\t\t\tself.pendingTransactionsMutex.acquire()\n\t\t\tself.pendingTransactions[key] = transaction\n\t\t\tself.pendingTransactionsMutex.release()\n\t\t\treturn True\n\t\treturn False\n\t\n\tdef validateBlock(self, block):\n\t\tfor k, v in block['transactions'].items():\n\t\t\tfor blk in self.chain:\n\t\t\t\tif k in blk['transactions']:\n\t\t\t\t\treturn False\n\t\treturn validateProof(block['proof'], self.lastBlock['proof'], hash(self.lastBlock))\n\t\t\t\n\tdef addBlock(self, block):\n\t\tif 
self.validateBlock(block):\n\t\t\tself.chain.append(block)\n\t\t\tself.totalTransactionsInChain += block['total transactions']\n\t\t\treturn True\n\t\treturn False\n\t\t\n\tdef writeChain(self):\n\t\twith open('chain.json', 'w') as f:\n\t\t\tf.write(json.dumps(self.chain, sort_keys = True))\n\t\t\n\tdef delChain(self):\n\t\tos.remove('chain.json')\n","sub_path":"agent/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"398293661","text":"import matplotlib.pyplot as plt\n\nimport streamlit as st\n\ndef show_image_ui(image, **imshow_arg):\n    \"\"\"Show image in UI.\n\n    A stand-in for cv2.imshow when waiting for the user to inspect an image.\n    cv2.imshow normally blocks; this function does not block and simply draws\n    the image in Streamlit when called from the Streamlit thread.\n    \"\"\"\n    is_streamlit_thread = st._is_running_with_streamlit\n    fig = plt.figure(frameon=False)\n    ax = plt.Axes(fig, [0., 0., 1., 1.])\n    ax.set_axis_off()\n    fig.add_axes(ax)\n    ax.imshow(image, aspect='auto', **imshow_arg)\n\n    if is_streamlit_thread:\n        st.pyplot(fig)\n    else:\n        plt.show()","sub_path":"remimi/utils/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"609320936","text":"import datetime\nfrom discord_components import Button, ButtonStyle, Select, SelectOption\nimport validators\nfrom src import db\nfrom src import utils\nfrom src import office_hours\nfrom src import cal\n\n\nasync def get_times(ctx, bot, event_type):\n    \"\"\"\n    Function:\n        get_times\n    Description:\n        Helper function for acquiring the start and end times an instructor wants for an event\n    Inputs:\n        - ctx: context of the message\n        - bot: discord bot object\n        - event_type: type of the event\n    Output:\n        The begin & end times of the event\n    \"\"\"\n\n    def check(m):\n        return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n    # Looping until a valid time is entered.\n    while True:\n        await ctx.send(\n            'Enter in format `<start>-<end>`, and times should be in 24-hour format.\\n'\n            f'For example, setting {event_type} from 9:30am to 1pm can be done as 9:30-13\\n'\n            + \"Type 'NA' if none. Type 'quit' to abort.\"\n        )\n\n        msg = await bot.wait_for('message', check=check)\n        user_input = msg.content\n\n        # Checking whether user entered 'quit' or 'NA'.\n        if await check_quit(ctx, user_input):\n            return\n        elif user_input == 'NA':\n            return False\n\n        times = msg.content.strip().split('-')\n        if len(times) != 2:\n            await ctx.send(\"Incorrect input. Please enter the time in the expected format.\\n\")\n            continue\n\n        new_times = []\n        new_time = None\n        try:\n            for t in times:\n                parts = t.split(':')\n                if len(parts) == 1:\n                    new_time = (int(parts[0]), 0)\n                elif len(parts) == 2:\n                    new_time = (int(parts[0]), int(parts[1]))\n                new_times.append(new_time)\n        except ValueError:\n            # non-numeric input such as 'ab-cd' should re-prompt rather than crash\n            await ctx.send(\"Incorrect input. Please enter the time in the expected format.\\n\")\n            continue\n\n        if len(new_times) != 2:\n            await ctx.send(\"Incorrect input. 
Please enter the time in the expected format.\n\")\n            continue\n        return new_times\n\n\nasync def get_due_time(ctx, bot):\n    \"\"\"\n    Function:\n        get_due_time\n    Description:\n        Helper function for acquiring the due time of an event\n    Inputs:\n        - ctx: context of the message\n        - bot: discord bot object\n    Output:\n        The due time of the event, or False if the user enters 'NA'.\n    \"\"\"\n\n    def check(m):\n        return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n    # Looping until a valid time is entered.\n    while True:\n        await ctx.send(\"Enter in 24-hour format. e.g. an assignment due at 11:59pm \"\n                       \"can be entered as 23:59. Type 'NA' if none. Type 'quit' to abort.\")\n        msg = await bot.wait_for(\"message\", check=check)\n        time = msg.content.strip()\n\n        # Aborting if user entered 'quit'.\n        if await check_quit(ctx, time):\n            return\n        elif time == 'NA':\n            return False\n\n        # Checking whether the format is valid. If invalid, continue the loop.\n        try:\n            time = datetime.datetime.strptime(time, '%H:%M')\n        except ValueError:\n            try:\n                time = datetime.datetime.strptime(time, '%H')\n            except ValueError:\n                await ctx.send(\"Incorrect input. Please enter the time in the expected format.\\n\")\n                continue\n        return time\n\n\nasync def check_quit(ctx, value):\n    \"\"\"\n    Function:\n        check_quit\n    Description:\n        Helper function for checking whether user entered 'quit'.\n    Input:\n        - ctx: context of the message\n        - value: parameter that holds user input\n    Output:\n        True if user input is 'quit', False otherwise.\n    \"\"\"\n    if value == 'quit':\n        await ctx.send(\"Aborting event creation. Type '!create' to restart.\")\n        return True\n    return False\n\n\nasync def get_date(ctx, bot):\n    \"\"\"\n    Function:\n        get_date\n    Description:\n        Helper function for acquiring the date or due date of an event\n    Input:\n        - ctx: context of the message\n        - bot: discord bot object\n    Output:\n        The date or the due date of the event.\n    \"\"\"\n    def check(m):\n        return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n    # Looping until a valid date is entered.\n    while True:\n        await ctx.send(\"Enter in format `MM-DD-YYYY`. Type 'NA' if none. Type 'quit' to abort.\")\n        msg = await bot.wait_for(\"message\", check=check)\n        date = msg.content.strip()\n\n        # Aborting if user entered 'quit'.\n        if await check_quit(ctx, date):\n            return\n        elif date == 'NA':\n            return False\n\n        # Checking whether the format is valid. If invalid, continue the loop.\n        try:\n            datetime.datetime.strptime(date, '%m-%d-%Y')\n        except ValueError:\n            await ctx.send(\"Invalid date. Please enter the date in the expected format.\\n\")\n            continue\n        return date\n\n\nasync def get_url(ctx, bot):\n    \"\"\"\n    Function:\n        get_url\n    Description:\n        Helper function for acquiring the associated url of an event\n    Input:\n        - ctx: context of the message\n        - bot: discord bot object\n    Output:\n        The url associated with the event, or False if user enters 'NA'.\n    \"\"\"\n\n    def check(m):\n        return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n    # Looping until a valid URL is entered (or 'quit'/'NA' is entered).\n    while True:\n        await ctx.send(\"Enter the URL. Type 'NA' if none. Type 'quit' to abort.\")\n        msg = await bot.wait_for(\"message\", check=check)\n        link = msg.content.strip()\n\n        if await check_quit(ctx, link):\n            return\n        elif link == 'NA':\n            return False\n        elif link and not validators.url(link):\n            await ctx.send(\"Invalid URL. 
Please enter a valid URL.\\n\")\n else:\n return link\n\n\nasync def create_event(ctx, bot, testing_mode):\n \"\"\"\n Function:\n create_event\n Description:\n Event creation subroutine\n Input:\n - ctx: context of the message\n - bot: discord bot object\n - testing_mode: flag indicating whether this event is being created during a system test\n Output:\n A new event is created in the database and calendar is updated with the new event.\n \"\"\"\n # creating buttons for event types\n if ctx.channel.name == 'instructor-commands':\n await ctx.send(\n 'Which type of event would you like to create?',\n components=[\n Button(style=ButtonStyle.blue, label='Assignment', custom_id='assignment'),\n Button(style=ButtonStyle.green, label='Exam', custom_id='exam'),\n Button(style=ButtonStyle.red, label='Office Hour', custom_id='office-hour'),\n Button(style=ButtonStyle.gray, label='Custom Event', custom_id='custom-event')\n ],\n )\n # Getting the ID of the clicked button\n button_clicked = ((await utils.wait_for_msg(bot, ctx.channel)).content\n if testing_mode else (await bot.wait_for('button_click')).custom_id)\n\n # If 'Assignment' is clicked, this will run\n if button_clicked == 'assignment':\n def check(m):\n return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n await ctx.send(\"What would you like the assignment to be called? \"\n \"(Type 'quit' to abort)\")\n msg = await bot.wait_for(\"message\", check=check)\n title = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, title):\n return\n\n # Getting associated url of the event.\n await ctx.send(\"Is there a link associated with this assignment?\\n \")\n link = await get_url(ctx, bot)\n if link is None:\n return\n\n await ctx.send(\"Extra description for assignment? Type NA if none. \"\n \"Type 'quit' to abort\")\n msg = await bot.wait_for(\"message\", check=check)\n description = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, description):\n return\n\n # Getting the due date.\n await ctx.send(\"What is the due date of this assignment?\\n \")\n date = await get_date(ctx, bot)\n if date is None:\n return\n\n # Getting the due time.\n await ctx.send(\"What time is this assignment due?\\n \")\n time = await get_due_time(ctx, bot)\n if time is None:\n return\n # If due time is entered as 'NA', this part will run\n elif not time:\n db.mutation_query(\n 'INSERT INTO assignments VALUES (?, ?, ?, ?, ?, ?, ?)',\n [ctx.guild.id, title, link, description, date, 0, 0]\n )\n await ctx.send('Assignment successfully created!')\n await cal.display_events(None)\n return\n\n # If there's a valid due time, this will execute\n db.mutation_query(\n 'INSERT INTO assignments VALUES (?, ?, ?, ?, ?, ?, ?)',\n [ctx.guild.id, title, link, description, date, time.hour, time.minute]\n )\n\n await ctx.send('Assignment successfully created!')\n await cal.display_events(None)\n return\n\n # If 'exam' is clicked, this will run\n elif button_clicked == 'exam':\n def check(m):\n return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n await ctx.send(\"What is the title of this exam? (Type 'quit' to abort)\")\n msg = await bot.wait_for(\"message\", check=check)\n title = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, title):\n return\n\n await ctx.send(\"What content is this exam covering? 
(Type 'quit' to abort)\")\n            msg = await bot.wait_for('message', check=check)\n            description = msg.content.strip()\n\n            # Aborting if user entered 'quit'.\n            if await check_quit(ctx, description):\n                return\n\n            # Getting the date.\n            await ctx.send(\"What is the date of this exam?\\n \")\n            date = await get_date(ctx, bot)\n            if date is None:\n                return\n\n            # Getting the exam start/end times.\n            await ctx.send(\"Type the start & end times of the exam\\n\")\n            times = await get_times(ctx, bot, 'exam')\n            if times is None:\n                return\n            # This part will run if user entered 'NA'.\n            elif not times:\n                db.mutation_query(\n                    'INSERT INTO exams VALUES (?, ?, ?, ?, ?, ?, ?, ?)',\n                    [ctx.guild.id, title, description, date,\n                     0, 0, 0, 0]\n                )\n                await ctx.send('Exam successfully created!')\n                await cal.display_events(ctx)\n                return\n\n            ((begin_hour, begin_minute), (end_hour, end_minute)) = times\n            db.mutation_query(\n                'INSERT INTO exams VALUES (?, ?, ?, ?, ?, ?, ?, ?)',\n                [ctx.guild.id, title, description, date,\n                 begin_hour, begin_minute, end_hour, end_minute]\n            )\n\n            await ctx.send('Exam successfully created!')\n            await cal.display_events(ctx)\n            return\n\n        # If 'Office Hour' is clicked, this will run\n        elif button_clicked == 'office-hour':\n            # Adding instructors in the server to a list\n            all_instructors = []\n            for mem in ctx.guild.members:\n                # a member counts as an instructor if any of their roles is named 'Instructor'\n                is_instructor = any(role.name == 'Instructor' for role in mem.roles)\n                if is_instructor:\n                    all_instructors.append(mem)\n\n            if len(all_instructors) < 1:\n                await ctx.send('There are no instructors in the server. Aborting event creation.')\n                return\n\n            options = [SelectOption(label=instr.name, value=instr.name)\n                       for instr in all_instructors]\n\n            await ctx.send(\n                'Which instructor will this office hour be for?',\n                components=[\n                    Select(\n                        placeholder='Select an instructor',\n                        options=options\n                    )\n                ]\n            )\n\n            instructor = ((await utils.wait_for_msg(bot, ctx.channel)).content\n                          if testing_mode else (await bot.wait_for('select_option')).values[0])\n\n            await ctx.send(\n                'Which day would you like the office hour to be on?',\n                components=[\n                    Select(\n                        placeholder='Select a day',\n                        options=[\n                            SelectOption(label='Monday', value='Mon'),\n                            SelectOption(label='Tuesday', value='Tue'),\n                            SelectOption(label='Wednesday', value='Wed'),\n                            SelectOption(label='Thursday', value='Thu'),\n                            SelectOption(label='Friday', value='Fri'),\n                            SelectOption(label='Saturday', value='Sat'),\n                            SelectOption(label='Sunday', value='Sun')\n                        ]\n                    )\n                ]\n            )\n\n            day = (\n                (await utils.wait_for_msg(bot, ctx.channel)).content\n                if testing_mode else\n                (await bot.wait_for('select_option', check=lambda x: x.values[0] in ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'))).values[0]\n            )\n\n            day_num = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun').index(day)\n\n            # Looping until a valid time range is entered (or 'quit' is entered).\n            await ctx.send(\"Type the start & end times of your office hours.\\n\")\n            while True:\n                times = await get_times(ctx, bot, 'office hour')\n                if times is None:\n                    return\n                if not times:\n                    await ctx.send(\"You must enter a time range for office hours\\n\")\n                    continue\n                break\n            ((begin_hour, begin_minute), (end_hour, end_minute)) = times\n\n            office_hours.add_office_hour(\n                ctx.guild,\n                office_hours.TaOfficeHour(\n                    instructor,\n                    day_num,\n                    (datetime.time(hour=begin_hour, minute=begin_minute),\n                     datetime.time(hour=end_hour, minute=end_minute))\n                )\n            )\n\n            db.mutation_query(\n                'INSERT INTO ta_office_hours VALUES (?, ?, ?, ?, ?, ?, ?)',\n                [ctx.guild.id, instructor, day_num, begin_hour, begin_minute, end_hour, end_minute]\n            )\n\n            await ctx.send('Office hour successfully created!')\n
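\n            # For reference, a sketch of the value unpacked above: get_times maps\n            # the input '9:30-13' to ((9, 30), (13, 0)),\n            # i.e. ((begin_hour, begin_minute), (end_hour, end_minute)).\n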
\n        # If 'Custom Event' is clicked, this will run\n        elif button_clicked == 'custom-event':\n            def check(m):\n                return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n            await ctx.send(\"What would you like this event to be called? \"\n                           \"(Type 'quit' to abort)\")\n            msg = await bot.wait_for(\"message\", check=check)\n            title = msg.content.strip()\n\n            # Aborting if user entered 'quit'.\n            if await check_quit(ctx, title):\n                return\n\n            await ctx.send(\"Extra description for the event? Type 'NA' if none. \"\n                           \"Type 'quit' to abort\")\n            msg = await bot.wait_for(\"message\", check=check)\n            description = msg.content.strip()\n\n            # Aborting if user entered 'quit'.\n            if await check_quit(ctx, description):\n                return\n\n            # Getting associated url of the event.\n            await ctx.send(\"Is there an associated link for this event?\")\n            link = await get_url(ctx, bot)\n            if link is None:\n                return\n\n            # Getting the associated date.\n            await ctx.send(\"Is there a date or a due date for this event?\\n\")\n            date = await get_date(ctx, bot)\n            if date is None:\n                return\n\n            # send this message if there's an associated date.\n            if date:\n                await ctx.send(\"Is there a due time for this event?\\n\")\n                time = await get_due_time(ctx, bot)\n                if time is None:\n                    return\n                elif time:\n                    db.mutation_query(\n                        'INSERT INTO custom_events VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',\n                        [ctx.guild.id, title, link, description, date, time.hour, time.minute, 0, 0, 0, 0]\n                    )\n                    await ctx.send('Event successfully created!')\n                    await cal.display_events(None)\n                    return\n\n            await ctx.send(\"What are the start & end times of this event?\\n\")\n            times = await get_times(ctx, bot, 'event')\n            if times is None:\n                return\n            elif not times:\n                db.mutation_query(\n                    'INSERT INTO custom_events VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',\n                    [ctx.guild.id, title, link, description, date, 0, 0, 0, 0, 0, 0]\n                )\n                await ctx.send('Event successfully created!')\n                await cal.display_events(None)\n                return\n\n            ((begin_hour, begin_minute), (end_hour, end_minute)) = times\n            db.mutation_query(\n                'INSERT INTO custom_events VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',\n                [ctx.guild.id, title, link, description, date, 0, 0, begin_hour, begin_minute, end_hour, end_minute]\n            )\n\n            await ctx.send('Event successfully created!')\n            await cal.display_events(None)\n            return\n\n    else:\n        await ctx.author.send('`!create` can only be used in the `instructor-commands` channel')\n        await ctx.message.delete()\n        return\n","sub_path":"src/event_creation.py","file_name":"event_creation.py","file_ext":"py","file_size_in_byte":18623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"379337252","text":"def agrupa_por_idade(x):\n    fet = {'crianca': [], 'adolescente': [], 'adulto': [], 'idoso': []}\n    # classify each person into exactly one age band\n    for k, v in x.items():\n        if v <= 11:\n            fet['crianca'].append(k)\n        elif v <= 17:\n            fet['adolescente'].append(k)\n        elif v <= 59:\n            fet['adulto'].append(k)\n        else:\n            fet['idoso'].append(k)\n    print(fet)\n
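\n# A worked example (hypothetical data): agrupa_por_idade({'Ana': 8, 'Bia': 15,\n# 'Caio': 30, 'Dirce': 70}) prints {'crianca': ['Ana'], 'adolescente': ['Bia'],\n# 'adulto': ['Caio'], 'idoso': ['Dirce']}; each person lands in exactly one band.\n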
","sub_path":"backup/user_079/ch153_2020_04_13_20_27_38_934479.py","file_name":"ch153_2020_04_13_20_27_38_934479.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"452825518","text":"import sys\r\nimport os\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy import create_engine\r\nfrom db import Base, Pylypets, Drahobrat, Podobovets, Slavsko, Krasiya\r\nfrom psr import Weather_Data\r\nimport datetime\r\nimport tkinter as tk\r\nimport tkinter.messagebox as box\r\n\r\nURL_DRAG = 'http://ru.snow-forecast.com/resorts/Drahobrat/6day/mid'\r\nURL_PYL = 'https://ru.snow-forecast.com/resorts/Pylypets/6day/mid'\r\nURL_POD = 'https://ru.snow-forecast.com/resorts/Podobovets/6day/mid'\r\nURL_SLV = 'https://ru.snow-forecast.com/resorts/Slavsko/6day/mid'\r\nURL_KRS = 'https://ru.snow-forecast.com/resorts/Krasiya/6day/mid'\r\n\r\n\r\ndef err():\r\n    root = tk.Tk()\r\n    root.overrideredirect(1)\r\n    root.withdraw()\r\n    box.showerror('Error!', 'Check internet connection')\r\n    root.quit()\r\n    sys.exit()\r\n\r\n\r\ndef msg(resort):\r\n    # the resort is passed in explicitly instead of being read from a global\r\n    root = tk.Tk()\r\n    root.overrideredirect(1)\r\n    root.withdraw()\r\n    box.showinfo('Snow_Alert', '{}: snow coming!!!'.format(resort))\r\n    root.quit()\r\n\r\n\r\ndef alert(resort):\r\n    total_snow = 0\r\n    for snow in resort.snow_list:\r\n        try:\r\n            total_snow += int(snow)\r\n        except ValueError:\r\n            pass\r\n    if total_snow >= 50:\r\n        msg(resort)\r\n\r\n\r\ndef update_db(resort, db):\r\n    day = 0\r\n    for i in range(0, 18, 3):\r\n        upd = session.merge(db(date=datetime.date.today() + datetime.timedelta(days=day),\r\n                               t_max_m=resort.temp_max[i],\r\n                               t_max_d=resort.temp_max[i + 1],\r\n                               t_max_n=resort.temp_max[i + 2],\r\n                               t_min_m=resort.temp_min[i],\r\n                               t_min_d=resort.temp_min[i + 1],\r\n                               t_min_n=resort.temp_min[i + 2],\r\n                               snow_m=resort.snow_list[i],\r\n                               snow_d=resort.snow_list[i + 1],\r\n                               snow_n=resort.snow_list[i + 2],\r\n                               wind_speed_m=resort.wind_list[i][0],\r\n                               wind_speed_d=resort.wind_list[i + 1][0],\r\n                               wind_speed_n=resort.wind_list[i + 2][0],\r\n                               wind_dir_m=resort.wind_list[i][1],\r\n                               wind_dir_d=resort.wind_list[i + 1][1],\r\n                               wind_dir_n=resort.wind_list[i + 2][1],\r\n                               weather_m=resort.weather_list[i][1],\r\n                               weather_d=resort.weather_list[i + 1][1],\r\n                               weather_n=resort.weather_list[i + 2][1]))\r\n        day += 1\r\n        session.add(upd)\r\n    session.commit()\r\n\r\n\r\ntry:\r\n    pyl = Weather_Data(URL_PYL)\r\n    drag = Weather_Data(URL_DRAG)\r\n    pod = Weather_Data(URL_POD)\r\n    slv = Weather_Data(URL_SLV)\r\n    krs = Weather_Data(URL_KRS)\r\nexcept AttributeError:\r\n    err()\r\n\r\nengine = create_engine('sqlite:///weather_stats.db')\r\nBase.metadata.bind = engine\r\nDBSession = sessionmaker(bind=engine)\r\nsession = DBSession()\r\n\r\nfor resort, db in zip((drag, pyl, pod, slv, krs),\r\n                      (Drahobrat, Pylypets, Podobovets, Slavsko, Krasiya)):\r\n    update_db(resort, db)\r\n\r\nfor resort in (drag, pyl, pod, slv, krs):\r\n    alert(resort)\r\n\r\n# time check before update!!!\r\n","sub_path":"db_upd.py","file_name":"db_upd.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"54388138","text":"\n# Find Eulerian Tour\n#\n# Write a function that takes in a graph\n# represented as a list of tuples\n# and return a list of nodes that\n# you would follow on an Eulerian Tour\n#\n# For example, if the input graph was\n# [(1, 2), (2, 3), (3, 1)]\n# A possible Eulerian tour would be [1, 2, 3, 1]\n\ngraph = [(1, 2), (2, 3), (3, 1)]\ngraph = [(1, 2), (2, 3), (3, 1), (3, 4)]\ngraph = [(1, 2), (2, 3), (3, 1), (3, 4), (4,2)]\ngraph = [(0, 1), (1, 5), (1, 7), (4, 5),(4, 8), (1, 6), (3, 7), (5, 9),(2, 4), (0, 4), (2, 5), (3, 6), (8, 9)]\n\n\ndef get_degrees(graph):\n    degrees = {}\n    for e in graph:\n        for i in e:\n            if i not in degrees:\n                degrees[i] = 1\n            else:\n                degrees[i] += 1\n    return degrees\n\n#print graph\n#print get_degrees(graph)\n\ndef get_nodes(graph):\n    nodes = 
[]\n for e in graph:\n if e[0] not in nodes:\n nodes.append(e[0])\n if e[1] not in nodes:\n nodes.append(e[1])\n return nodes\n\n#print get_nodes(graph)\n\n\ndef find_edges_including(graph, node):\n edges = []\n for e in graph:\n if e[0]==node or e[1]==node:\n edges.append(e)\n return edges\n\n#print find_edges_including(graph, 2)\n\n\ndef is_eulerian(graph, starting_node):\n degrees = get_degrees(graph)\n num_odd = 0\n for d in degrees:\n if degrees[d]%2 != 0:\n num_odd += 1\n if num_odd>2:\n return False\n if num_odd>0:\n if degrees[starting_node]%2 == 0:\n return False #starting node must be odd if there are odd nodes\n if len(degrees)>0 and starting_node not in degrees:\n return False #must be able to move from starting node to remaining edges, if there are any\n return True\n\n#print is_eulerian(graph,1)\n\ndef subtract_edge(graph, edge):\n new_graph = []\n for e in graph:\n if e != edge:\n new_graph.append(e)\n return new_graph\n\n#print subtract_edge(graph, (1,2))\n\ndef find_eulerian_tour(graph):\n # your code here\n tour = []\n tour_nodes = []\n\n degrees = get_degrees(graph)\n nodes = get_nodes(graph)\n\n for n in nodes:\n if not is_eulerian(graph, n):\n continue\n cur_node = n\n tour_nodes.append(cur_node)\n next_edges = find_edges_including(graph, cur_node)\n while len(next_edges)>0:\n e = next_edges.pop()\n if e[0] == cur_node:\n next_node = e[1]\n else:\n next_node = e[0]\n next_graph = subtract_edge(graph, e)\n if is_eulerian(next_graph, next_node):\n tour_nodes.append(next_node)\n graph = next_graph\n cur_node = next_node\n next_edges = find_edges_including(graph, cur_node)\n break\n if tour_nodes == []:\n return None\n return tour_nodes\n\n#print find_eulerian_tour(graph)\n\n","sub_path":"Find_eulerian_tour.py","file_name":"Find_eulerian_tour.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"446539183","text":"\r\n'''\r\n*****************************************************************************************\r\n*\r\n* \t\t===============================================\r\n* \t\tRapid Rescuer (RR) Theme (eYRC 2019-20)\r\n* \t\t===============================================\r\n*\r\n* This script is to implement Task 1A of Rapid Rescuer (RR) Theme (eYRC 2019-20).\r\n*\r\n* This software is made available on an \"AS IS WHERE IS BASIS\".\r\n* Licensee/end user indemnifies and will keep e-Yantra indemnified from\r\n* any and all claim(s) that emanate from the use of the Software or\r\n* breach of the terms of this agreement.\r\n*\r\n* e-Yantra - An MHRD project under National Mission on Education using ICT (NMEICT)\r\n*\r\n*****************************************************************************************\r\n'''\r\n\r\n\r\n# Team ID:\t\t\t[ 5151 ]\r\n# Author List:\t\t[ Rohan Mehta, Arnav Saha ]\r\n# Filename:\t\t\ttask_1a.py\r\n# Functions:\t\treadImage, solveMaze, buildGraph, findNeighbours, isSafe, findPath,constructPath\r\n# \t\t\t\t\t[ Comma separated list of functions in this file ]\r\n# Global variables:\tCELL_SIZE\r\n# \t\t\t\t\t[ List of global variables defined in this file ]\r\n\r\n\r\n# Import necessary modules\r\n# Do not import any other modules\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\n\r\n\r\n# To enhance the maze image\r\nimport image_enhancer\r\n\r\n\r\n# Maze images in task_1a_images folder have cell size of 20 pixels\r\nCELL_SIZE = 20\r\n\r\n\r\ndef readImage(img_file_path):\r\n \"\"\"\r\n Purpose:\r\n ---\r\n the function takes file path of 
original image as argument and returns its binary form\r\n\r\n    Input Arguments:\r\n    ---\r\n    `img_file_path` :\t\t[ str ]\r\n        file path of image\r\n\r\n    Returns:\r\n    ---\r\n    `original_binary_img` :\t[ numpy array ]\r\n        binary form of the original image at img_file_path\r\n\r\n    Example call:\r\n    ---\r\n    original_binary_img = readImage(img_file_path)\r\n\r\n    \"\"\"\r\n\r\n    binary_img = None\r\n\r\n    #############\tAdd your Code here\t###############\r\n    # read image from given path\r\n    binary_img = cv2.imread(img_file_path)\r\n    # convert image to grayscale\r\n    binary_img = cv2.cvtColor(binary_img, cv2.COLOR_BGR2GRAY)\r\n    ###################################################\r\n\r\n    return binary_img\r\n\r\n\r\ndef solveMaze(original_binary_img, initial_point, final_point, no_cells_height, no_cells_width):\r\n    \"\"\"\r\n    Purpose:\r\n    ---\r\n    the function takes binary form of original image, start and end point coordinates and solves the maze\r\n    to return the list of coordinates of shortest path from initial_point to final_point\r\n\r\n    Input Arguments:\r\n    ---\r\n    `original_binary_img` :\t[ numpy array ]\r\n        binary form of the original image at img_file_path\r\n    `initial_point` :\t\t[ tuple ]\r\n        start point coordinates\r\n    `final_point` :\t\t\t[ tuple ]\r\n        end point coordinates\r\n    `no_cells_height` :\t\t[ int ]\r\n        number of cells in height of maze image\r\n    `no_cells_width` :\t\t[ int ]\r\n        number of cells in width of maze image\r\n\r\n    Returns:\r\n    ---\r\n    `shortestPath` :\t\t[ list ]\r\n        list of coordinates of shortest path from initial_point to final_point\r\n\r\n    Example call:\r\n    ---\r\n    shortestPath = solveMaze(\r\n        original_binary_img, initial_point, final_point, no_cells_height, no_cells_width)\r\n\r\n    \"\"\"\r\n\r\n    shortestPath = []\r\n\r\n    #############\tAdd your Code here\t###############\r\n    lengthOfPath = 0\r\n    # initialise visited cell array with 0 (rows run over height, columns over width)\r\n    visited = np.zeros([no_cells_height, no_cells_width], dtype=int)\r\n    # build all neighbours graph for each cell\r\n    graph = buildGraph(original_binary_img, no_cells_height, no_cells_width)\r\n    # build a child:parent relationship to reconstruct the shortest path\r\n    parentDict = findPath(graph, visited,\r\n                          initial_point[0], initial_point[1], final_point[0], final_point[1], lengthOfPath)\r\n    # reconstruct the shortest path\r\n    shortestPath = constructPath(parentDict, initial_point, final_point)\r\n    ###################################################\r\n\r\n    return shortestPath\r\n\r\n\r\n#############\tYou can add other helper functions here\t\t#############\r\ndef buildGraph(original_binary_img, no_cells_height, no_cells_width):\r\n    # rows (i) run over the maze height, columns (j) over its width\r\n    graph = {}\r\n    for i in range(0, no_cells_height):\r\n        for j in range(0, no_cells_width):\r\n            graph[(i, j)] = findNeighbours(original_binary_img, i, j)\r\n    return graph\r\n
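\r\n# Sketch of the wall encoding used below: every CELL_SIZE x CELL_SIZE cell is\r\n# scanned along its four borders; a white (255) pixel on a border means there is\r\n# no wall on that side, so a start cell open to the right and below would get\r\n#     graph[(0, 0)] == [(1, 0), (0, 1)]\r\n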
\r\ndef findNeighbours(original_binary_img, row, column):\r\n    x_start = column * CELL_SIZE\r\n    y_start = row * CELL_SIZE\r\n    neighbours = []\r\n    top = True\r\n    bottom = True\r\n    left = True\r\n    right = True\r\n    for x in range(x_start, x_start + CELL_SIZE):\r\n        if(original_binary_img[y_start, x] == 255 and top):\r\n            # when no black bars are found on top of any cell\r\n            neighbours.append((row-1, column))\r\n            top = False\r\n        if(original_binary_img[y_start + CELL_SIZE - 1, x] == 255 and bottom):\r\n            # when no black bars are found on bottom of any cell\r\n            neighbours.append((row+1, column))\r\n            bottom = False\r\n\r\n    for y in range(y_start, y_start + CELL_SIZE):\r\n        if(original_binary_img[y, x_start] == 255 and left):\r\n            # when no black bars are found on left of any cell\r\n            neighbours.append((row, column-1))\r\n            left = False\r\n        if(original_binary_img[y, x_start + CELL_SIZE - 1] == 255 and right):\r\n            # when no black bars are found on right of any cell\r\n            neighbours.append((row, column+1))\r\n            right = False\r\n\r\n    return neighbours\r\n\r\n\r\ndef isSafe(graph, visited, i, j, x, y):\r\n    # a cell is safe to travel only if it is a neighbour and\r\n    # it has not been visited\r\n    if((x, y) in graph[(i, j)] and visited[x][y] == 0):\r\n        return True\r\n\r\n    return False\r\n\r\n\r\ndef findPath(graph, visited, i, j, x, y, dist):\r\n    # Implementing BFS using queue as list\r\n    queue = []\r\n    queue.insert(0, (i, j, dist))\r\n    visited[i][j] = 1\r\n    parentDict = {}\r\n\r\n    while(len(queue)):\r\n        node = queue[-1]  # node = (i,j,dist)\r\n        queue.pop()\r\n\r\n        if(node[0] == x and node[1] == y):\r\n            return parentDict\r\n\r\n        # up; each neighbour is marked visited as it is enqueued so it is queued only once\r\n        if(isSafe(graph, visited, node[0], node[1], node[0]-1, node[1])):\r\n            queue.insert(0, ((node[0] - 1, node[1], node[2]+1)))\r\n            parentDict[(node[0]-1, node[1])] = (node[0], node[1])\r\n            visited[node[0]-1][node[1]] = 1\r\n\r\n        # left\r\n        if(isSafe(graph, visited, node[0], node[1], node[0], node[1]-1)):\r\n            parentDict[(node[0], node[1]-1)] = (node[0], node[1])\r\n            queue.insert(0, ((node[0], node[1]-1, node[2]+1)))\r\n            visited[node[0]][node[1]-1] = 1\r\n\r\n        # right\r\n        if(isSafe(graph, visited, node[0], node[1], node[0], node[1] + 1)):\r\n            parentDict[(node[0], node[1]+1)] = (node[0], node[1])\r\n            queue.insert(0, ((node[0], node[1] + 1, node[2]+1)))\r\n            visited[node[0]][node[1]+1] = 1\r\n\r\n        # down\r\n        if(isSafe(graph, visited, node[0], node[1], node[0]+1, node[1])):\r\n            parentDict[(node[0]+1, node[1])] = (node[0], node[1])\r\n            queue.insert(0, ((node[0] + 1, node[1], node[2]+1)))\r\n            visited[node[0]+1][node[1]] = 1\r\n\r\n\r\ndef constructPath(parent, initial_point, final_point):\r\n    shortest = []\r\n\r\n    while(1):\r\n        shortest.insert(0, final_point)\r\n        if(parent[final_point] == initial_point):\r\n            shortest.insert(0, initial_point)\r\n            break\r\n        final_point = parent[final_point]\r\n    return shortest\r\n#########################################################################\r\n\r\n\r\n# NOTE:\tYOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION\r\n#\r\n# Function Name:\tmain\r\n# Inputs:\t\t\tNone\r\n# Outputs: \t\t\tNone\r\n# Purpose: \t\t\tthe function first takes 'maze00.jpg' as input and solves the maze by calling readImage\r\n# \t\t\t\t\tand solveMaze functions, it then asks the user whether to repeat the same on all maze images\r\n# \t\t\t\t\tpresent in 'task_1a_images' folder or not\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    curr_dir_path = os.getcwd()\r\n    # path to directory of 'task_1a_images'\r\n    img_dir_path = curr_dir_path + '/../task_1a_images/'\r\n\r\n    file_num = 0\r\n    img_file_path = img_dir_path + 'maze0' + \\\r\n        str(file_num) + '.jpg'\t\t# path to 'maze00.jpg' image file\r\n\r\n    print('\\n============================================')\r\n\r\n    print('\\nFor maze0' + str(file_num) + '.jpg')\r\n\r\n    try:\r\n\r\n        original_binary_img = readImage(img_file_path)\r\n        height, width = original_binary_img.shape\r\n\r\n    except AttributeError as attr_error:\r\n\r\n        print('\\n[ERROR] readImage function is not returning binary form of original image in expected format !\\n')\r\n        exit()\r\n\r\n    # number of cells in height of maze image\r\n    no_cells_height = int(height/CELL_SIZE)\r\n    # number of cells in width of maze image\r\n    
no_cells_width = int(width/CELL_SIZE)\r\n initial_point = (0, 0)\t\t\t\t\t\t\t\t\t\t\t# start point coordinates of maze\r\n # end point coordinates of maze\r\n final_point = ((no_cells_height-1), (no_cells_width-1))\r\n\r\n try:\r\n\r\n shortestPath = solveMaze(\r\n original_binary_img, initial_point, final_point, no_cells_height, no_cells_width)\r\n\r\n if len(shortestPath) > 2:\r\n\r\n img = image_enhancer.highlightPath(\r\n original_binary_img, initial_point, final_point, shortestPath)\r\n\r\n else:\r\n\r\n print(\r\n '\\n[ERROR] shortestPath returned by solveMaze function is not complete !\\n')\r\n exit()\r\n\r\n except TypeError as type_err:\r\n\r\n print('\\n[ERROR] solveMaze function is not returning shortest path in maze image in expected format !\\n')\r\n exit()\r\n\r\n print('\\nShortest Path = %s \\n\\nLength of Path = %d' %\r\n (shortestPath, len(shortestPath)))\r\n\r\n print('\\n============================================')\r\n\r\n cv2.imshow('canvas0' + str(file_num), img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n choice = input(\r\n '\\nWant to run your script on all maze images ? ==>> \"y\" or \"n\": ')\r\n\r\n if choice == 'y':\r\n\r\n file_count = len(os.listdir(img_dir_path))\r\n\r\n for file_num in range(file_count):\r\n\r\n img_file_path = img_dir_path + 'maze0' + str(file_num) + '.jpg'\r\n\r\n print('\\n============================================')\r\n\r\n print('\\nFor maze0' + str(file_num) + '.jpg')\r\n\r\n try:\r\n\r\n original_binary_img = readImage(img_file_path)\r\n height, width = original_binary_img.shape\r\n\r\n except AttributeError as attr_error:\r\n\r\n print(\r\n '\\n[ERROR] readImage function is not returning binary form of original image in expected format !\\n')\r\n exit()\r\n\r\n # number of cells in height of maze image\r\n no_cells_height = int(height/CELL_SIZE)\r\n # number of cells in width of maze image\r\n no_cells_width = int(width/CELL_SIZE)\r\n initial_point = (0, 0)\t\t\t\t\t\t\t\t\t\t\t# start point coordinates of maze\r\n # end point coordinates of maze\r\n final_point = ((no_cells_height-1), (no_cells_width-1))\r\n\r\n try:\r\n\r\n shortestPath = solveMaze(\r\n original_binary_img, initial_point, final_point, no_cells_height, no_cells_width)\r\n\r\n if len(shortestPath) > 2:\r\n\r\n img = image_enhancer.highlightPath(\r\n original_binary_img, initial_point, final_point, shortestPath)\r\n\r\n else:\r\n\r\n print(\r\n '\\n[ERROR] shortestPath returned by solveMaze function is not complete !\\n')\r\n exit()\r\n\r\n except TypeError as type_err:\r\n\r\n print(\r\n '\\n[ERROR] solveMaze function is not returning shortest path in maze image in expected format !\\n')\r\n exit()\r\n\r\n print('\\nShortest Path = %s \\n\\nLength of Path = %d' %\r\n (shortestPath, len(shortestPath)))\r\n\r\n print('\\n============================================')\r\n\r\n cv2.imshow('canvas0' + str(file_num), img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n else:\r\n\r\n print('')\r\n","sub_path":"submit/RR_Task_1A_1B#5151/Task 1A/task_1a.py","file_name":"task_1a.py","file_ext":"py","file_size_in_byte":12437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"415373745","text":"import os\nimport signal\nimport subprocess\nimport sys\n\nfrom src.reversi_zero.lib.pipe_helper import dump_pipe_pairs_names\n\n\nchildren_processes = []\nexit_tasks = []\n\n\ndef build_child_cmd(type, config, pipe_pairs):\n cmd = ['python3.6', '-m', 'src.reversi_zero.run', type,\n '--env', 
f'{config.env.env_arg_name}',\n '--n-sims', f'{config.play.simulation_num_per_move}',\n '--pipe', dump_pipe_pairs_names(pipe_pairs),\n ]\n if config.opts.gpu_mem_frac is not None:\n cmd.append('--gpu-mem-frac')\n cmd.append(f'{config.opts.gpu_mem_frac}')\n\n return cmd\n\n\ndef start_child_proc(cmd, nocuda=None, stdin=None, stdout=None, stderr=None, cwd=None):\n global children_processes\n\n env = os.environ.copy()\n if nocuda:\n env['CUDA_VISIBLE_DEVICES'] = ''\n\n try:\n p = subprocess.Popen(cmd, env=env, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd)\n except Exception:\n print(cmd)\n raise\n\n children_processes.append(p)\n\n return p\n\n\ndef kill_children_processes(*args):\n for p in children_processes:\n if p and p.poll() is None:\n p.kill()\n\n\ndef add_exit_task(task):\n global exit_tasks\n exit_tasks.append(task)\n\n\ndef clean(*args):\n for t in exit_tasks:\n print(t)\n t(*args)\n sys.exit()\n\n\ndef signal_exit():\n for sig in (signal.SIGABRT, signal.SIGILL, signal.SIGINT, signal.SIGSEGV, signal.SIGTERM):\n signal.signal(sig, clean)\n","sub_path":"src/reversi_zero/lib/proc_helper.py","file_name":"proc_helper.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"172573274","text":"class TrieNode(object):\n def __init__(self):\n self.is_word = False\n self.children = [None] * 26\n \nclass WordDictionary(object):\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.root = TrieNode()\n \n\n def addWord(self, word):\n \"\"\"\n Adds a word into the data structure.\n :type word: str\n :rtype: None\n \"\"\"\n p = self.root\n for c in word:\n index = ord(c) - ord('a')\n if not p.children[index]:\n p.children[index] = TrieNode()\n p = p.children[index]\n p.is_word = True\n\n \n\n def search(self, word):\n \"\"\"\n Returns if the word is in the data structure. A word could contain the dot character '.' 
to represent any one letter.\n :type word: str\n :rtype: bool\n \"\"\"\n return self.find(word, self.root, 0)\n \n def find(self, word, cur, index):\n if index == len(word):\n return cur.is_word\n \n c = word[index]\n \n if c == \".\":\n for i in xrange(26):\n if cur.children[i] != None and self.find(word, cur.children[i], index+1):\n return True\n return False \n else:\n return cur.children[ord(c) - ord(\"a\")] != None and self.find(word, cur.children[ord(c) - ord(\"a\")], index + 1)\n\n \n\n\n# Your WordDictionary object will be instantiated and called as such:\n# obj = WordDictionary()\n# obj.addWord(word)\n# param_2 = obj.search(word)","sub_path":"LC211_Trie.py","file_name":"LC211_Trie.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"542222382","text":"import httplib2 as http\nimport json\n\n\ndef get_suggestions(text):\n headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": \"Token 4bc501558cb7d702b435bfd6e0a6a026e99a87b4\",\n }\n\n url = \"https://suggestions.dadata.ru/suggestions/api/4_1/rs/suggest/party\"\n method = \"POST\"\n body = '{ \"query\": \"' + text + '\" }'\n\n h = http.Http()\n\n response, content = h.request(\n url,\n method,\n body.encode('utf-8'),\n headers)\n\n if response[\"status\"] == '200':\n data = json.loads(content)\n if (len(data[\"suggestions\"]) != 0):\n return data[\"suggestions\"]\n else:\n raise Exception(\"Bad request\")\n\nget_suggestions(\"сбер\")","sub_path":"dadata_wrapper.py","file_name":"dadata_wrapper.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"529052875","text":"import sys,os,glob,json\nimport random\nimport xgboost as xgb\nimport numpy as np\nimport time\n\nimport poc_expt_base_ml_utils as ml_utils\nimport poc_expt_base_extract_feature as extract_feature\n\nfrom sklearn.metrics import *\nfrom sklearn.externals import joblib\n\ndef read_meta(f):\n t = None\n with open(f) as file:\n t = json.load(file)\n return t\n\ndef read_meta_path(meta_path):\n return [read_meta(f) for f in sorted(glob.glob(os.path.join(meta_path,'*.meta')))]\n\ndef get_xgb_param(labels):\n ratio = float(labels.count(0)) / labels.count(1)\n param = dict()\n param['booster'] = 'gbtree'\n param['objective'] = 'multi:softprob'\n param['num_class'] = 2\n param['eval_metric'] = 'mlogloss'\n param['scale_pos_weight'] = ratio\n param['eta'] = 0.05\n param['n_estimators'] = 20\n param['max_depth'] = 6\n param['min_child_weight'] = 0.8\n param['colsample_bytree'] = 0.5\n param['subsample'] = 0.5\n param['silent'] = 1\n param['nthread'] = 4\n param['seed'] = 777\n return param\n\ndef model_training(mldataset,num_boost_round=0):\n params = get_xgb_param(mldataset['y_tr'])\n d_tr = xgb.DMatrix(data=mldataset['x_tr'],label=mldataset['y_tr'])\n # d_test = xgb.DMatrix(data=mldataset['x_ts'],label=mldataset['y_ts']) if len(mldataset['y_ts']) > 0 else None\n bst = xgb.train(params,d_tr,\n num_boost_round=num_boost_round if num_boost_round>0 else 1000,\n # early_stopping_rounds=None if num_boost_round>0 else 50,\n # evals=[(d_test,'test')] if d_test is not None else [],\n # verbose_eval=False\n )\n return bst\n\ndef get_performance(y_ts, y_pred):\n perf = dict()\n #y_pred_cat = np.argmax(y_pred, axis=1)\n y_pred_cat = [1 if i[1] > 0.7 else 0 for i in y_pred] #aggressive\n perf['accuracy'] = accuracy_score(y_true=y_ts, 
y_pred=y_pred_cat)\n perf['precision'] = precision_score(y_true=y_ts, y_pred=y_pred_cat)\n perf['recall'] = recall_score(y_true=y_ts, y_pred=y_pred_cat)\n perf['data_dim'] = len(y_ts)\n zero_num = len([i for i in y_ts if i==0])\n one_num = len([i for i in y_ts if i==1])\n perf['data_dist'] = '(0:1) {}:{}'.format(zero_num,one_num)\n return perf\n\nDATA_PATH = os.path.join(os.sep,'data','real_case',)\n\nuser = sys.argv[1]\n\nprint('poc_bench_base',user)\n\n#-- Read Meta --#\ntr_target = read_meta_path(os.path.join(DATA_PATH,user,'train'))\ntr_anti = read_meta_path(os.path.join(DATA_PATH,user,'anti'))\ntt_bec = read_meta_path(os.path.join(DATA_PATH,user,'bec'))\n\nt0 = time.time()\n#-- Feature Extraction --#\nt1 = time.clock()\nml_dataset,ext_cost = ml_utils.prepare_ml_dataset(tr_target,tr_anti,0,False,\n target_test=[],\n anti_test=[],\n bec_test=tt_bec)\nprint(time.clock() - t1)\n\nt1 = time.clock()\nmodel = model_training(ml_dataset,num_boost_round=200)\nif 'normalizer' in ml_dataset:\n normalizer = ml_dataset['normalizer']\njoblib.dump(model,'bench_base_model_'+user+'.pkl')\nprint(time.clock() - t1)\nprint(\"Model Training Time:\",time.time() - t0)\n\n#-- Prediction --#\nt0 = time.time()\nml_dataset_,ext_cost = ml_utils.prepare_ml_dataset(\n tr_target,\n tr_anti,\n 0,False,\n target_test=tt_bec,\n anti_test=tt_bec,\n bec_test=tt_bec)\ndtr = xgb.DMatrix(\n data=ml_dataset_['x_tr'],\nlabel=ml_dataset_['y_tr'])\ndtt = xgb.DMatrix(\n data=ml_dataset_['x_ts'],\n label=ml_dataset_['y_ts'])\nmodel = joblib.load('bench_base_model_'+user+'.pkl')\nbec_hat = model.predict(dtt)[-len(tt_bec):,0]\nprint(time.time() - t0)\n\nprint(\"Accuracy:\",accuracy_score([True]*len(tt_bec),bec_hat>0.3))\nprint()\n","sub_path":"poc_bench_base.py","file_name":"poc_bench_base.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"582325353","text":"\"\"\"\nCIS-15 Exercise 36\nEmanuel Navarro\n\"\"\"\nfrom sys import exit\ndef treasure(Hero):\n print('You enter the room.')\n print('The room is full of gold and artifacts and old scrolls.')\n Treasure=input('What do you take? The glowing shiny gold or a book that catches your eye.')\n if Treasure== 'gold':\n dead('The gold is cursed and you die and become a permanent soul lost in the dungeon')\n elif Treasure=='book':\n print('The temple starts to crumble and you proceed to exit the dungeon')\n print('You have your book in hand believing you lost your opportunity for gold and riches')\n print('You open the book and it is a book of infinite knowledge that helps you become a famous rich adventurer remembered for all time')\n print('Good job you win')\n print(Hero, ' you level up and all your stats go up!!')\n exit(0)\n else:\n dead('you take too long to decide and a 30 foot snake eats you while you were thinking')\n \n\ndef dead(why):\n print(why, \"Good job!\")\n exit(0)\ndef battle(hero):\n hp= 30\n squire=hero\n name= squire +\":\"+\"Hp\"\n return name\n\ndef waterdungeon(Hero):\n print('You walk into the temple and see two levers')\n print('And what seems to be a door.')\n print('Skulls are eerily spread throughout the ground.')\n \n waterchoice=input('What lever do you pull? left or right?')\n \n if waterchoice== 'left':\n dead('The temple trembles and a 30 foot snake emerges from the ground and eats you')\n elif waterchoice=='right':\n dead('The room collapses on top of you and you die')\n elif waterchoice=='both':\n print('The door opens')\n treasure(Hero)\n else:\n dead('You do nothing and die of starvation')\n \ndef deathmountain():\n dead('The air is rancid poison and you die.')\n\n\ndef gamestart(name):\n \n print(f\"Hello {name}\")\n print('Please select a class from the following')\n classes=['Archer', 'Knight', 'Mage']\n stats=['Str:4, Dex:10, Vit:6','Str:10, Vit:8, Spirit:6',' Magic:12, Spirit:8, Luck:6']\n Archer= classes[0] + stats[0]\n Knight= classes[1] + stats[1]\n Mage= classes[2] + stats[2]\n n= 0\n for i in classes:\n print(f\"Class#{n+1} {i} with Stats {stats[n]} \")\n n+=1\n while True:\n choice= input('Enter class name: ')\n if choice==classes[0]:\n return name+\":\"+Archer\n elif choice== classes[1]:\n return name+\":\"+Knight\n elif choice== classes[2]:\n return name+\":\"+Mage\n else:\n print(\"The name of the class is capitalized\")\n \n\n#print(gamestart())\n\ndef main() :\n\n name= str(input('Enter your character name: '))\n Hero=gamestart(name)\n print(\"Finally\",name, \"today is your graduation from Wisteria Academy\")\n print(\"Before you can graduate you must prove yourself.\")\n print(\"What dungeon shall you go conquer and show your worth\")\n print(\"Will it be waterdungeon? A place of unknown creatures and untold treasures.\")\n print(\"Death mountain, where death is assured\")\n choice2=int(input(\"Option 1 waterdungeon, Option 2 Deathmountain: \"))\n if choice2==1:\n return waterdungeon(Hero)\n elif choice2==2:\n deathmountain()\n else:\n dead('You run away and become a farmer')\n \n \n \n \n \n\n\nif __name__ == '__main__' :\n main() ","sub_path":"week11/ex36.py","file_name":"ex36.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"236627731","text":"import os\nimport json\nimport xmltodict\nimport xml.etree.ElementTree as Et\nfrom unittest import TestCase\nfrom copy import deepcopy\nfrom piano_utils.xml_json_converter import xml_to_json, json_to_xml\n\n\nclass TestXmlJsonConverter(TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n create/setup test data\n :return:\n \"\"\"\n cls.simple_xml = open(os.path.join(os.path.dirname(__file__), 'simple_xml.xml')).read()\n\n cls.simple_json = {\n \"note_to\": \"Tove\",\n \"note_from\": \"Jani\",\n \"note_heading\": \"Reminder\",\n \"note_body\": \"Don't forget me this weekend!\",\n }\n\n cls.complex_xml = open(os.path.join(os.path.dirname(__file__), 'complex_xml.xml')).read()\n\n cls.catalog = [{\n \"title\": \"Empire Burlesque\",\n \"artist\": \"Bob Dylan\",\n \"prices\": (\"10.90\", \"8.99\"),\n \"year\": \"1985\",\n }, {\n \"title\": \"Hide Your Heart\",\n \"artist\": \"Bonnie Tyler\",\n \"prices\": (\"9.90\", \"7.50\"),\n \"year\": \"1988\",\n }, {\n \"title\": \"One Night Only\",\n \"artist\": \"Bee Gees\",\n \"prices\": (\"10.90\", \"5.99\"),\n \"year\": \"1998\",\n }]\n\n cls.complex_json = {}\n for idx, cd in enumerate(cls.catalog):\n for k in [\"title\", \"artist\", \"prices\", \"year\"]:\n key = \"catalog_cd_%s_%s\" % (str(idx), k)\n if k == \"prices\":\n cls.complex_json[key + \"_price_0_@type\"] = \"rrp\"\n cls.complex_json[key + \"_price_0_#text\"] = cd[k][0]\n cls.complex_json[key + \"_price_1_@type\"] = \"special\"\n cls.complex_json[key + 
\"_price_1_#text\"] = cd[k][1]\n else:\n cls.complex_json[key] = cd[k]\n\n cls.complex_json = [cls.complex_json]\n\n def test_xml_to_json(self):\n \"\"\"\n test conversion from xml to json\n :return:\n \"\"\"\n original_xml = self.simple_xml\n\n xml_as_json = json.loads(xml_to_json(original_xml))\n\n self.assertDictEqual(self.simple_json, xml_as_json[0])\n\n def test_xml_to_json__with_multiple(self):\n \"\"\"\n test conversion from xml to json with multiple tags\n :return:\n \"\"\"\n original_xml = deepcopy(self.complex_xml)\n\n # convert xml to json\n result = json.loads(xml_to_json(original_xml))\n self.assertTrue(isinstance(result, list))\n\n # check is flattened\n for article in result:\n for v in article.values():\n self.assertNotIsInstance(v, dict)\n self.assertNotIsInstance(v, list)\n\n # check values\n for idx, json_value in enumerate(self.complex_json):\n for k, v in json_value.iteritems():\n self.assertEquals(v, result[idx][k])\n\n def test_json_to_xml(self):\n \"\"\"\n test conversion from json to xml\n :return:\n \"\"\"\n original_json = deepcopy(self.simple_json)\n\n json_as_xml = json_to_xml(json.dumps(original_json))\n\n xml_tree = Et.fromstring(json_as_xml)\n self.assertEquals(\"note\", xml_tree.tag)\n self.assertEquals(original_json[\"note_to\"], xml_tree.find(\"to\").text)\n self.assertEquals(original_json[\"note_from\"], xml_tree.find(\"from\").text)\n self.assertEquals(original_json[\"note_heading\"], xml_tree.find(\"heading\").text)\n self.assertEquals(original_json[\"note_body\"], xml_tree.find(\"body\").text)\n\n def test_json_to_xml__with_multiple(self):\n \"\"\"\n test conversion from json to xml with multiple tags\n :return:\n \"\"\"\n original_json = deepcopy(self.complex_json)\n\n # convert from json to xml\n json_as_xml = json_to_xml(json.dumps(original_json[0]))\n\n for idx, json_value in enumerate(original_json):\n xml_tree = Et.fromstring(json_as_xml)\n cd = xml_tree[idx]\n self.assertEquals(self.catalog[idx][\"title\"], cd.find(\"title\").text)\n self.assertEquals(self.catalog[idx][\"artist\"], cd.find(\"artist\").text)\n self.assertEquals(self.catalog[idx][\"year\"], cd.find(\"year\").text)\n prices = cd.find(\"prices\")\n price_types = [\"rrp\", \"special\"]\n for p_idx, price in enumerate(prices.findall(\"price\")):\n self.assertEquals(self.catalog[idx][\"prices\"][p_idx], price.text)\n self.assertEquals(price_types[p_idx], price.get(\"type\", \"\"))\n\n def test_xml_to_json_to_xml(self):\n \"\"\"\n test conversion from xml to json, and back from json to xml\n :return:\n \"\"\"\n original_xml = deepcopy(self.simple_xml)\n\n # convert xml to json\n xml_as_json = json.loads(xml_to_json(deepcopy(original_xml)))\n\n # convert json back to xml\n json_as_xml = json_to_xml(json.dumps(xml_as_json[0]))\n\n # format result and expected result into dictionaries\n actual = json.loads(json.dumps(xmltodict.parse(json_as_xml)))\n expect = json.loads(json.dumps(xmltodict.parse(original_xml)))\n\n # compare dictionary values\n self.assertDictEqual(actual, expect)\n\n def test_xml_to_json_to_xml__with_multiple(self):\n \"\"\"\n test conversion from xml to json, and back from json to xml with multiple tags\n :return:\n \"\"\"\n original_xml = deepcopy(self.complex_xml)\n\n # convert xml to json\n xml_as_json = json.loads(xml_to_json(deepcopy(original_xml)))\n\n # convert json back to xml\n json_as_xml = json_to_xml(json.dumps(xml_as_json[0]))\n\n # format result and expected result into dictionaries\n result = json.loads(json.dumps(xmltodict.parse(json_as_xml)))\n expects 
= json.loads(json.dumps(xmltodict.parse(original_xml)))\n\n # compare dictionary values\n self.assertDictEqual(result, expects)\n\n def test_json_to_xml_to_json(self):\n \"\"\"\n test conversion from json to xml, and back from xml to json\n :return:\n \"\"\"\n original_json = deepcopy(self.simple_json)\n\n # convert json to xml\n json_as_xml = json_to_xml(json.dumps(original_json))\n\n # convert xml back to json\n xml_as_json = json.loads(xml_to_json(json_as_xml))\n\n # compare results\n self.assertDictEqual(original_json, xml_as_json[0])\n\n def test_json_to_xml_to_json__with_multiple(self):\n \"\"\"\n test conversion from json to xml, and back from xml to json with multiple tags\n :return:\n \"\"\"\n original_json = deepcopy(self.complex_json)\n\n # convert json to xml\n json_as_xml = json_to_xml(json.dumps(original_json[0]))\n\n # convert xml back to json\n xml_as_json = json.loads(xml_to_json(json_as_xml))\n\n # compare results\n self.assertEquals(len(original_json), len(xml_as_json))\n for idx, d in enumerate(original_json):\n self.assertDictEqual(d, xml_as_json[idx])\n","sub_path":"tests/test_xml_json_converter.py","file_name":"test_xml_json_converter.py","file_ext":"py","file_size_in_byte":7023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"115094169","text":"# @author: Guy Zyskind\n# guy@zyskind.com\n# Created on June 18, 2013\n#\n# DESCRIPTION:\n# Pulls tweet data from Twitter because ToS prevents distributing it directly.\n#\n# This is an updated version of Niek Sanders Corpus Install Script, which is compliant\n# with twitter's new API v1.1. The old API (v1) has been deprecated and no longer works.\n# This version also supports OAuth2, which is now required, but will also significantly\n# improve the running time.\n#\n# Full information and credit - \n# - Niek Sanders\n# njs@sananalytics.com\n# http://www.sananalytics.com/lab/twitter-sentiment/\n#\n# USAGE:\n# 1. Fill in the following parameters (from your twitter's app):\n# CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET.\n# 2. 
Run the script (Optional: change paths).\n#\n# Twitter currently limits such requests to 900/window (15 minutes).\n# This will require around 1.5 hours for the script to complete.\n#\n##############################################################\n#\n# Updated to be Python 3 compatible and run against the current Twitter API.\n#\n# Tested Environment - Windows 10 Pro + Python 3.6.1 + tweepy 3.5.0\n#\n# - Mark\n#\n##############################################################\n#\n# New Dependency on tweepy - https://github.com/tweepy/tweepy\n# - Install tweepy using pip, as...\n# - pip is the preferred installer program - https://docs.python.org/3/installing/\n# - With Python 3.4, pip is included by default with the Python binary installers.\n# - Command\n# - pip install tweepy\n#\n#\nimport csv, getpass, json, os, time, urllib\n\nimport tweepy\n\nCONSUMER_KEY = 'Your twitter app key'\nCONSUMER_SECRET = 'Your twitter app secret'\nACCESS_TOKEN = 'Your access token key'\nACCESS_TOKEN_SECRET = 'Your access token secret'\n\ndef get_user_params():\n\n user_params = {}\n\n # get user input params\n user_params['inList'] = input( '\\nInput file [./corpus.csv]: ' )\n user_params['outList'] = input( 'Results file [./full-corpus.csv]: ' )\n user_params['rawDir'] = input( 'Raw data dir [./rawdata/]: ' )\n \n # apply defaults\n if user_params['inList'] == '': \n user_params['inList'] = './corpus.csv'\n if user_params['outList'] == '': \n user_params['outList'] = './full-corpus.csv'\n if user_params['rawDir'] == '': \n user_params['rawDir'] = './rawdata/'\n\n return user_params\n\n\ndef dump_user_params( user_params ):\n\n # dump user params for confirmation\n print('Input: ' + user_params['inList'])\n print('Output: ' + user_params['outList'])\n print('Raw data: ' + user_params['rawDir'])\n return\n\n\ndef read_total_list( in_filename ):\n\n # read total fetch list csv\n fp = open( in_filename, 'r', encoding=\"utf-8\" )\n reader = csv.reader( fp, delimiter=',', quotechar='\"' )\n\n total_list = []\n for row in reader:\n total_list.append( row )\n\n return total_list\n\n\ndef purge_already_fetched( fetch_list, raw_dir ):\n\n # list of tweet ids that still need downloading\n rem_list = []\n\n # check each tweet to see if we have it\n for item in fetch_list:\n\n # check if json file exists\n tweet_file = raw_dir + item[2] + '.json'\n if os.path.exists( tweet_file ):\n\n # attempt to parse json file\n try:\n parse_tweet_json( tweet_file )\n print('--> already downloaded #' + item[2])\n except RuntimeError:\n rem_list.append( item )\n else:\n rem_list.append( item )\n\n return rem_list\n\n\ndef get_time_left_str( cur_idx, fetch_list, download_pause ):\n\n tweets_left = len(fetch_list) - cur_idx\n total_seconds = tweets_left * download_pause\n\n str_hr = int( total_seconds / 3600 )\n str_min = int((total_seconds - str_hr*3600) / 60)\n str_sec = total_seconds - str_hr*3600 - str_min*60\n\n return '%dh %dm %ds' % (str_hr, str_min, str_sec)\n\ndef oauth_get_tweet(tweet_id, http_method=\"GET\", post_body='', http_headers=None):\n \n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n api = tweepy.API(auth)\n \n #print('Fetching tweet for ID %s', tweet_id)\n\n tweet = api.get_status(tweet_id)\n print(\"%s,%s\" % (tweet_id, tweet.text))\n return tweet\n\n\ndef download_tweets( fetch_list, raw_dir ):\n\n # ensure raw data directory exists\n if not os.path.exists( raw_dir ):\n os.mkdir( raw_dir )\n\n # Set rate limit and minus fudge factor of 100\n # 
https://dev.twitter.com/rest/public/rate-limits\n max_tweets_per_hr = 4 * 900 - 100\n download_pause_sec = 3600.0 / max_tweets_per_hr\n print(\"Tweet Throttle - Max tweets per hour = %d (One every %f seconds)\" % \\\n (max_tweets_per_hr, download_pause_sec))\n\n # download tweets\n for idx in range(0,len(fetch_list)):\n\n # current item\n item = fetch_list[idx]\n\n # print status\n trem = get_time_left_str( idx, fetch_list, download_pause_sec )\n print('--> downloading tweet #%s (%d of %d) (%s left)' % \\\n (item[2], idx+1, len(fetch_list), trem))\n\n # pull data\n try:\n data = oauth_get_tweet(item[2])\n with open(raw_dir + item[2] + '.json', 'w', encoding=\"utf-8\") as outfile:\n json.dump(data._json, outfile, ensure_ascii=False, indent=2)\n except tweepy.TweepError as te:\n print(\"Failed to get tweet ID %s: %s\" % (item[2], te))\n # traceback.print_exc(file=sys.stderr)\n pass\n \n # stay in Twitter API rate limits \n print(' pausing %f seconds to obey Twitter API rate limits' % \\\n (download_pause_sec))\n time.sleep( download_pause_sec )\n\n return\n\n\ndef parse_tweet_json( filename ):\n \n # read tweet\n print('opening: ' + filename)\n\t\n # parse json \n with open( filename, 'rt', encoding=\"utf-8\" ) as infile:\n try:\n tweet_json = json.load( infile )\n except ValueError as e:\n raise RuntimeError(\"Error parsing json - %s\" % str(e))\n except json.JSONDecodeError as e:\n raise RuntimeError(\"Error parsing json - %s\" % str(e))\n\n # look for twitter api error msgs\n if 'errors' in tweet_json:\n raise RuntimeError('error in downloaded tweet')\n\n # extract creation date and tweet text\n return [ tweet_json['created_at'], tweet_json['text'] ]\n\n\ndef build_output_corpus( out_filename, raw_dir, total_list ):\n\n # open csv output file\n fp = open( out_filename, 'w', newline='', encoding=\"utf-8\" )\n writer = csv.writer( fp, delimiter=',', quotechar='\"', escapechar='\\\\', \n quoting=csv.QUOTE_ALL )\n\n # write header row\n writer.writerow( ['Topic','Sentiment','TweetId','TweetDate','TweetText'] )\n\n # parse all downloaded tweets\n missing_count = 0\n for item in total_list:\n\n # ensure tweet exists\n if os.path.exists( raw_dir + item[2] + '.json' ):\n\n try: \n # parse tweet\n parsed_tweet = parse_tweet_json( raw_dir + item[2] + '.json' )\n full_row = item + parsed_tweet\n \n # write csv row\n writer.writerow( full_row )\n\n except RuntimeError:\n print('--> bad data in tweet #' + item[2])\n missing_count += 1\n\n else:\n print('--> missing tweet #' + item[2])\n missing_count += 1\n\n # indicate success\n if missing_count == 0:\n print('\\nSuccessfully downloaded corpus!')\n print('Output in: ' + out_filename + '\\n')\n else: \n print('\\nMissing %d of %d tweets!' 
% (missing_count, len(total_list)))\n print('Partial output in: ' + out_filename + '\\n')\n\n return\n\n\ndef main():\n\n # get user parameters\n user_params = get_user_params()\n dump_user_params( user_params )\n\n # get fetch list\n total_list = read_total_list( user_params['inList'] )\n fetch_list = purge_already_fetched( total_list, user_params['rawDir'] )\n\n # start fetching data from twitter\n download_tweets( fetch_list, user_params['rawDir'] )\n\n # second pass for any failed downloads\n print('\\nStarting second pass to retry any failed downloads')\n fetch_list = purge_already_fetched( total_list, user_params['rawDir'] )\n download_tweets( fetch_list, user_params['rawDir'] )\n\n # build output corpus\n build_output_corpus( user_params['outList'], user_params['rawDir'], \n total_list )\n\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":8560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"301171039","text":"from django.db import models\nfrom django.contrib.postgres.fields import JSONField\nfrom django.contrib.auth import get_user_model\n\n\n# Suggestions (needs refined algorithm):\n# Perform aggregate groupings on User search history by ingredients\n# Find most commonly searched ingredients\n# Get ingredients and cuisine category from a random saved bookmark\n# Generate new search suggestion from combined data\n\n\nclass History(models.Model):\n \"\"\"Log user searches from application to provide caching and\n adaptive suggestions.\"\"\"\n\n user = models.ForeignKey(\n get_user_model(),\n null=False,\n blank=False,\n on_delete=models.CASCADE,\n related_name='searches',\n related_query_name='search'\n )\n ingredients = JSONField(default=dict)\n filters = JSONField(default=dict)\n request_url = models.CharField(max_length=200, default='')\n submitted_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Bookmarks(models.Model):\n \"\"\"Save recipe search results for future use.\"\"\"\n\n user = models.ForeignKey(\n get_user_model(),\n null=False,\n blank=False,\n on_delete=models.CASCADE,\n related_name='bookmarks',\n related_query_name='bookmark'\n )\n url = models.CharField(max_length=400)\n created_at = models.DateTimeField(auto_now_add=True)\n","sub_path":"recipe-api/search/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"537519557","text":"import numpy as np\nimport pylab as pl\n\n\nKappa_In = np.genfromtxt('Plotters/Output/Average_Size_KappaEst_in_RA_Dec_Grid.dat')\nKappa = Kappa_In[1:Kappa_In.shape[0]-1, 1:Kappa_In.shape[1]-1]\n\nKappaErr_In = np.genfromtxt('Plotters/Output/Average_Size_KappaError_in_RA_Dec_Grid.dat')\nKappaErr = KappaErr_In[1:KappaErr_In.shape[0]-1, 1:KappaErr_In.shape[1]-1]\n\nRatioK = np.zeros(Kappa.shape)\n\nfor i in range(1,Kappa.shape[0]-1):\n for j in range(1,Kappa.shape[1]-1): \n if(KappaErr[i,j] != 0.):\n RatioK[i,j] = Kappa[i,j]/KappaErr[i,j]\n else:\n RatioK[i,j] = 0.\n\nf = pl.figure()\nax = f.add_subplot(1,1,1)\nim = ax.imshow(RatioK)\nf.colorbar(im)\n\nax.set_title(r'$\\kappa_{Size}/\\sigma_{\\kappa}$')\npl.show()\n\n\nSize_In = np.genfromtxt('Plotters/Output/Average_Size_in_RA_Dec_Grid.dat')\nSize = Size_In[1:Size_In.shape[0]-1, 1:Size_In.shape[1]-1]\n\nSizeErr_In = np.genfromtxt('Plotters/Output/Average_Size_Error_in_RA_Dec_Grid.dat')\nSizeErr = 
SizeErr_In[1:SizeErr_In.shape[0]-1, 1:SizeErr_In.shape[1]-1]\n\nRatio = np.zeros(Size.shape)\n\nfor i in range(1,Size.shape[0]-1):\n for j in range(1,Size.shape[1]-1): \n if(SizeErr[i,j] != 0.):\n Ratio[i,j] = Size[i,j]/SizeErr[i,j]\n else:\n Ratio[i,j] = 0.\n\nf = pl.figure()\nax = f.add_subplot(1,1,1)\nim = ax.imshow(Ratio)\nf.colorbar(im)\n\nax.set_title(r'$R/\\sigma_{R}$')\npl.show()\n\n\nRatioErr = np.zeros(SizeErr.shape)\nfor i in range(1,Size.shape[0]-1):\n for j in range(1,Size.shape[1]-1): \n if(SizeErr[i,j] != 0.):\n RatioErr[i,j] = KappaErr[i,j]/SizeErr[i,j]\n else:\n RatioErr[i,j] = 0.\n\nf = pl.figure()\nax = f.add_subplot(1,1,1)\nim = ax.imshow(RatioErr)\nf.colorbar(im)\n\nax.set_title(r'$\\sigma_{\\kappa}/\\sigma_{R}$')\npl.show()\n\n","sub_path":"Size_Err_Ratio.py","file_name":"Size_Err_Ratio.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"257525092","text":"Lista1 = []\n\ncontador = 0\nvalores = 0\nfor item in range(1, 10 +1):\n item = int(input(\" Insira os numeros que deseja: \"))\n if item != -1:\n Lista1.append(item)\n contador = contador + 1\n valores = valores + item\n print(Lista1)\n if contador == 10:\n conta1 = valores / 10\n print (\"conta1 : \", conta1)\n else:\n conta = valores / contador\n print (conta)\n break","sub_path":"FichasPraticas/Ficha6.ex2.py","file_name":"Ficha6.ex2.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"509080585","text":"import re\n\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.web import http\nfrom twisted.web.resource import Resource\n\nfrom vumi.transports.tests.test_base import TransportTestCase\nfrom vumi.tests.utils import (get_stubbed_worker, TestResourceWorker,\n RegexMatcher, UTCNearNow)\nfrom vumi.utils import http_request_full\nfrom vumi.message import TransportMessage, TransportEvent, TransportUserMessage\n\nfrom tests.utils import MessageMaker\nfrom transports import MobivateHttpTransport\n\nclass MobivateHttpTransportTestCase(MessageMaker, TransportTestCase):\n \n transport_name = 'mobivate'\n transport_type = 'sms'\n transport_class = MobivateHttpTransport\n \n @inlineCallbacks\n def setUp(self):\n yield super(MobivateHttpTransportTestCase, self).setUp()\n self.send_path = '/sendsms'\n self.send_port = 9999\n self.config ={\n 'transport_name': self.transport_name,\n 'url': 'http://localhost:%s%s' % (self.send_port, self.send_path),\n 'user_name': 'username',\n 'password': 'password',\n 'default_origin': '55882',\n 'receive_path': '/mobivate',\n 'receive_port': 9998}\n self.worker = yield self.get_transport(self.config)\n \n def make_resource_worker(self, response, code=http.OK, send_id=None):\n w = get_stubbed_worker(TestResourceWorker, {})\n w.set_resources([\n (self.send_path, TestResource, ( response, code, send_id))])\n self._workers.append(w)\n return w.startWorker()\n\n def get_dispatched(self, rkey):\n return self._amqp.get_dispatched('vumi', rkey)\n \n @inlineCallbacks\n def test_sending_one_sms_ok(self):\n mocked_message = \"0\"\n yield self.make_resource_worker(mocked_message)\n yield self.dispatch(self.mkmsg_out())\n [smsg] = self.get_dispatched('mobivate.event')\n self.assertEqual(\n self.mkmsg_ack(\n user_message_id='1',\n sent_message_id='1'),\n TransportMessage.from_json(smsg.body))\n\n @inlineCallbacks\n def test_sending_one_sms_fail(self):\n mocked_message = \"500\\nSome internal 
issue\"\n yield self.make_resource_worker(mocked_message)\n yield self.dispatch(self.mkmsg_out(to_addr=\"256\"))\n [smsg] = self.get_dispatched('mobivate.event')\n self.assertEqual(\n self.mkmsg_delivery(\n transport_name=self.transport_name,\n delivery_status='failed',\n failure_level='service',\n failure_code=\"500\",\n failure_reason=\"Some internal issue\",\n user_message_id='1',\n sent_message_id='1'),\n TransportMessage.from_json(smsg.body))\n\n @inlineCallbacks\n def test_receiving_sms(self):\n params = (\"ORIGINATOR=61412345678&RECIPIENT=1987654&PROVIDER=telstra\"\n \"&MESSAGE_TEXT=Hello%20There!\")\n url = (\"http://localhost:%s%s/SMSfromMobiles?%s\" %\n (self.config['receive_port'], self.config['receive_path'], params))\n\n response = yield http_request_full(url, method='GET')\n self.assertEqual(response.code, http.OK)\n self.assertEqual(response.delivered_body, '0')\n \n [smsg] = self.get_dispatched('mobivate.inbound')\n sms_in = TransportMessage.from_json(smsg.body)\n self.assertEqual(self.transport_name, sms_in['transport_name'])\n self.assertEqual(\"Hello There!\", sms_in['content'])\n self.assertEqual(\"61412345678\", sms_in['from_addr'])\n self.assertEqual(\"1987654\", sms_in['to_addr'])\n\n @inlineCallbacks\n def test_receiving_delivery_report(self):\n params = (\"ORIGINATOR=61412345678&RECIPIENT=1987654&PROVIDER=telstra\"\n \"&MESSAGE_TEXT=Hello%20There!&ID=939ec52e333fbf124a87845d3a5d72e1\"\n \"&REFERENCE=ABC123&RESULT=1\")\n url = (\"http://localhost:%s%s/DeliveryReciept?%s\" %\n (self.config['receive_port'], self.config['receive_path'], params))\n\n response = yield http_request_full(url, method='GET')\n self.assertEqual(response.code, http.OK)\n self.assertEqual(response.delivered_body, '0') \n \n [smsg] = self.get_dispatched('mobivate.event')\n sms_delivery = TransportMessage.from_json(smsg.body) \n self.assertEqual(\n self.mkmsg_delivery(\n transport_name=self.transport_name,\n delivery_status='delivered',\n user_message_id='ABC123'),\n sms_delivery)\n\n\nclass TestResource(Resource):\n isLeaf = True\n \n def __init__(self, response, code=http.OK, send_id=None):\n self.response = response\n self.code = code\n self.send_id = send_id\n \n def render_GET(self, request):\n regex = re.compile('^(\\+|00|0)[0-9]*')\n request.setResponseCode(self.code)\n if (not ('RECIPIENT' in request.args) or\n regex.match(request.args['RECIPIENT'][0]) or\n not ('ORIGINATOR' in request.args) or\n not ('USER_NAME' in request.args) or\n not ('PASSWORD' in request.args) or\n not ('MESSAGE_TEXT' in request.args) or\n not ('REFERENCE' in request.args) or\n (self.send_id is not None and self.send_id != request.args['originator'][0])):\n return \"501\"\n else:\n return self.response\n","sub_path":"transports/tests/test_mobivate_http.py","file_name":"test_mobivate_http.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"364029014","text":"from neuroglancer.AlignmentScore import AlignmentScore\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom django_plotly_dash import DjangoDash\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\nalignmentPlot = DjangoDash('AlignmentPlot',\n external_stylesheets=external_stylesheets)\n\nalignmentPlot.layout = html.Div(children=[\n dcc.Graph(id='plot'),\n html.Label('Select plot type'),\n dcc.RadioItems(id='plottype',\n options=[\n {'label': 'scatter plot', 'value': 
'scatter'},\n {'label': u'box plot', 'value': 'box_plot'},\n ],\n value='scatter'\n ),\n])\n\n\n@alignmentPlot.expanded_callback(\n Output('plot', 'figure'),\n [Input('plottype', 'value')])\ndef update_figure(figure_type):\n align_score = AlignmentScore()\n fig = align_score.get(figure_type)\n return fig\n","sub_path":"neuroglancer/com_score_app.py","file_name":"com_score_app.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"280086488","text":"from selenium import webdriver\r\nimport sys\r\n\r\ndriver = webdriver.Chrome()\r\nexecutor_url = driver.command_executor._url\r\nsession_id = driver.session_id\r\npidfile=\"chrome.pid\"\r\ntarget_url = sys.argv[1] \r\n\r\ndriver.get(target_url)\r\n\r\nprint(session_id)\r\nprint(executor_url)\r\n# open in text mode: str values are written below, not bytes\r\nwith open(pidfile, 'w') as the_file:\r\n the_file.write(executor_url + '\\n')\r\n the_file.write(session_id)\r\n\r\ndef create_driver_session(session_id, executor_url):\r\n from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver\r\n\r\n # Save the original function, so we can revert our patch\r\n org_command_execute = RemoteWebDriver.execute\r\n\r\n def new_command_execute(self, command, params=None):\r\n if command == \"newSession\":\r\n # Mock the response\r\n return {'success': 0, 'value': None, 'sessionId': session_id}\r\n else:\r\n return org_command_execute(self, command, params)\r\n\r\n # Patch the function before creating the driver object\r\n RemoteWebDriver.execute = new_command_execute\r\n\r\n new_driver = webdriver.Remote(command_executor=executor_url, desired_capabilities={})\r\n new_driver.session_id = session_id\r\n\r\n # Replace the patched function with original function\r\n RemoteWebDriver.execute = org_command_execute\r\n\r\n return new_driver\r\n\r\ndriver2 = create_driver_session(session_id, executor_url)\r\nprint(driver2.current_url)\r\n","sub_path":"create_new_session.py","file_name":"create_new_session.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"373400857","text":"from django.shortcuts import render\nfrom django.http import HttpResponse \nfrom django.views.decorators.csrf import csrf_exempt \nfrom rest_framework.renderers import JSONRenderer \nfrom rest_framework.parsers import JSONParser \nfrom rest_framework import status \nfrom WhiteMarket.apps.products.models import Product \nfrom WhiteMarket.apps.products.serializers import ProductSerializer \nfrom django.contrib.gis.geoip2 import GeoIP2\n\n# Create your views here.\nclass JSONResponse(HttpResponse): \n def __init__(self, data, **kwargs): \n content = JSONRenderer().render(data) \n kwargs['content_type'] = 'application/json' \n super(JSONResponse, self).__init__(content, **kwargs) \n \n#Get Products === List Products\n#Post Products === Create Product\n@csrf_exempt \ndef product_list(request): \n if request.method == 'GET': \n products = Product.objects.all() \n products_serializer = ProductSerializer(products, many=True)\n return JSONResponse(products_serializer.data) \n \n elif request.method == 'POST': \n product_data = JSONParser().parse(request) \n product_serializer = ProductSerializer(data=product_data) \n if product_serializer.is_valid(): \n product_serializer.save() \n return JSONResponse(product_serializer.data, \\\n status=status.HTTP_201_CREATED) \n return JSONResponse(product_serializer.errors, \\\n status=status.HTTP_400_BAD_REQUEST) \n\ndef 
get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n '''\n g = GeoIP2()\n return JSONResponse(g.city(get_client_ip(request)))\n '''","sub_path":"server/WhiteMarket/WhiteMarket/apps/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"187831214","text":"import mock\nimport threading\n\nfrom chalice.cli import reloader\n\n\nDEFAULT_DELAY = 0.1\nMAX_TIMEOUT = 5.0\n\n\ndef modify_file_after_n_seconds(filename, contents, delay=DEFAULT_DELAY):\n t = threading.Timer(delay, function=modify_file, args=(filename, contents))\n t.daemon = True\n t.start()\n\n\ndef modify_file(filename, contents):\n if filename is None:\n return\n with open(filename, 'w') as f:\n f.write(contents)\n\n\ndef assert_reload_happens(root_dir, when_modified_file):\n http_thread = mock.Mock(spec=reloader.HTTPServerThread)\n p = reloader.WorkerProcess(http_thread)\n modify_file_after_n_seconds(when_modified_file, 'contents')\n rc = p.main(root_dir, MAX_TIMEOUT)\n assert rc == reloader.RESTART_REQUEST_RC\n\n\ndef test_can_reload_when_file_created(tmpdir):\n top_level_file = str(tmpdir.join('foo'))\n assert_reload_happens(str(tmpdir), when_modified_file=top_level_file)\n\n\ndef test_can_reload_when_subdir_file_created(tmpdir):\n subdir_file = str(tmpdir.join('subdir').mkdir().join('foo.txt'))\n assert_reload_happens(str(tmpdir), when_modified_file=subdir_file)\n\n\ndef test_rc_0_when_no_file_modified(tmpdir):\n http_thread = mock.Mock(spec=reloader.HTTPServerThread)\n p = reloader.WorkerProcess(http_thread)\n rc = p.main(str(tmpdir), timeout=0.2)\n assert rc == 0\n","sub_path":"tests/functional/cli/test_reloader.py","file_name":"test_reloader.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"473756567","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\b3\\parsers\\cod.py\n# Compiled at: 2016-03-08 18:42:09\n__author__ = 'ThorN, xlr8or'\n__version__ = '1.5.3'\nimport b3, b3.events, b3.parsers.punkbuster, re, string\nfrom b3.parsers.q3a.abstractParser import AbstractParser\nfrom threading import Timer\n\nclass CodParser(AbstractParser):\n gameName = 'cod'\n PunkBuster = None\n IpsOnly = False\n _guidLength = 6\n _reMap = re.compile('map ([a-z0-9_-]+)', re.IGNORECASE)\n _pbRegExp = re.compile('^[0-9a-f]{32}$', re.IGNORECASE)\n _logSync = 3\n _counter = {}\n _line_length = 65\n _line_color_prefix = ''\n _commands = {'message': 'tell %(cid)s %(message)s', \n 'say': 'say %(message)s', \n 'set': 'set %(name)s \"%(value)s\"', \n 'kick': 'clientkick %(cid)s', \n 'ban': 'banclient %(cid)s', \n 'unban': 'unbanuser %(name)s', \n 'tempban': 'clientkick %(cid)s'}\n _eventMap = {}\n _lineClear = re.compile('^(?:[0-9:]+\\\\s?)?')\n _lineFormats = (\n re.compile('^(?P[a-z]+):\\\\s?(?P.*)$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9-]{1,2});(?P[a-z]+);(?P[^;]+);(?P[^;]*);(?P-1);(?Pworld);(?P[^;]*);(?P[a-z0-9_-]+);(?P[0-9.]+);(?P[A-Z_]+);(?P[a-z_]+))$', re.IGNORECASE),\n 
re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[a-z0-9_-]+);(?P[0-9.]+);(?P[A-Z_]+);(?P[a-z_]+))$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[^;]*);(?P-1);(?P[a-z]*);(?P[^;]+);(?P[a-z0-9_-]+);(?P[0-9.]+);(?P[A-Z_]+);(?P[a-z_]+))$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[^;]*);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[a-z0-9_-]+);(?P[0-9.]+);(?P[A-Z_]+);(?P[a-z_]+))$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[^;]*);(?P[0-9]{1,2});(?Pworld);(?P[a-z]*);(?Pnone);(?P[0-9.]+);(?P[A-Z_]+);(?P[a-z_]+))$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]+);(?P[^;]+);(?P[a-z_]+))$', re.IGNORECASE),\n re.compile('^(?PJT);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]+);(?P[^;]+);)$', re.IGNORECASE),\n re.compile('^(?P[a-z]+);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[^;]+);(?P[^;]+);(?P[0-9]{1,2});(?P[^;]+);(?P.*))$', re.IGNORECASE),\n re.compile('^(?P[a-z]+);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[^;]+);(?P.*))$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[^;]+))$', re.IGNORECASE))\n _regPlayer = re.compile('^\\\\s*(?P[0-9]+)\\\\s+(?P[0-9-]+)\\\\s+(?P[0-9]+)\\\\s+(?P[0-9]+)\\\\s+(?P.*?)\\\\s+(?P[0-9]+?)\\\\s*(?P(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\\\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])):?(?P-?[0-9]{1,5})\\\\s*(?P-?[0-9]{1,5})\\\\s+(?P[0-9]+)$', re.IGNORECASE | re.VERBOSE)\n\n def startup(self):\n \"\"\"\n Called after the parser is created before run().\n \"\"\"\n if not self.config.has_option('server', 'game_log'):\n self.critical(\"Your main config file is missing the 'game_log' setting in section 'server'\")\n raise SystemExit(220)\n if self.IpsOnly:\n self.debug('Authentication method: Using IP instead of GUID!')\n self.clients.newClient('-1', guid='WORLD', name='World', hide=True, pbid='WORLD')\n if not self.config.has_option('server', 'punkbuster') or self.config.getboolean('server', 'punkbuster'):\n result = self.write('PB_SV_Ver')\n if result != '' and result[:7] != 'Unknown':\n self.info('punkbuster active: %s' % result)\n self.PunkBuster = b3.parsers.punkbuster.PunkBuster(self)\n else:\n self.warning('Punkbuster test failed: check your game server setup and B3 config!')\n self.debug('Disabling punkbuster support!')\n self._eventMap['warmup'] = self.getEventID('EVT_GAME_WARMUP')\n self._eventMap['restartgame'] = self.getEventID('EVT_GAME_ROUND_END')\n mapname = self.getMap()\n if mapname:\n self.game.mapName = mapname\n self.info('map is: %s' % self.game.mapName)\n self.debug('Forcing server cvar g_logsync to %s' % self._logSync)\n self.setCvar('g_logsync', self._logSync)\n try:\n self.game.fs_game = self.getCvar('fs_game').getString()\n except:\n self.game.fs_game = None\n self.warning('Could not query server for fs_game')\n\n try:\n self.game.fs_basepath = self.getCvar('fs_basepath').getString().rstrip('/')\n self.debug('fs_basepath: %s' % self.game.fs_basepath)\n except:\n self.game.fs_basepath = None\n self.warning('could not query server for fs_basepath')\n\n try:\n self.game.fs_homepath = self.getCvar('fs_homepath').getString().rstrip('/')\n self.debug('fs_homepath: %s' % self.game.fs_homepath)\n except:\n self.game.fs_homepath = None\n self.warning('could not query server for fs_homepath')\n\n try:\n self.game.shortversion = self.getCvar('shortversion').getString()\n self.debug('shortversion: %s' 
% self.game.shortversion)\n except:\n self.game.shortversion = None\n self.warning('Could not query server for shortversion')\n\n self.setVersionExceptions()\n self.debug('Parser started')\n return\n\n def OnK(self, action, data, match=None):\n victim = self.getClient(victim=match)\n if not victim:\n self.debug('No victim')\n self.OnJ(action, data, match)\n return None\n else:\n attacker = self.getClient(attacker=match)\n if not attacker:\n self.debug('No attacker')\n return None\n attacker.team = self.getTeam(match.group('ateam'))\n attacker.name = match.group('aname')\n victim.team = self.getTeam(match.group('team'))\n victim.name = match.group('name')\n event_key = 'EVT_CLIENT_KILL'\n if attacker.cid == victim.cid:\n event_key = 'EVT_CLIENT_SUICIDE'\n elif attacker.team != b3.TEAM_UNKNOWN and attacker.team == victim.team:\n event_key = 'EVT_CLIENT_KILL_TEAM'\n victim.state = b3.STATE_DEAD\n data = (float(match.group('damage')), match.group('aweap'), match.group('dlocation'), match.group('dtype'))\n return self.getEvent(event_key, data=data, client=attacker, target=victim)\n\n def OnD(self, action, data, match=None):\n victim = self.getClient(victim=match)\n if not victim:\n self.debug('No victim - attempt join')\n self.OnJ(action, data, match)\n return None\n else:\n attacker = self.getClient(attacker=match)\n if not attacker:\n self.debug('No attacker')\n return None\n attacker.team = self.getTeam(match.group('ateam'))\n attacker.name = match.group('aname')\n victim.team = self.getTeam(match.group('team'))\n victim.name = match.group('name')\n eventkey = 'EVT_CLIENT_DAMAGE'\n if attacker.cid == victim.cid:\n eventkey = 'EVT_CLIENT_DAMAGE_SELF'\n elif attacker.team != b3.TEAM_UNKNOWN and attacker.team == victim.team:\n eventkey = 'EVT_CLIENT_DAMAGE_TEAM'\n data = (float(match.group('damage')), match.group('aweap'), match.group('dlocation'), match.group('dtype'))\n return self.getEvent(eventkey, data=data, client=attacker, target=victim)\n\n def OnQ(self, action, data, match=None):\n client = self.getClient(match)\n if client:\n client.disconnect()\n elif match.group('cid') in self._counter:\n cid = match.group('cid')\n self._counter[cid] = 'Disconnected'\n self.debug('Slot %s has disconnected or was forwarded to our http download location: removing from authentication queue...' % cid)\n return\n\n def OnJ(self, action, data, match=None):\n codguid = match.group('guid')\n cid = match.group('cid')\n name = match.group('name')\n if len(codguid) < self._guidLength:\n self.verbose2('Invalid GUID: %s. GUID length set to %s' % (codguid, self._guidLength))\n codguid = None\n client = self.getClient(match)\n if client:\n self.verbose2('Client object already exists')\n if not self.PunkBuster:\n if self.IpsOnly:\n if name != client.name:\n self.debug('This is not the correct client (%s <> %s): disconnecting..' % (name, client.name))\n client.disconnect()\n return\n self.verbose2('client.name in sync: %s == %s' % (name, client.name))\n else:\n if codguid != client.guid:\n self.debug('This is not the correct client (%s <> %s): disconnecting...' 
% (codguid, client.guid))\n client.disconnect()\n return\n self.verbose2('client.guid in sync: %s == %s' % (codguid, client.guid))\n client.state = b3.STATE_ALIVE\n client.name = name\n return self.getEvent('EVT_CLIENT_JOIN', client=client)\n else:\n if self._counter.get(cid) and self._counter.get(cid) != 'Disconnected':\n self.verbose('cid: %s already in authentication queue: aborting join' % cid)\n return\n self._counter[cid] = 1\n t = Timer(2, self.newPlayer, (cid, codguid, name))\n t.start()\n self.debug('%s connected: waiting for authentication...' % name)\n self.debug('Our authentication queue: %s' % self._counter)\n return\n\n def OnA(self, action, data, match=None):\n client = self.getClient(match)\n if not client:\n self.debug('No client - attempt join')\n self.OnJ(action, data, match)\n client = self.getClient(match)\n if not client:\n return None\n client.name = match.group('name')\n actiontype = match.group('type')\n self.verbose('On action: %s: %s' % (client.name, actiontype))\n return self.getEvent('EVT_CLIENT_ACTION', data=actiontype, client=client)\n\n def OnSay(self, action, data, match=None):\n client = self.getClient(match)\n if not client:\n self.debug('No client - attempt join')\n self.OnJ(action, data, match)\n client = self.getClient(match)\n if not client:\n return None\n data = match.group('text')\n if data and ord(data[:1]) == 21:\n data = data[1:]\n if self.encoding:\n try:\n data = data.decode(self.encoding)\n except Exception as msg:\n self.warning('ERROR: decoding data: %r', msg)\n\n if client.name != match.group('name'):\n client.name = match.group('name')\n return self.getEvent('EVT_CLIENT_SAY', data=data, client=client)\n\n def OnSayteam(self, action, data, match=None):\n client = self.getClient(match)\n if not client:\n self.debug('No client - attempt join')\n self.OnJ(action, data, match)\n client = self.getClient(match)\n if not client:\n return None\n data = match.group('text')\n if data and ord(data[:1]) == 21:\n data = data[1:]\n if self.encoding:\n try:\n data = data.decode(self.encoding)\n except Exception as msg:\n self.warning('ERROR: decoding data: %r', msg)\n\n if client.name != match.group('name'):\n client.name = match.group('name')\n return self.getEvent('EVT_CLIENT_TEAM_SAY', data=data, client=client)\n\n def OnTell(self, action, data, match=None):\n client = self.getClient(match)\n tclient = self.getClient(attacker=match)\n if not client:\n self.debug('No client - attempt join')\n self.OnJ(action, data, match)\n client = self.getClient(match)\n if not client:\n return None\n data = match.group('text')\n if data and ord(data[:1]) == 21:\n data = data[1:]\n if self.encoding:\n try:\n data = data.decode(self.encoding)\n except Exception as msg:\n self.warning('ERROR: decoding data: %r', msg)\n\n client.name = match.group('name')\n return self.getEvent('EVT_CLIENT_PRIVATE_SAY', data=data, client=client, target=tclient)\n\n def OnInitgame(self, action, data, match=None):\n options = re.findall('\\\\\\\\([^\\\\\\\\]+)\\\\\\\\([^\\\\\\\\]+)', data)\n for o in options:\n if o[0] == 'mapname':\n self.game.mapName = o[1]\n elif o[0] == 'g_gametype':\n self.game.gameType = o[1]\n elif o[0] == 'fs_game':\n self.game.modName = o[1]\n else:\n setattr(self.game, o[0], o[1])\n\n self.verbose('...self.console.game.gameType: %s' % self.game.gameType)\n self.game.startRound()\n return self.getEvent('EVT_GAME_ROUND_START', data=self.game)\n\n def OnExitlevel(self, action, data, match=None):\n t = Timer(60, self.clients.sync)\n t.start()\n self.game.mapEnd()\n 
return self.getEvent('EVT_GAME_EXIT', data=data)\n\n def OnItem(self, action, data, match=None):\n guid, cid, name, item = string.split(data, ';', 3)\n client = self.clients.getByCID(cid)\n if client:\n return self.getEvent('EVT_CLIENT_ITEM_PICKUP', data=item, client=client)\n else:\n return\n\n def setVersionExceptions(self):\n \"\"\"\n Dummy to enable shortversionexceptions for cod2.\n Use this function in inheriting parsers to override certain vars based on ie. shortversion.\n \"\"\"\n pass\n\n def getTeam(self, team):\n \"\"\"\n Return a B3 team given the team value.\n :param team: The team value\n \"\"\"\n if team == 'allies':\n return b3.TEAM_BLUE\n else:\n if team == 'axis':\n return b3.TEAM_RED\n return b3.TEAM_UNKNOWN\n\n def connectClient(self, ccid):\n \"\"\"\n Return the client matchign the given slot number.\n :param ccid: The client slot number\n \"\"\"\n players = self.getPlayerList()\n self.verbose('connectClient() = %s' % players)\n for cid, p in players.iteritems():\n if int(cid) == int(ccid):\n self.debug('%s found in status/playerList' % p['name'])\n return p\n\n def newPlayer(self, cid, codguid, name):\n \"\"\"\n Build a new client using data in the authentication queue.\n :param cid: The client slot number\n :param codguid: The client GUID\n :param name: The client name\n \"\"\"\n if not self._counter.get(cid):\n self.verbose('newPlayer thread no longer needed: key no longer available')\n return\n else:\n if self._counter.get(cid) == 'Disconnected':\n self.debug('%s disconnected: removing from authentication queue' % name)\n self._counter.pop(cid)\n return\n self.debug('newClient: %s, %s, %s' % (cid, codguid, name))\n sp = self.connectClient(cid)\n if sp and self.PunkBuster:\n self.debug('sp: %s' % sp)\n if not re.match(self._pbRegExp, sp['pbid']):\n self.debug('PB-id is not valid: giving it another try')\n self._counter[cid] += 1\n t = Timer(4, self.newPlayer, (cid, codguid, name))\n t.start()\n return\n if self.IpsOnly:\n guid = sp['ip']\n pbid = sp['pbid']\n else:\n guid = sp['pbid']\n pbid = guid\n ip = sp['ip']\n if self._counter.get(cid):\n self._counter.pop(cid)\n else:\n return\n elif sp:\n if self.IpsOnly:\n codguid = sp['ip']\n if not codguid:\n self.warning('Missing or wrong CodGuid and PunkBuster is disabled: cannot authenticate!')\n if self._counter.get(cid):\n self._counter.pop(cid)\n return\n guid = codguid\n pbid = ''\n ip = sp['ip']\n if self._counter.get(cid):\n self._counter.pop(cid)\n else:\n return\n else:\n if self._counter.get(cid) > 10:\n self.debug('Could not auth %s: giving up...' 
% name)\n if self._counter.get(cid):\n self._counter.pop(cid)\n return\n if self._counter.get(cid):\n self.debug('%s not yet fully connected: retrying...#:%s' % (name, self._counter.get(cid)))\n self._counter[cid] += 1\n t = Timer(4, self.newPlayer, (cid, codguid, name))\n t.start()\n else:\n self.warning('All authentication attempts failed')\n return\n client = self.clients.newClient(cid, name=name, ip=ip, state=b3.STATE_ALIVE, guid=guid, pbid=pbid, data={'codguid': codguid})\n self.queueEvent(self.getEvent('EVT_CLIENT_JOIN', client=client))\n return\n\n def unban(self, client, reason='', admin=None, silent=False, *kwargs):\n \"\"\"\n Unban a client.\n :param client: The client to unban\n :param reason: The reason for the unban\n :param admin: The admin who unbanned this client\n :param silent: Whether or not to announce this unban\n \"\"\"\n if self.PunkBuster:\n if client.pbid:\n result = self.PunkBuster.unBanGUID(client)\n if result:\n admin.message('^3Unbanned^7: %s^7: %s' % (client.exactName, result))\n if admin:\n variables = self.getMessageVariables(client=client, reason=reason, admin=admin)\n fullreason = self.getMessage('unbanned_by', variables)\n else:\n variables = self.getMessageVariables(client=client, reason=reason)\n fullreason = self.getMessage('unbanned', variables)\n if not silent and fullreason != '':\n self.say(fullreason)\n elif admin:\n admin.message('%s ^7unbanned but has no punkbuster id' % client.exactName)\n else:\n name = self.stripColors(client.exactName)\n result = self.write(self.getCommand('unban', name=name, reason=reason))\n if admin:\n admin.message(result)\n\n def getMaps(self):\n \"\"\"\n Return the available maps/levels name\n \"\"\"\n maps = self.getCvar('sv_mapRotation')\n nmaps = []\n if maps:\n maps = re.findall(self._reMap, maps[0])\n for m in maps:\n if m[:3] == 'mp_':\n m = m[3:]\n nmaps.append(m.title())\n\n return nmaps\n\n def getNextMap(self):\n \"\"\"\n Return the next map/level name to be played.\n \"\"\"\n if not self.game.mapName:\n return\n else:\n maps = self.getCvar('sv_mapRotation')\n if maps:\n maps = re.findall(self._reMap, maps[0])\n gmap = self.game.mapName.strip().lower()\n found = False\n nmap = ''\n for nmap in maps:\n nmap = nmap.strip().lower()\n if found:\n found = nmap\n break\n elif nmap == gmap:\n found = True\n\n if found is True:\n nmap = maps[0].strip().lower()\n if found:\n if nmap[:3] == 'mp_':\n nmap = nmap[3:]\n return nmap.title()\n return\n return\n return\n\n def sync(self):\n \"\"\"\n For all connected players returned by self.get_player_list(), get the matching Client\n object from self.clients (with self.clients.get_by_cid(cid) or similar methods) and\n look for inconsistencies. 
If required call the client.disconnect() method to remove\n a client from self.clients.\n \"\"\"\n self.debug('synchronising clients...')\n plist = self.getPlayerList(maxRetries=4)\n mlist = {}\n for cid, c in plist.iteritems():\n client = self.clients.getByCID(cid)\n if client:\n if client.guid and 'guid' in c and not self.IpsOnly:\n if client.guid == c['guid']:\n self.debug('in-sync %s == %s', client.guid, c['guid'])\n mlist[str(cid)] = client\n else:\n self.debug('no-sync %s <> %s', client.guid, c['guid'])\n client.disconnect()\n elif client.ip and 'ip' in c:\n if client.ip == c['ip']:\n self.debug('in-sync %s == %s', client.ip, c['ip'])\n mlist[str(cid)] = client\n else:\n self.debug('no-sync %s <> %s', client.ip, c['ip'])\n client.disconnect()\n else:\n self.debug('no-sync: no guid or ip found')\n\n return mlist\n\n def authorizeClients(self):\n \"\"\"\n For all connected players, fill the client object with properties allowing to find\n the user in the database (usualy guid, or punkbuster id, ip) and call the\n Client.auth() method.\n \"\"\"\n players = self.getPlayerList(maxRetries=4)\n self.verbose('authorizeClients() = %s' % players)\n for cid, p in players.iteritems():\n sp = self.clients.getByCID(cid)\n if sp:\n sp.ip = p.get('ip', sp.ip)\n sp.pbid = p.get('pbid', sp.pbid)\n if self.IpsOnly:\n sp.guid = p.get('ip', sp.guid)\n else:\n sp.guid = p.get('guid', sp.guid)\n sp.data = p\n sp.auth()","sub_path":"pycfiles/b3-1.10.10-py2.7/cod.py","file_name":"cod.py","file_ext":"py","file_size_in_byte":23655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"408787009","text":"'''Un alumno desea saber cual será su promedio general en las tres materias mas difíciles\nque cursa y cual será el promedio que obtendrá en cada una de ellas. Estas materias se\nevalúan como se muestra a continuación:\nLa calificación de Matemáticas se obtiene de la sig. manera:\nExamen 90%\nPromedio de tareas 10%\nEn esta materia se pidió un total de tres tareas.\nLa calificación de Física se obtiene de la sig. manera:\nExamen 80%\nPromedio de tareas 20%\nEn esta materia se pidió un total de dos tareas.\nLa calificación de Química se obtiene de la sig. 
manera:\nExamen 85%\nPromedio de tareas 15%\nEn esta materia se pidió un promedio de tres tareas.'''\n\nexamenm = float(input(\"Escriba la calificacion del examen de matematicas: \"))\ntarem1 = float(input(\"Escriba la calificacion de la tarea 1 de matematicas: \"))\ntarem2 = float(input(\"Escriba la calificacion de la tarea 2 de matematicas: \"))\ntarem3 = float(input(\"Escriba la calificacion de la tarea 3 de matematicas: \"))\nexamenf= float(input(\"Escriba la calificacion del examen de fisica: \"))\ntaref1 = float(input(\"Escriba la calificacion de la tarea 1 de fisica: \"))\ntaref2 = float(input(\"Escriba la calificacion de la tarea 2 de fisica: \"))\nexamenq = float(input(\"Escriba la calificacion del examen de quimica: \"))\ntareq1 = float(input(\"Escriba la calificacion de la tarea 1 de quimica: \"))\ntareq2 = float(input(\"Escriba la calificacion de la tarea 2 de quimica: \"))\ntareq3 = float(input(\"Escriba la calificacion de la tarea 3 de quimica: \"))\n\npem = examenm * 0.90\npromtarem = (tarem1 + tarem2 + tarem3) / 3\nporcenttm = promtarem * 0.10\ntotalm = pem + porcenttm\n\npef = examenf * 0.80\npromtaref = (taref1 + taref2 ) / 2\nporcenttf = promtaref * 0.20\ntotalf = pef + porcenttf\n\npeq = examenq * 0.85\npromtareq = (tareq1 + tareq2 + tareq3) / 3\nporcenttq = promtareq * 0.15\ntotalq = peq + porcenttq\n\nprommat = (totalm + totalf + totalq)/3\n\nprint(f\"La calificacion final de matematicas es: {totalm}\")\nprint(f\"La calificacion final de fisica es: {totalf}\")\nprint(f\"La calificacion final de quimica es: {totalq}\")\nprint(f\"El promedio de las tres materias es: {prommat}\")\n","sub_path":"propuesto 10.py","file_name":"propuesto 10.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"148873187","text":"import time\n\nfrom binary_search_tree import BSTNode\n\n\n\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nduplicates = [] # Return the list of duplicates in this data structure\n\n#Replace the nested for loops below with your improvements\n# for name_1 in names_1:\n# for name_2 in names_2:\n# if name_1 == name_2:\n# duplicates.append(name_1)\n\n# the runtime complexity is O(n**2), or to be more specific O(len(names_1)*len(names_2))\n\n\"\"\"solution 1\"\"\"\n\n# bst = BSTNode(\"named entries\")\n\n# for name in names_1:\n\n# bst.insert(name)\n\n# for name in names_2:\n\n# if bst.contains(name):\n\n# duplicates.append(name)\n\n\n# ---------- Stretch Goal -----------\n# Python has built-in tools that allow for a very efficient approach to this problem\n# What's the best time you can accomplish? 
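(For reference, the set intersection used in solution 2 below runs in O(len(names_1) + len(names_2)) expected time, versus the O(len(names_1)*len(names_2)) nested loops above.) 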
There are no restrictions on techniques or data\n# structures, but you may not import any additional libraries that you did not write yourself.\n\n\n\n\"\"\" solution 2: fastest solution\"\"\"\n\nnames_1 = set(names_1)\nnames_2 = set(names_2)\n\nduplicates = list(names_1.intersection(names_2))\n\n\"\"\" solution 3 \"\"\"\n\n# storage = {name:0 for name in names_1}\n\n# for name in names_2:\n\n#     try:\n\n#         storage[name] += 1\n\n#     except:\n\n#         continue\n\n# duplicates = [key for key in storage.keys() if storage[key] == 1]\n\n\n\"\"\" solution using only lists \"\"\"\n\n# from collections import Counter\n\n# all_names = names_1 + names_2\n\n# cnt = Counter(all_names)\n\n# duplicates = [k for k, v in cnt.items() if v > 1]\n\n# this solution does not account for duplicates in lists themselves\n\n\nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n\n\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"325795544","text":"\"\"\"Activity views.\"\"\"\n\nimport django_filters\n\nfrom django.conf.urls import url\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic import CreateView, DetailView, UpdateView\nfrom django_filters.views import FilterView\n\nfrom .mixins import AJAXRedirectMixin\nfrom ..forms import activity as activity_forms\nfrom ..models.activity import Activity\n\n\nclass ActivityCreateView(CreateView):\n    \"\"\"Create an Activity.\"\"\"\n\n    model = Activity\n    template_name = 'activity/create.html'\n    form_class = activity_forms.ActivityForm\n\n    def get_success_url(self):\n        \"\"\"Go to the Activity details view.\"\"\"\n\n        return reverse_lazy('activity_details', args=[self.object.id])\n\n\nclass ActivityDetailView(DetailView):\n    \"\"\"Activity Details\"\"\"\n\n    model = Activity\n    template_name = 'activity/activity.html'\n\n\nclass ActivityInlineDetailView(DetailView):\n    \"\"\"Display Activity details in a table row.\"\"\"\n\n    model = Activity\n    template_name = 'activity/inline_details.html'\n\n\nclass ActivityEditView(AJAXRedirectMixin, UpdateView):\n    \"\"\"Edit Activity details.\"\"\"\n\n    model = Activity\n    template_name = 'activity/edit.html'\n    form_class = activity_forms.ActivityForm\n\n    def get_success_url(self):\n        \"\"\"Go to the Activity details view.\"\"\"\n\n        return reverse_lazy('activity_details', args=[self.object.id])\n\n\nclass ActivityInlineEditView(AJAXRedirectMixin, UpdateView):\n    \"\"\"Display a form in a table row.\"\"\"\n\n    model = Activity\n    template_name = 'activity/inline_edit.html'\n    fields = [\n        'id',\n        'short_description',\n        'long_description',\n        'to_dos',\n        'places',\n    ]\n\n    def get_success_url(self):\n        \"\"\"Go to the Activity details view.\"\"\"\n\n        return reverse_lazy('activity_inline_details', args=[self.object.id])\n\n\nclass ActivityFilter(django_filters.FilterSet):\n    \"\"\"Filter for Activities.\"\"\"\n\n    all_choices = [('', '---------')]\n\n    id = django_filters.CharFilter(  # pylint:disable=invalid-name\n        lookup_expr='icontains',\n        help_text='',\n    )\n\n    short_description = django_filters.CharFilter(\n        lookup_expr='icontains',\n        help_text='',\n    )\n\n    long_description = django_filters.CharFilter(\n        lookup_expr='icontains',\n        help_text='',\n    )\n\n    class Meta:\n        model = Activity\n        fields = ['id', 'short_description', 'long_description']\n        order_by = ['id']\n\n\nclass 
ActivityListView(AJAXRedirectMixin, FilterView):\n    \"\"\"List Activities and provide a filter.\"\"\"\n\n    model = Activity\n    template_name = 'activity/filtered_list.html'\n    filterset_class = ActivityFilter\n\n\nurlpatterns = [\n    url('^create$', ActivityCreateView.as_view(), name='activity_create'),\n    url(\n        r'^(?P<pk>\\d+)/$',\n        ActivityDetailView.as_view(),\n        name='activity_details',\n    ),\n    url(\n        r'^(?P<pk>\\d+)/edit$',\n        ActivityEditView.as_view(),\n        name='activity_edit'\n    ),\n    url(r'^list/$', ActivityListView.as_view(), name='activity_list'),\n    url(\n        r'^inline/(?P<pk>\\d+)/$',\n        ActivityInlineDetailView.as_view(),\n        name='activity_inline_details',\n    ),\n    url(\n        r'^inline/(?P<pk>\\d+)/edit$',\n        ActivityInlineEditView.as_view(),\n        name='activity_inline_edit',\n    ),\n]\n","sub_path":"holiday_planner/holiday_place/views/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"539419722","text":"# ghifarul azhar #\r\n# 15200234 #\r\n\r\nprint(\"//=========================================================\")\r\nprint(\"//                 Data Pembeli Baju                       \")\r\nprint(\"//=========================================================\")\r\ndef queue():\r\n    s = []\r\n    return s\r\ndef enqueue (s,i):\r\n    s.insert(0,i)\r\n    return s\r\ndef dequeue (s):\r\n    return s.pop()\r\ndef rear(s):\r\n    return(s[0])\r\ndef front (s):\r\n    return(s[len(s)-1])\r\ndef size (s):\r\n    return len(s)\r\ndef IsEmpety(s):\r\n    return s == []\r\n\r\ndef Ke2():\r\n    s = queue()\r\n    k = ''\r\n    while True:\r\n        banyak = int(input(\"Masukan Banyak Pembeli secara keseluruhan = \"))\r\n        for j in range(banyak):\r\n            orang = input(\"Masukan Nama Pembeli ke %i yang masuk di antrian = \" %(j+1))\r\n            enqueue(s,orang)\r\n        s.reverse()\r\n        print(\"Data Nama Seluruh Pembeli Adalah : %s\"%(s))\r\n        s.reverse()\r\n        o = input(\"Masukan Nama Pembeli yang dicari = \")\r\n        ditemukan = \"t\"\r\n        itung = 0\r\n        while ditemukan=='t':\r\n            if o == front(s):\r\n                print(\"Congrats Pembeli Sudah Ditemukan\")\r\n                ditemukan = 'y'\r\n                print(\"Pembeli berada pada antrian yang ke-\",str(itung-1+2),\"Dari Data Nama Seluruh Pembeli\")\r\n                print(\"Dengan Looping\",str(itung-1+1),\"Kali\")\r\n            elif o != front(s):\r\n                masukan = dequeue(s)\r\n                enqueue(s,masukan)\r\n                ditemukan = 't'\r\n                s.reverse()\r\n                print(\"Looping %i = %s\" % (itung+1, s))\r\n                s.reverse()\r\n                itung+=1\r\n                if itung > len(s):\r\n                    print(\"Maaf Nama yang Dimaksud Tidak Ada\")\r\n                    print()\r\n                    print(\"Silahkan tambahkan nama jika ingin memesan dengan ketik (yes/no) dibawah ini \")\r\n                    ditemukan = \"y\"\r\n        k = input(\"Apakah Masih ada yang dibantu? 
--Ketik (yes/no)-- ?\")\r\n if k != 'yes':\r\n print(\"||=======================================================||\")\r\n print(\"||==========================Thanks You===================||\")\r\n print(\"||==================Data Pembeli Baju ===================||\")\r\n print(\"||==================Ghifarul Azhar ======================||\")\r\n print(\"||=======================================================||\")\r\n break\r\n else:\r\n print(\"Ketik Nama yang ingin memesan \")\r\n\r\n\r\nKe2()\r\n\r\n\r\n\r\n","sub_path":"Ghifarul Azhar - 15200234.py","file_name":"Ghifarul Azhar - 15200234.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"416315327","text":"def adding_two_number(arr1, arr2):\n \"\"\"\n if num1 is 123 then arr1 = [3,2,1]\n\n :param arr1:\n :param arr2:\n\n :return:\n \"\"\"\n\n if len(arr1) > len(arr2):\n return adding_two_number(arr2, arr1)\n\n out = []\n extra = 0\n\n for i, (a1, a2) in enumerate(zip(arr1, arr2)):\n e = a1 + a2 + extra\n out.append(e % 10)\n extra = e // 10\n\n for i in range(len(arr1), len(arr2)):\n e = arr2[i] + extra\n out.append(e % 10)\n extra = e // 10\n\n if extra:\n out.append(extra)\n\n return out\n\n\nif __name__ == '__main__':\n print(adding_two_number([9, 9, 9], [9, 9, 9]))\n","sub_path":"algo/addingTwoNumber.py","file_name":"addingTwoNumber.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"609496020","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 19 15:40:02 2018\n\nCode to look at results from convolution\n\n@author: ppxee\n\"\"\"\n\n\n### Import required libraries ###\nimport matplotlib.pyplot as plt #for plotting\nfrom astropy.io import fits #for handling fits\n#from astropy.table import Table #for handling tables\nimport numpy as np #for handling arrays\n#import math\n#from astropy.stats import median_absolute_deviation\nimport vari_funcs_no06 #my module to help run code neatly\nfrom matplotlib.colors import LogNorm\nplt.close('all') #close any open plots\n\ncombined = fits.open('stars_mag_flux_table.fits')\nalldata = combined[1].data\ncombined = fits.open('stars_mag_flux_convS.fits')\nalldataconv = combined[1].data\nstars = fits.open('starsfwhm.fits')\nsdata = stars[1].data\n\n# remove saturated stars\nsdata = sdata[alldata['MAG_APER_5_05B'] >= 12]\nalldata = alldata[alldata['MAG_APER_5_05B'] >= 12]\nalldataconv = alldataconv[alldataconv['MAG_APER_5_05B'] >= 12]\n\n## Create flux stack\n#allflux = vari_funcs_no06.flux5_stacks(alldata)\n#allfluxconv = vari_funcs_no06.flux5_stacks(alldataconv)\n\n\n# Create mag stack\n#allflux = vari_funcs_no06.mag5_stacks(alldata)\n#allflux, alldata2 = vari_funcs_no06.no99(allflux, alldata)\n#allfluxconv = vari_funcs_no06.mag5_stacks(alldataconv)\n#allfluxconv, alldataconv2 = vari_funcs_no06.no99(allfluxconv, alldataconv)\n\n# Remove negative values\n#allflux[allflux <= 0] = np.nan\n#mask = ~np.isnan(allflux).any(axis=1)\n#allflux = allflux[mask]\n##allfluxconv[allfluxconv <= 0] = np.nan\n##mask = ~np.isnan(allfluxconv).any(axis=1)\n#allfluxconv = allfluxconv[mask]\n#\n## Normalise\n#allflux = vari_funcs_no06.normalise_flux(allflux)\n##allflux = vari_funcs_no06.normalise_mag(allflux)\n##allfluxconv = vari_funcs_no06.psf_correct_flux(allflux, allflux, 'median')\n#allfluxconv = vari_funcs_no06.normalise_flux(allfluxconv)\n##allfluxconv = 
vari_funcs_no06.normalise_mag(allfluxconv)\n\navgflux = np.array([np.median(sdata['FWHM_05B']), \n np.median(sdata['FWHM_07B']), \n np.median(sdata['FWHM_08B']), \n np.median(sdata['FWHM_09B']), \n np.median(sdata['FWHM_10B']), \n np.median(sdata['FWHM_11B']), \n np.median(sdata['FWHM_12B'])]) *3600\n\navgfluxconv = np.array([np.median(alldataconv['FWHM_WORLD_05B']), \n np.median(alldataconv['FWHM_WORLD_07B']), \n np.median(alldataconv['FWHM_WORLD_08B']), \n np.median(alldataconv['FWHM_WORLD_09B']), \n np.median(alldataconv['FWHM_WORLD_10B']), \n np.median(alldataconv['FWHM_WORLD_11B']), \n np.median(alldataconv['FWHM_WORLD_12B'])]) *3600\n\n## find and plot average\n#avgflux = np.median(allflux, axis=0)\nvari_funcs_no06.avg_lightcurve(avgflux)\n#plt.title('Normalised Flux of Stars with Unconvolved')\n#plt.ylim(0.9986, 1.0004)\n#plt.ylim(21.36, 21.51)\nplt.title('Median FWHM of stars before convolution')\nplt.ylim(0.74, 0.88)\nplt.ylabel('FWHM (arcsec)')\n#plt.ylabel('Normalised Flux')\nplt.savefig('plots/Lightcurves/FWHMbefore')\n\n#avgfluxconv = np.median(allfluxconv, axis=0)\nvari_funcs_no06.avg_lightcurve(avgfluxconv)\n#plt.title('Normalised Flux of Stars curve with Convolved')\n#plt.ylim(0.9986, 1.0004)\n#plt.ylim(21.36, 21.51)\n#plt.ylim(0.000205,0.000243)\nplt.title('Median FWHM of stars after convolution')\nplt.ylim(0.74, 0.88)\nplt.ylabel('FWHM (arcsec)')\n#plt.ylabel('Normalised Flux')\nplt.savefig('plots/Lightcurves/FWHMafter')\n","sub_path":"invextconv.py","file_name":"invextconv.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"116081588","text":"import sys\nimport os\nimport string\nimport re\nimport urlparse\nimport urllib\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n# threshold for displaying trace message. Must be <, so higher means more\ntraceLevel = 0\n\ndef trace(s, level = 1):\n \"\"\"\n is compared to rdfAppDef.traceLevel. 
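(For example, with the module default of traceLevel = 0, trace('loading') at the implicit level of 1 is suppressed, while trace('loading', level = 0) is written to stderr.) 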
Higher means less likely to display\n \"\"\"\n if level > traceLevel:\n return\n sys.stderr.write(\"rdfAppDef.py: %s\\n\" % s)\n\nclass InvalidFileError(Exception):\n def __init__(self, path):\n self.path = path\n def __str__(self):\n return repr(self.path)\n\nclass ScriptError(Exception):\n def __init__(self, commandLine, exitCode):\n self.commandLine = commandLine\n self.exitCode = exitCode\n def __str__(self):\n return repr(\"Script error calling %s:\\nError %s.\" % (self.commandLine, os.strerror(self.exitCode)))\n\nclass LoadFailure(Exception):\n def __init__(self, fileUri):\n self.message = \"File %s failed to load.\" % (fileUri)\n self.fileUri = fileUri\n def __str__(self):\n return repr(self.message)\n\nclass NoGraph(Exception):\n def __init__(self, fileUri):\n self.message = \"Dependency %s has no graph assignments.\" % (fileUri)\n self.fileUri = fileUri\n def __str__(self):\n return repr(self.message)\n\nclass NoSpecification(Exception):\n def __init__(self, parameterName):\n self.message = \"No specification provided for %s\" % parameterName\n self.parameterName = parameterName\n def __str__(self):\n return repr(self.message)\n\nclass NotAnRdfFile(Exception):\n def __init__(self, fileName):\n self.message = \"%s is not subsumed by app:RDFFile.\" % fileName\n self.fileName = fileName\n def __str__(self):\n return repr(self.message)\n\nclass ApplicationDefinitionError(Exception):\n def __init__(self, bindings):\n if len(bindings) == 0:\n self.message = \"No Application Definition Graph Found\"\n else:\n self.message = \"Should be exactly one Application Definition. Instead there were these:%s\" % map(lambda b:b['appDef'], bindings)\n self.bindings = bindings\n def __str__(self):\n return repr(self.message)\n\nSparqlPrefixes = \"\"\"\nPrefix app: \nPrefix rdfs: \n\"\"\"\n\n\ndef uriToPath(uri):\n \"\"\"\n returns translated to a pathname, if indicates a file, else None\n \"\"\"\n unquotedUri = uri.replace(\"<\", \"\").replace(\">\", \"\")\n parse = urlparse.urlparse(unquotedUri)\n if parse.scheme == 'file' or parse.scheme== '':\n return parse.path\n\ndef maybeAngleQuote(uri):\n \"\"\"\n ensures that angle quotes are provided for if appropriate\n \"\"\"\n\n uri = uri.strip()\n hasAnglequotes = re.compile('^<.*>$')\n needsAngleQuotes = re.compile(\"^https?://|^file:|^/\", re.IGNORECASE)\n\n if hasAnglequotes.match(uri):\n return uri\n elif needsAngleQuotes.match(uri):\n return \"<%s>\" % uri\n else:\n return uri\n\ndef clearGraph(graphUri, updateFn):\n \"\"\"\n Side-effect: drops \n Side-effect: removes all members of app:loadedFiles assigned to \n Where:\n := fn() -> None\n \"\"\"\n trace (\"clearing graph:%s\" % maybeAngleQuote(graphUri), level = 0)\n\n updateFn(SparqlPrefixes + \"Drop Graph %s\" % maybeAngleQuote(graphUri))\n\ndef isInvalidFile(path, askFn):\n \"\"\"\n Returns True if fileUri is of class app:InvalidFile.\n Where:\n is a file path, eg 'Data/blah.ttl'\n Note this is typically assigned by the formatCheck function.\n \"\"\"\n path = path.strip(' <>')\n template = SparqlPrefixes + \"\"\"\n Ask\n Where\n {\n Bind (URI(\"$Path\") as ?file)\n Graph ?containingGraph\n {\n ?file a/rdfs:subClassOf* app:InvalidFile.\n }\n }\n \"\"\"\n query = string.Template(template).substitute(\n Path = path)\n trace(\"query in isInvalidFile:%s\" % query)\n return askFn(query)\n\ndef uniqueApplicationDefinition(queryFn):\n \"\"\"\n Returns the URI of the only graph G for which \n Graph ?g { ?g a app:ApplicationDefinition. 
...}\n    Raises an ApplicationDefinitionError if there is not exactly one\n    such graph.\n    Note: this is the default value of applicationDefinitionFn parameters\n    to functions defined in rdfAppDef.py which require such parameters.\n    \"\"\"\n    query = SparqlPrefixes + \"\"\"\n    Select Distinct ?appDef\n    Where\n    {\n      Graph ?appDef\n      {\n        ?appDef a app:ApplicationDefinition.\n      }\n    }\n    \"\"\"\n    bindings = queryFn(query)\n    if len(bindings) != 1:\n        raise ApplicationDefinitionError(bindings)\n    appDefs = map(lambda b: b['appDef'], bindings)\n    trace (\"type of appDefs[0]:%s\" % type(appDefs[0]))\n    return appDefs[0]\n\ndef moveToTmp (fileName):\n    \"\"\"\n    SIDE-EFFECT: moves  to /tmp/\n    Returns: new pathname of /tmp/, or None if  not found\n    NOTE: this is the default function to invalidate a file whose dependencies\n    have been made invalid.\n    \"\"\"\n    splitFileName = fileName.split('/')\n    target = os.path.join(\"/tmp/\" , splitFileName[len(splitFileName)-1:][0])\n    if os.path.exists(fileName):\n        trace(\"moving %s to %s\" % (fileName, target), 0)\n        os.rename(fileName, target)\n        return target\n\n    \ndef removeInvalidDependencies (queryFn, updateFn, \n                               appDefGraphFn=uniqueApplicationDefinition,\n                               invalidateFileFn=moveToTmp):\n    \"\"\"\n    Returns: the set of any graphs cleared when invalidating dependencies\n    Side-effect: any files dependent on non-existent files are also removed.\n    Side-effect: the app:LoadedFiles graph is dropped if any files were removed.\n    Where:\n     is a function f(query) -> [, ...]\n     will be a SPARQL query into file dependencies.\n     is a function f(queryFn) -> \n     is the URI of the appropriate Application Definition Graph.\n     := fn(dependency) -> newPath, with side-effect that \n      has been moved to , or removed completely if  is None.\n    \"\"\"\n    template = SparqlPrefixes + \"\"\"\n    Select Distinct ?dependent ?dependency ?graph\n    Where\n    {\n      Bind(URI(\"$AppDef\") as ?applicationDefinition)\n      Graph ?applicationDefinition\n      {\n        ?dependent app:informedByFile+ ?dependency.\n        ?dependent a/rdfs:subClassOf* app:RDFFile.\n        Optional\n        {\n          ?dependent a*/rdfs:subClassOf*/app:graph ?graph\n        }\n      }\n    }\n    \"\"\"\n    query = string.Template(template).substitute(\n        AppDef = appDefGraphFn(queryFn)\n    )\n    trace (\"query in removeInvalidDependencies:%s\" % query, 0)\n    bindings = queryFn(query)\n    #[{\"dependency\" : , \"dependent\" : }, ...]\n    invalidGraphs = []\n    for dependencyMap in bindings:\n        dependency = dependencyMap[\"dependency\"]\n        dependent = dependencyMap[\"dependent\"]\n        if '#' in dependent:\n            raise Exception (\"dependents should not include paths with #\")\n        dependencyName = urlparse.urlparse(dependency).path\n        dependentName = urlparse.urlparse(dependent).path\n        if (not os.path.exists(dependencyName) and os.path.exists(dependentName)):\n            trace (\"Invalidating dependent %s\" % dependentName, 0)\n            invalidateFileFn(dependentName)\n            #os.remove(dependentName)\n            if 'graph' in dependencyMap:\n                invalidGraphs.append(dependencyMap['graph'])\n            filesWereRemoved = True\n    if invalidGraphs:\n        for invalidGraph in set(invalidGraphs):\n            clearGraph(invalidGraph, updateFn)\n    return set(invalidGraphs)\n\n# def makeObsolete(path, queryFn, askFn, updateFn,\n#                  appDefGraphFn=uniqueApplicationDefinition):\n#     \"\"\"\n#     Returns: the graph to which  was loaded, and graphs of all dependencies\n#     Side-effect: deletes  and clears its associated graph.\n#     Side-effect clears graph of  and all graphs associated with dependencies.\n#     \"\"\"\n#     if os.path.exists(path):\n#         os.remove(path)\n#     clearedGraphs = []\n#     graphBindings = 
queryFn(string.Template(template).substitute(Path = \"file:\" + path))\n# graphs = map (lambda b: b['graph'], graphBindings)\n# for graph in graphs:\n\n# if fileIsLoaded(path, graph, askFn):\n# template = SparqlPrefixes + \"\"\"\n# Select distinct ?graph\n# Where\n# {\n# Bind(URI(\"$AppDef\") as ?applicationDefinition)\n# Bind(URI(\"$PathUri\") as ?pathUri)\n# Graph ?applicationDefinition\n# {\n# ?path app:graph ?graph.\n# }\n# }\n# \"\"\"\n# clearGraph(graph)\n# clearedGraphs = clearedGraphs + [graph]\n# return set (clearedGraphs).union(removeInvalidDependencies(queryFn, updateFn))\n\n\ndef fileIsLoaded(fileUri, graphUri, base, askFn):\n \"\"\"\n Returns: true if the there is some graph in the model which lists as type app:LoadedFile. This is not done automatically by AppDef, but the user can add this statement as a utility.\n Where:\n is an angleQuoted file URI.\n is a function f(query)->True/False\n \"\"\"\n template = SparqlPrefixes + \"\"\"\n BASE <$Base>\n Ask\n Where\n {\n Bind (URI(\"$GraphUri\") as ?graphUri)\n Bind (URI(\"$File\") as ?fileUri)\n Graph ?graphUri\n {\n ?fileUri a app:LoadedFile.\n }\n }\n \"\"\"\n isLoadedQuery = string.Template(template).substitute(\n Base = base,\n GraphUri = graphUri,\n File = fileUri\n )\n trace (\"isLoaded Query: %s\" % isLoadedQuery, level = 0)\n isLoaded = askFn(isLoadedQuery)\n if isLoaded:\n trace(\"%s is loaded.\" % fileUri, level = 0)\n return isLoaded\n\ndef loadFileUri(fileUri, graphUri, updateFn, askFn, formatCheck, base,\n fileIsLoadedFn=fileIsLoaded):\n \"\"\"\n Side Effect: is loaded per , into or Default Graph if graphUri is None.\n Side Effect: if , asserts app:loadedInto in the app:LoadedFiles graph. if not it asserts a app:LoadedFile.\n May raise errors per \n Where:\n is a URI naming an RDF file\n is None, or a URI naming a graph\n := fn() -> None\n := None or fn(askQuery) -> True/False\n Used only to inform . If provided, it will flag an error if the \n app.fileIsLoaded is not true.\n := None, or fn() -> s.t. is guranteed to point to a file containing valid RDF.\n := f(, , ) -> True if the file is loaded\n \"\"\"\n\n trace(\"fileUri at start of loadFileUri:%s\" % fileUri, level=0)\n fileUri = formatCheck(fileUri) if formatCheck else fileUri\n trace(\"fileUri after format check:%s\" % fileUri, level=0)\n # bad things happen if pathnames starting with / aren't explicitly marked\n # with file:// scheme. 
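A hedged illustration of the rewrite the re.sub below performs: '/data/app.ttl' and 'file:/data/app.ttl' (hypothetical paths) both become 'file:///data/app.ttl'. 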
Strangely it's better to leave it off for relative \n # pathnames...\n\n if re.match('^(file:)?/', fileUri): \n trace (\"dealing with root path %s\" % fileUri)\n # ensure /// start if path starts at root\n fileUri = re.sub('^(file:)?/+', 'file:///', fileUri)\n #scheme, netloc, path, query, fragment = urlparse.urlsplit(fileUri)\n #fileUri = urlparse.urlunsplit(('file', netloc, path, query, fragment))\n trace (\"About to load: %s into %s\" % (fileUri, graphUri), level= 0)\n if graphUri:\n template = \"\"\"\n BASE <$Base>\n Load <$File> into Graph <$Graph>\n \"\"\"\n loadUpdate = string.Template(template).substitute(\n Base = base,\n File = fileUri.strip('<>'),\n Graph = graphUri.strip('<>'))\n trace(\"load update in loadFileUri:%s\" % loadUpdate, level=0)\n updateFn(loadUpdate)\n if askFn and not fileIsLoadedFn(fileUri, graphUri, base, askFn):\n raise LoadFailure(fileUri)\n\n else:\n updateFn(\"\"\"\n load %s\n \"\"\" % (maybeAngleQuote(fileUri)))\n #todo: do load check on the default graph\n\ndef checkLoadedCacheThenQuery(queryFn):\n \"\"\"\n Returns f(, , ) -> True iff fileIsLoaded() would be true\n (see docs for fileIsLoaded)\n Creates a cache of files loaded at the time of the call. If the file is not found in the cache, fileIsLoaded will be called on the same arguments.\n Note: this is typically provided as an argument to fileIsLoadedFn parameter to acquireFile.\n \"\"\"\n query = SparqlPrefixes + \"\"\"\n Select Distinct ?graph ?file\n Where\n {\n Graph ?graph\n {\n ?file a app:LoadedFile.\n }\n }\n \"\"\"\n def collectLoadedFiles (loadedFiles, nextBinding):\n graph = nextBinding['graph']\n lf = loadedFiles[graph] if graph in loadedFiles else []\n lf = lf + [nextBinding['file']]\n loadedFiles[graph] = lf\n return loadedFiles\n cache = reduce(collectLoadedFiles, queryFn(query), {})\n def response (fileUri, graphUri, askFn):\n if graphUri in cache and fileUri in cache[graphUri]:\n trace(\"%s is cached as loaded\" % fileUri, level=0)\n return True\n return fileIsLoaded(fileUri, graphUri, askFn)\n return response\n\n\ndef acquireFile (queryFn, updateFn, askFn, constructForServiceFn, fileUri, \n base = \"file:///\",\n formatCheck = lambda uri:uri,\n appDefGraphFn = uniqueApplicationDefinition,\n fileIsLoadedFn = fileIsLoaded,\n loadFileUriFn = loadFileUri,\n isInvalidFileFn = isInvalidFile):\n \"\"\"\n Returns: the set of any graphs altered in the process of acquiring the file.\n Side-effect: generates the contents of per specifications in . plus any found to be invalid. Loads . 
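A hypothetical appDef fragment driving this recursion (all names illustrative): <Data/out.ttl> app:informedByFile <Data/in.ttl> ; app:graph <DataGraph> . 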
If a dependency is a\n rdfs:subClassOf app:ApplicationExtension, it will be acquired and loaded first.\n Where\n is a function f(query)->{, ...}\n is a function f(updateQuery) -> none, with Side-effect that the service is\n updated\n is a function f(query)->True/False\n is a function f(service) -> f(constructQuery, outputPath)\n -> none, with side effect such that the TTL specified by is written\n to , This function should also appropriately handle cases where the file\n already exists.\n will name a service indicated by the app:fromService predicate.\n is the name of the file to acquire.\n is fn(fileUri) -> fileUri, where is guaranteed to have valid\n RDF.\n is both the name of the file containing the definition and the graph \n that holds the definition.\n := [, ...]\n is a file specified with the property\n app:informedByFile (or an rdfs:subProperty of\n app:InformedByFile).\n := fn(queryFn) -> \n is the URI of the appropriate Application Definition Graph\n := fn(fileUri, graphUri, base, askFn) -> True iff \n is already loaded into \n := is any function with the same I/O as app.loadFileUri. This can allow\n you to move caching upstream.\n is any function with the same I/O as app.isInvalidFile.\n again, for caching.\n\n\n \"\"\"\n trace(\"Acquiring file %s\" % fileUri, level = 0)\n trace(\"working directory:%s\" % os.getcwd(), level = 0)\n trace(\"base:%s\" % base, level = 0) \n fileName = uriToPath(fileUri)\n targetPath = urlparse.urlparse(fileName).path\n appDefG = appDefGraphFn(queryFn),\n trace(\"AppDef Graph in acquire file:'%s'\" % appDefG, level = 0)\n if (os.path.exists(targetPath)):\n trace(fileName + \" already exists.\", level = 0)\n return\n else:\n trace(\"%s not found. Creating...\" % targetPath, level = 0)\n trace(\"about to remove invalid dependencies\\n\")\n alteredGraphs = removeInvalidDependencies(queryFn, updateFn, \n appDefGraphFn = appDefGraphFn)\n #fileUri = formatCheck(fileUri)\n template = SparqlPrefixes + \"\"\"\n Select Distinct ?dependency ?graph ?isPlan\n Where\n {\n Bind (URI(\"$AppDef\") as ?appDef)\n Bind (URI(\"$TargetUri\") as ?target)\n Graph ?appDef\n {\n {\n # Load if informed by file, or descended from a class so informed...\n ?informedByFile rdfs:subPropertyOf* app:informedByFile.\n { \n ?target ?informedByFile ?dependency. \n }\n Union\n {\n ?target a/rdfs:subClassOf* ?targetClass.\n ?targetClass ?informedByFile ?dependency.\n }\n Optional\n { \n ?dependency a*/rdfs:subClassOf*/app:graph ?graph. \n }\n Optional\n {\n ?dependency a/rdfs:subClassOf* app:ApplicationExtension.\n Bind(true as ?isPlan)\n }\n }\n Union\n {\n # Load if informed by file class...\n ?target a*/rdfs:subClassOf*/app:informedByFileClass ?dependentFileClass.\n ?dependency a*/rdfs:subClassOf* ?dependentFileClass.\n Optional\n { ?dependency a*/rdfs:subClassOf*/app:graph ?graph. 
}\n }\n Union\n {\n # Load if informed by graph class...\n ?target a*/rdfs:subClassOf*/app:informedByGraphClass ?dependentGraphClass.\n ?dependency a*/rdfs:subClassOf*/app:graph ?graph.\n ?graph a*/rdfs:subClassOf* ?dependentGraphClass.\n }\n\n }\n }\n Order by Desc(?isPlan)\n \"\"\"\n query = string.Template(template).substitute(\n AppDef = \"%s\" % appDefG, #kludge\n TargetUri = fileUri\n )\n trace(\"This is the query to derive dependencies:\" + query, level = 0)\n\n bindings = queryFn(query)\n def extractDependency(bindingData):\n #print \"binding data:%s\" % bindingData\n dep = bindingData[\"dependency\"]\n graph = bindingData[\"graph\"] if \"graph\" in bindingData else None\n return (tuple ([dep,graph]))\n\n dependencies = map (extractDependency, bindings)\n trace (\"Dependencies for %s in appDefGraph %s :%s\" % (fileUri, appDefG, dependencies), level = 0)\n #dependencies = [, ...]\n # = (, ?)\n # = the uri of a dependency file\n # = the optional uri of a graph into which the contents should be inserted\n for (dependency, graph) in dependencies:\n if not graph:\n raise NoGraph(dependency)\n #trace (\"WARNING: no graph given for %s\" % dependency, level = 0)\n dependencyName = urlparse.urlparse(dependency).path\n if (not os.path.exists(dependencyName)):\n trace (\"about to recurse on acquireFile: %s\" % dependencyName, level = 0)\n acquireFile(queryFn, updateFn, askFn, constructForServiceFn, \n dependencyName, \n base = base,\n formatCheck = formatCheck,\n appDefGraphFn = appDefGraphFn,\n fileIsLoadedFn = fileIsLoadedFn, \n loadFileUriFn = loadFileUriFn, \n isInvalidFileFn = isInvalidFileFn)\n if (not os.path.exists(dependencyName)):\n trace (\"Could not acquire %s\\n\" % dependencyName, level = 0)\n continue\n template = SparqlPrefixes + \"\"\"\n Ask\n {\n Bind (URI(\"$AppDef\") as ?appDef)\n Bind (URI(\"$Dependency\") as ?dependency)\n graph ?appDef\n {\n ?dependency a/rdfs:subClassOf* app:RDFFile.\n }\n }\n \"\"\"\n rdfFileQuery = string.Template(template).substitute(\n AppDef = \"%s\" % appDefG, #kludge\n Dependency = dependency\n )\n trace (\"rdfFileQuery:%s\" % rdfFileQuery, level = 0)\n trace (\"graph before fileIsLoadedTest:%s\" % graph, level=0)\n if askFn(rdfFileQuery) and not fileIsLoadedFn(dependency, \n graph, \n base, \n askFn):\n #dependency is an RDF file, so load it into graph\n trace (\"about to load dependency %s into graph %s\" \n % (dependency, graph),\n level = 0)\n loadFileUriFn(dependency, graph, updateFn, askFn, formatCheck, base)\n if isInvalidFileFn(dependency, askFn):\n raise InvalidFileError(dependency)\n alteredGraphs = alteredGraphs.union(set([graph]))\n else:\n if not askFn(rdfFileQuery):\n raise NotAnRdfFile(dependency)\n #sys.stderr.write (\"%s is not an RDF file. 
File exists, but not loading.\\n\" % dependency)\n #else:\n #trace(\"%s is already loaded\" % dependency)\n trace(\"target file in acquireFile: %s \" % fileUri, level = 0)\n template = SparqlPrefixes + \"\"\"\n Select distinct ?script ?service ?query ?queryHeader ?instructions\n Where\n {\n Bind (URI(\"$AppDef\") as ?appDef)\n Bind (URI(\"$TargetFile\") as ?targetFile)\n Graph ?appDef\n {\n ?targetFile a/rdfs:subClassOf* app:DataFile.\n Optional\n {\n ?targetFile a*/rdfs:subClassOf*/app:generatedByScript ?script.\n }\n Optional\n {\n ?targetFile a*/rdfs:subClassOf*/app:fromService ?service;\n a*/rdfs:subClassOf*/app:generatedByQuery ?query.\n Optional\n {\n ?targetFile a*/rdfs:subClassOf*/app:queryHeader ?queryHeaderUri.\n ?queryHeaderUri app:headerString ?queryHeader.\n }\n }\n Optional\n {\n ?targetFile a*/rdfs:subClassOf*/app:instructions ?instructions.\n }\n }\n }\n \"\"\"\n query = string.Template(template).substitute(\n AppDef = \"%s\" % appDefG, #kludge\n TargetFile = fileUri\n )\n trace (\"query for %s:%s\" % (fileUri, query))\n bindings = queryFn(query) or []\n if len(bindings) == 0:\n sys.stderr.write (\"Could not find a specification for %s (is it a app:DataFile?)\\n\" % fileUri)\n sys.stderr.write (query)\n return()\n #print \"Query response: %s\" % queryResponse\n if (len(bindings) > 1):\n raise Exception(\"More than one binding in acquireFile:\\n%s\" % query)\n onlyBinding = bindings[0]\n\n if \"script\" in onlyBinding:\n scriptBinding = onlyBinding['script']\n script = urlparse.urlparse(scriptBinding).path\n trace(\"Script: %s\" % script)\n trace(\"fileName: %s\" % fileName)\n commandLine = \"\"\"%s \"%s\" > \"%s\" \"\"\" % (script, fileName, fileName)\n #script + \" \" + fileName + \" > \" + fileName\n trace(\"executing: %s\" % commandLine, level = 0)\n exitCode = os.system (commandLine)\n if exitCode != 0:\n raise ScriptError(commandLine, exitCode)\n return(None)\n elif \"service\" in onlyBinding:\n service = onlyBinding[\"service\"]\n query = onlyBinding[\"query\"] if \"query\" in onlyBinding else None\n header = onlyBinding[\"queryHeader\"] if \"queryHeader\" in onlyBinding else \"\"\n trace(\"service:%s\" % service)\n if query:\n constructFn = constructForServiceFn(service)\n constructFn(header + query, fileName)\n elif \"instructions\" in onlyBinding:\n instructions = onlyBinding[\"instructions\"]\n sys.stderr.write (\"INSTRUCTIONS FOR %s:\\n\" % fileName)\n sys.stderr.write (instructions)\n else:\n sys.stderr.write (\"Neither script nor service nor instructions found for %s.\\n\" % fileName)\n return alteredGraphs\n\n# def confirmAppDefUri (queryFn, updateFn, appDefUri, formatCheck):\n# \"\"\"\n# Apparently there is an inconsitency in the way URIs are handled on different platforms. 
This addresses that by loading the appdef file (which should have a '<> a app:ApplicationDefiniton' clause) into a 'scratch' graph, and asks the file what it calls itself.\n# \"\"\"\n\n# appDefUri = formatCheck(appDefUri)\n# updateFn(SparqlPrefixes + \"\"\"\n# load %s into graph app:Scratch\n# \"\"\" % (maybeAngleQuote(appDefUri)))\n\n# whoAreYou = queryFn (SparqlPrefixes + \"\"\"\n# Select ?appDef\n# Where\n# {\n# Graph app:Scratch\n# {\n# ?appDef a app:ApplicationDefinition.\n# }\n# }\n# \"\"\")\n# updateFn(SparqlPrefixes + \"Drop Graph app:Scratch\")\n# assert (len (whoAreYou) == 1)\n# return whoAreYou[0]['appDef']\n\ndef ontologyUri():\n variable = 'RDF_APP_DEF_ONTOLOGY_URI'\n localOntology = os.environ[variable] if variable in os.environ else None\n return (localOntology or \"\")\n\ndef loadAppDefOntology(appDefGraphUri, updateFn, base = \"file:///\"):\n \"\"\"\n Side effect: loads the core appDef ontology into \n Where\n typically shares the name of the main application definition file.\n \"\"\"\n loadFileUri(ontologyUri(), appDefGraphUri, updateFn, None, None, base)\n\ndef reset (queryFn, updateFn, appDefUri, askFn, formatCheck, \n base = 'file:///',\n appDefGraphFn=uniqueApplicationDefinition):\n \"\"\"\n Side-effect: Drops all named graphs and reloads and \n Where:\n := f() -> [, ....]\n := f() -> None\n is the URI of an app def.\n := f(appDefUri) -> or exception\n := a uri guaranteed to contain valid data (hopefully == )\n \"\"\"\n trace (\"starting reset of %s\" % appDefUri, level = 0)\n\n appDefUri = appDefGraphFn(queryFn)\n\n updateFn (\"Drop All\")\n\n # '<>' is not rendered consistently from one platform to the next, so\n # we need to ask the appdef what it calls itself....\n\n trace(\"appDefUri graph and file in reset:%s\" % appDefUri)\n loadAppDefOntology(appDefUri, updateFn)\n\n loadFileUri(appDefUri, appDefUri, updateFn, askFn, formatCheck, base)\n\n trace(\"exiting reset %s\" % ontologyUri, level = 0)\n\n\ndef raiseNoSpecificationError(parameterName):\n raise NoSpecification(parameterName)\n\ndef standardParameterPropertyPath(prefix, parameterName):\n \"\"\"\n Returns the default URI for parameter predicates in getApplicationParametersFn.\n \"\"\"\n template = \"a*/rdfs:subClassOf*/$Prefix:$ParameterName\"\n return string.Template(template).substitute(\n Prefix = prefix,\n ParameterName = parameterName)\n\ndef getApplicationParametersFn (sparqlPrefixes = SparqlPrefixes, \n namespacePrefix=\"app\"):\n \"\"\"\n Returns \n Where\n is the 'Prefix' preamble for a sparql query which will retrieve\n the parameter.\n is a string matching some prefix in \n f(appDefFileUri, queryFn, parameterName, appDefGraphFn, ifNotFoundFn)\n -> ([, ...] 
or output of \n := f() -> [, ...]\n is a predicate in the app: namespace, whose subject is the base URI of the application, and whose object is \n is the uri of the appDef file\n := f(queryFn) -> \n := the URI of the appropriate Application Definition Graph\n := { : }\n will be a SPARQL query against the appdef graph.\n is a URI for the predicate to use in matching the parameter\n (default returns a*/rdfs:subClassOf*/:).\n := f(parameterName) -> ?, called if the parameter is not found.\n \"\"\"\n def getApplicationParameters (appDefFileUri, queryFn, parameterName, \n appDefGraphFn = uniqueApplicationDefinition,\n parameterPredicate = standardParameterPropertyPath,\n ifNotFoundFn = raiseNoSpecificationError):\n\n template = sparqlPrefixes + \"\"\"\n Select Distinct ?$ParameterName\n Where\n {\n Bind (URI(\"$AppDefGraph\") as ?appDefGraph)\n Bind (URI(\"$AppDef\") as ?appDef)\n graph ?appDefGraph\n {\n ?appDef $ParameterPredicate ?$ParameterName.\n }\n }\n \"\"\"\n query = string.Template(template).substitute(\n AppDefGraph = appDefGraphFn(queryFn),\n AppDef = appDefFileUri,\n ParameterName = parameterName,\n Prefix = namespacePrefix,\n ParameterPredicate = parameterPredicate(namespacePrefix, parameterName)\n )\n trace(\"get app parameters query:\\n%s\" % query, level = 0)\n bindings = queryFn(query)\n if not bindings:\n return ifNotFoundFn(parameterName)\n return map(lambda binding: binding[parameterName], bindings)\n return getApplicationParameters\n\ngetApplicationParameters = getApplicationParametersFn()\n\n\ndef getApplicationParameter (appDefFileUri, queryFn, parameterName, ifNotFoundFn = raiseNoSpecificationError):\n values = getApplicationParameters (appDefFileUri, queryFn, parameterName, \n ifNotFoundFn = raiseNoSpecificationError)\n if values and len(values) > 0:\n return values[0]\n\n\ndef getParameterFn (sparqlPrefixes = SparqlPrefixes, namespacePrefix=\"app\"):\n \"\"\"Returns function that returns a parameter assigned to the target file of a script in the application definition. Returns a list if there is more than one binding. This allows you to customize the getParameter function for namespaces other than app:\"\"\"\n def getParameter(appDefFileUri, queryFn, targetFile, parameterName, \n appDefGraphFn = uniqueApplicationDefinition,\n ifNotFoundFn = raiseNoSpecificationError):\n template = sparqlPrefixes + \"\"\"\n Select distinct ?$ParameterName\n Where\n {\n Bind (URI(\"$AppDefGraph\") as ?appDefGraph)\n Bind (URI(\"$AppDefFileUri\") as ?appDef)\n Bind (URI(\"$TargetFile\") as ?targetFileUri)\n graph ?appDefGraph\n {\n ?appDef a/rdfs:subClassOf* app:ApplicationDefinition.\n ?targetFileUri a*/rdfs:subClassOf* ?prototype.\n ?prototype $Prefix:$ParameterName ?$ParameterName.\n }\n }\n \"\"\" \n query = string.Template(template).substitute(\n ParameterName = parameterName,\n AppDefGraph = appDefGraphFn(queryFn),\n AppDefFileUri = appDefFileUri,\n TargetFile = targetFile,\n Prefix = namespacePrefix,\n )\n trace (\"getParameter query:%s\" % query, 0)\n bindings = queryFn(query)\n if not bindings:\n return ifNotFoundFn(parameterName)\n else:\n return map (lambda binding: binding[parameterName], bindings)\n return getParameter\n\n# def old_getParameterFn (sparqlPrefixes = SparqlPrefixes, namespacePrefix=\"app\"):\n# \"\"\"Returns function that returns a parameter assigned to the target file of a script in the application definition. Returns a list if there is more than one binding. 
This allows you to customize the getParameter function for namespaces other than app:\"\"\"\n#     def getParameter(queryFn, targetFile, parameterName, ifNotFoundFn = raiseNoSpecificationError):\n\n#         query = sparqlPrefixes + \"\"\"\n#         Select distinct ?%s\n#         Where\n#         {\n#           graph ?appDef\n#           {\n#             ?appDef a app:ApplicationDefinition.\n#              %s:%s ?%s\n#           }\n#         }\n#         \"\"\" % (parameterName, targetFile, namespacePrefix, parameterName, parameterName)\n#         bindings = queryFn(query)\n#         if not bindings:\n#             return ifNotFoundFn(parameterName)\n#         else:\n#             return map (lambda binding: binding[parameterName], bindings)\n#     return getParameter\n#the default function\n\ngetParameter=getParameterFn()\n\ndef readAppendixes (appDefFileUri, queryFn, updateFn, askFn, formatCheck, base,\n                    appDefGraphFn = uniqueApplicationDefinition,\n                    loadFileUriFn = loadFileUri):\n    \"\"\"\n    Side-effect: Reads ttl files in all appendixes of the definition (if any).\n    Where:\n     is the URI of an app def file.\n     := f(queryFn) -> \n     is the named graph which holds the application definition triples\n     := f(query) -> [, ....]\n     := f(updateQuery) -> None\n     := f(askQuery) -> True/False\n     := f(appDefUri) ->  or exception\n     := a uri guaranteed to contain valid data (normally == )\n     := f(fileUri, appDefGraphUri, updateFn, askFn, formatCheck) -> None\n      whose signature is identical to the default rdfAppDef.loadFileUri()\n    Note: to use, put <> :appendix  into your appDef, then add ttl files to that directory as needed.\n    \"\"\"\n    appDefGraphUri = appDefGraphFn(queryFn)\n    trace (\"Reading appendixes for appDefGraphUri:%s\" % appDefGraphUri, level = 0)\n    appendixUris = getApplicationParameters(appDefFileUri, queryFn, \"appendix\", \n                                            appDefGraphFn = appDefGraphFn,\n                                            ifNotFoundFn=lambda x: None) or []\n    for appendixUri in appendixUris:\n        trace(\"appendixUri:%s\" % appendixUri, 0)\n        trace (\"base:%s\" % base, 0)\n        appendixPath = urlparse.urlparse(appendixUri).path\n        if os.path.exists(appendixPath):\n            assert os.path.isdir(appendixPath)\n            for root, dirs, files in os.walk(appendixPath):\n                for file in files:\n                    if not root == appendixPath:\n                        continue\n                    if re.match(\"^.+.ttl$\", file): #file.endswith(\".ttl\"):\n                        #uri = urlparse.urljoin('file:', urllib.pathname2url(appendixPath + file))\n                        uri = urlparse.urljoin('file:', urllib.pathname2url(os.path.join(root, file)))\n                        loadFileUriFn(uri, appDefGraphUri, updateFn, askFn, formatCheck, base)\n","sub_path":"Python/rdfAppDef.py","file_name":"rdfAppDef.py","file_ext":"py","file_size_in_byte":35690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"431137199","text":"import tensorflow as tf\nimport numpy as np \nimport math\nimport matplotlib.pyplot as plt\nimport tensorflow.examples.tutorials.mnist.input_data as input_data\n\n# load MNIST data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\nmean_img = np.mean(mnist.train.images, axis=0)\n\n# Define a session to use across multiple computational graphs\nsess = tf.Session()\n\n# Fetch a subset of the data in order to counter the limitation of compute resources\ndef get_train_data(size):\n\ttrain_data = mnist.train.images[:size,:]\n\tprint(\"train data description: \" + str(train_data.shape))\n\ttrain_labels = mnist.train.labels[:size,:]\n\tprint(\"train labels description: \" + str(train_labels.shape))\t\n\treturn train_data, train_labels\n\ndef get_test_data(size):\n\ttest_data = mnist.test.images[:size,:]\n\tprint(\"test data description: \" + str(test_data.shape))\n\ttest_labels = mnist.test.labels[:size,:]\n\tprint(\"test labels 
description: \" + str(test_data.shape))\t\n\treturn test_data, test_labels\n\t\ndef get_next_batch(size):\n\t# imitating the method described in mnist object to load a random data in batch\n\t# count the number of rows in the training data\n\t# train_data encapsulates both data [0] and labels [1]\n\tindex = np.arange(0, train_data[0].shape[0])\n\tnp.random.shuffle(index)\n\tindex = index[:size]\n\tdata_shuffled = np.asmatrix([train_data[0][i] for i in index])\n\tlabels_shuffled = np.asmatrix([train_data[1][i] for i in index])\n\treturn data_shuffled, labels_shuffled\n\ndef my_autoencoder():\n\t'''\n\tInput: \n\tA list whose:\n\tFirst element denotes the number of nodes in the input layer\n\tSecond and subsequent elements denote number of nodes in the hidden layers \n\n\tOutput:\n\tx : placeholder for input data\n\ty : latent representation at the highest abstraction\n\tz : recontruction of pure input from the corrupted one\n\tloss : list of losses across multiple epoches\n\t'''\n\n\t# Input to the autoencoder\n\tdim_of_layer=[784, 392, 196]\n\tx = tf.placeholder(tf.float32, shape=[None, dim_of_layer[0]])\n\tinput_data = x\n\n\t# Encoder part: Iterate through all the output layers (except the first layer which is input)\n\tencoder = []\n\tfor layer_index, layer_dim in enumerate(dim_of_layer[1:]):\n\t\tinput_dim = int(input_data.get_shape()[1])\n\t\toutput_dim = layer_dim\n\t\tW = tf.Variable(tf.random_uniform([input_dim, output_dim],-1.0 / math.sqrt(input_dim),1.0 / math.sqrt(input_dim)))\n\n\t\t#W = tf.Variable(tf.random_uniform([input_dim, output_dim]))\n\t\tb = tf.Variable(tf.zeros([output_dim]))\n\t\tencoder.append([W])\n\t\tactivation = tf.nn.tanh(tf.matmul(input_data, W) + b)\n\t\tinput_data = activation\n\n\t# Latent representation at the highest abstraction\n\ty = input_data\n\n\t# Decoder part: Iterate through all the output layers in REVERSE (except the first layer which is input)\n\tencoder.reverse()\n\tfor layer_index, layer_dim in enumerate(dim_of_layer[::-1][1:]):\n\t\tinput_dim = int(input_data.get_shape()[1])\n\t\toutput_dim = layer_dim\n\t\tW = tf.transpose(encoder[layer_index])\n\t\tW = tf.reshape(W, [input_dim, output_dim])\n\t\tb = tf.Variable(tf.zeros([output_dim]))\n\t\tactivation = tf.nn.tanh(tf.matmul(input_data, W) + b)\n\t\tinput_data = activation\n\tz = input_data\n\n\t# RMS loss function\n\tloss = tf.sqrt(tf.reduce_mean(tf.square(z - x)))\n\n\t# Optimizer parameters\n\tlearning_rate = 0.001\n\toptimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n\t# Initialize all the variables\n\tsess.run(tf.global_variables_initializer())\n\n\t# Fit all training data \n\tbatch_size = 20\n\tn_epochs = 2000\n\tloss_per_epoch = []\n\tfor epoch_i in range(n_epochs):\n\t\tfor batch_i in range(train_data[0].shape[0] // batch_size):\n\t\t\tbatch_input, _ = get_next_batch(batch_size)\n\t\t\t# introduces extra '1' dimension. 
Hence squeezing it to maintain dim consistency\n\t\t\ttrain = np.squeeze(np.array([img - mean_img for img in batch_input]))\n\t\t\tsess.run(optimizer, feed_dict={x:train})\n\t\tloss_per_epoch.append(sess.run(loss, feed_dict={x:train}))\n\t\tprint(epoch_i, sess.run(loss, feed_dict={x:train}))\n\n\n\t#return(input, most abstracted latent representation, reconstructed input, loss)\n\treturn {'x': x, 'y': y, 'z': z, 'loss': loss_per_epoch}\n\ndef reconstruct_mnist():\n\t# Plot reconstructed input for MNIST samples\n\tmnist_samples = 10\n\ttestdata, _ = mnist.test.next_batch(mnist_samples)\n\ttestdata_norm = np.array([img - mean_img for img in testdata])\n\trecon = sess.run(ae['z'], feed_dict={ae['x']:testdata_norm})\n\tfig, axis = plt.subplots(2, mnist_samples, figsize=(10, 4))\n\tfor example_i in range(mnist_samples):\n\t\taxis[0][example_i].imshow(np.reshape(testdata[example_i, :], (28, 28)))\n\t\taxis[1][example_i].imshow(np.reshape([recon[example_i, :] + mean_img], (28, 28)))\n\tfig.show()\n\tplt.draw()\n\tplt.waitforbuttonpress()\n\n\nif __name__ == '__main__':\n\n\tglobal train_data;\n\tglobal test_data;\n\ttrain_data = get_train_data(size=5000)\n\ttest_data = get_test_data(size=5000)\n\n\tae = my_autoencoder()\n\n\t# Get loss curve\n\tprint(\"printing loss curve:\")\n\tplt.plot(ae['loss'])\n\tplt.xlabel(\"number of iterations\")\n\tplt.ylabel(\"loss\")\n\tplt.show()\n\n\t# Get sample reconstructions\n\treconstruct_mnist()\n\n","sub_path":"autoencoder_tied.py","file_name":"autoencoder_tied.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"155967741","text":"from datetime import date, datetime\nfrom typing import Optional\n\nimport structlog\n\nfrom dateutil.relativedelta import relativedelta\nfrom django.db import connections\n\nfrom psqlextra.models import PostgresPartitionedModel\nfrom psqlextra.types import PostgresPartitioningMethod, StrEnum\n\nLOGGER = structlog.get_logger(__name__)\n\n\nclass PostgresAutoPartitioningError(RuntimeError):\n    \"\"\"Raised when a fatal error is encountered during automatic\n    partitioning.\"\"\"\n\n\nclass PostgresAutoPartitioningIntervalUnit(StrEnum):\n    \"\"\"Interval units that can be auto-partitioned with.\"\"\"\n\n    MONTH = \"month\"\n    WEEK = \"week\"\n\n\ndef postgres_auto_partition(\n    model: PostgresPartitionedModel,\n    count: int,\n    interval_unit: PostgresAutoPartitioningIntervalUnit,\n    interval: int,\n    start_from: Optional[date] = None,\n    using=\"default\",\n):\n    \"\"\"Pre-create N partitions ahead of time according to the specified\n    interval unit and interval.\n\n    Arguments:\n        model:\n            The model to auto partition for.\n\n        count:\n            The amount of partitions for the specified interval\n            to create ahead (from the current date).\n\n        interval_unit:\n            Date/time unit to partition by.\n\n        interval:\n            Amount of specified units to partition by.\n\n        start_from:\n            Skip creating any partitions that would\n            contain data _before_ this date.\n\n            Use this when switching partitioning\n            interval. 
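(For example, if weekly partitions already exist up to the hypothetical date 2020-05-04, pass start_from=date(2020, 5, 4) when switching to monthly partitions.) 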
Useful when you've already partitioned\n ahead using the original interval and want\n to avoid creating overlapping partitioninig.\n Set this to the _end date_ for the\n last partition that was created.\n\n If the specified start date is in the past,\n it is ignored.\n\n using:\n Database connection name to use.\n\n Example:\n Partition by month, 2 months ahead:\n count=2, interval_unit=MONTH, interval=1\n\n Partition by week, 3 weeks ahead:\n count=3, interval_unit=WEEK, interval=1\n\n Partion by 2 weeks, 4 weeks ahead\n count=2, interval_unit=WEEK, interval=2\n \"\"\"\n\n connection = connections[using]\n\n with connection.cursor() as cursor:\n table = connection.introspection.get_partitioned_table(\n cursor, model._meta.db_table\n )\n\n if not table:\n raise PostgresAutoPartitioningError(\n f\"Model {model.__name__}, with table {model._meta.db_table} \"\n \"does not exists in the database. Did you run \"\n \"`python manage.py migrate`?\"\n )\n\n if table.method != PostgresPartitioningMethod.RANGE:\n raise PostgresAutoPartitioningError(\n f\"Table {table.name} is not partitioned by a range. Auto partitioning \"\n \"only supports partitioning by range.\"\n )\n\n schema_editor = connection.schema_editor()\n\n start_datetime = datetime.now()\n if interval_unit == PostgresAutoPartitioningIntervalUnit.MONTH:\n start_datetime = start_datetime.replace(day=1)\n elif interval_unit == PostgresAutoPartitioningIntervalUnit.WEEK:\n start_datetime = start_datetime - relativedelta(\n days=start_datetime.weekday()\n )\n\n for _ in range(count):\n if interval_unit == PostgresAutoPartitioningIntervalUnit.MONTH:\n end_datetime = start_datetime + relativedelta(months=+interval)\n partition_name = start_datetime.strftime(\"%Y_%b\").lower()\n elif interval_unit == PostgresAutoPartitioningIntervalUnit.WEEK:\n end_datetime = start_datetime + relativedelta(weeks=+interval)\n partition_name = start_datetime.strftime(\"%Y_week_%W\").lower()\n\n from_values = start_datetime.strftime(\"%Y-%m-%d\")\n to_values = end_datetime.strftime(\"%Y-%m-%d\")\n\n logger = LOGGER.bind(\n model_name=model.__name__,\n name=partition_name,\n from_values=from_values,\n to_values=to_values,\n )\n\n if start_from and start_datetime.date() < start_from:\n start_datetime = end_datetime\n logger.info(\n \"Skipping creation of partition, before specified start date\",\n start_from=start_from,\n )\n continue\n\n partition_table_name = schema_editor.create_partition_table_name(\n model, partition_name\n )\n\n existing_partition = next(\n (\n table_partition\n for table_partition in table.partitions\n if table_partition.name == partition_table_name\n ),\n None,\n )\n\n if existing_partition:\n start_datetime = end_datetime\n logger.info(\"Skipping creation of partition, already exists\")\n continue\n\n schema_editor.add_range_partition(\n model=model,\n name=partition_name,\n from_values=from_values,\n to_values=to_values,\n )\n\n logger.info(\"Created partition\")\n\n start_datetime = end_datetime\n","sub_path":"psqlextra/auto_partition.py","file_name":"auto_partition.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"259866785","text":"import numpy as np, pandas as pd\r\nfrom textblob import TextBlob\r\nimport pickle\r\nfrom scipy import spatial\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nimport spacy\r\nen_nlp = spacy.load('en_core_web_sm')\r\nfrom nltk.stem.lancaster import LancasterStemmer\r\nst = 
LancasterStemmer()\r\n\r\ntest = pd.read_excel(r'..\\Data\\Test_QA_Comprehension_Grade5.xlsx')\r\n\r\nwith open(r'..\\Data\\dict_test5_embeddings1.pickle', \"rb\") as f:\r\n d1 = pickle.load(f)\r\n \r\nwith open(r'..\\Data\\dict_test5_embeddings2.pickle', \"rb\") as f:\r\n d2 = pickle.load(f)\r\n \r\ndict_emb_test = dict(d1)\r\ndict_emb_test.update(d2)\r\n\r\nlen(dict_emb_test)\r\n\r\ndel d1, d2\r\n\r\n#train.dropna(inplace=True)\r\n\r\ntest.shape\r\n\r\nimport re\r\ndef process_data(test):\r\n \r\n print(\"step 1\")\r\n test['sentences'] = test['context'].apply(lambda x: [item.raw for item in TextBlob(x).sentences])\r\n test['original_sentences'] = test['sentences']\r\n \r\n i=0\r\n for index, row in test.iterrows():\r\n test1=pd.DataFrame()\r\n test1['sentences']=row['sentences']\r\n \r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'\\s+', ' ', x[\"sentences\"]),axis=1)\r\n \r\n test1['sentences'] = test1.apply(lambda x: x['sentences'].lower(), axis=1)\r\n\r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'(\\d+/\\d+/\\d+)|(\\d+\\.\\d+\\.\\d+)|(\\d+\\-\\d+\\-\\d+)|(\\d+\\/\\d+)|(\\d+th)|(\\d+nd)|(\\d+rd)|(\\d+st)', ' DATE ', x[\"sentences\"]),axis=1)\r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'\\b(mon|tue|wed|thurs|fri|sat|sun|monday|tuesday|wednesday|thursday|friday|saturday|sunday|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec|january|february|march|april|may|june|july|august|september|october|november|december)\\b',' DATE ', x[\"sentences\"]),axis=1)\r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'(\\$\\d+\\,\\d+\\.\\d+)|(\\$\\d+\\,\\d+)|(\\$\\d+\\.\\d+)|(\\$\\d+)|(\\$\\ d+\\,\\d+\\.\\d+)|(\\$ \\d+\\,\\d+)|(\\$ \\d+\\.\\d+)|(\\$ \\d+)|(\\d+\\,\\d+\\.\\d+)|(\\d+\\,\\d+)|(\\d+\\.\\d+)', ' AMOUNT ', x[\"sentences\"]),axis=1)\r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'(#\\d+)|(# \\d+)|(\\d+)', ' NUMBER ', x[\"sentences\"]),axis=1)\r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'(\\d+\\.\\d+)|(\\d+)', ' AMOUNT ', x[\"sentences\"]),axis=1)\r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'[^\\s]+@[^\\s]+\\.[^\\s]+',' MAIL ', x[\"sentences\"]),axis=1) \r\n \r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'\\s+', ' ', x[\"sentences\"]),axis=1)\r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'(\\()|(\\))', '', x[\"sentences\"]),axis=1)\r\n\r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'[^a-zA-Z]', ' ', x[\"sentences\"]),axis=1)\r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'\\s+', ' ', x[\"sentences\"]),axis=1)\r\n \r\n test1['sentences'] = test1.apply(lambda x:re.sub(r'\\.', '', x[\"sentences\"]),axis=1)\r\n\r\n test1['sentences'] = test1.apply(lambda x: x['sentences'].lower(), axis=1)\r\n \r\n test1=test1['sentences']\r\n \r\n test.loc[i,'sentences']=list(test1)\r\n i=i+1\r\n\r\n \r\n #print(\"step 2\")\r\n #train[\"target\"] = train.apply(get_target, axis = 1)\r\n \r\n print(\"step 3\")\r\n test['sent_emb'] = test['sentences'].apply(lambda x: [dict_emb_test[item][0] if item in\\\r\n dict_emb_test else np.zeros(4096) for item in x])\r\n print(\"step 4\")\r\n test['quest_emb'] = test['questions'].apply(lambda x: dict_emb_test[x] if x in dict_emb_test else np.zeros(4096) )\r\n \r\n return test\r\n\r\ntest = process_data(test)\r\n\r\nprint(test.head(5))\r\n\r\n#Predicted Cosine & Euclidean Index\r\ndef cosine_sim(x):\r\n li = []\r\n for item in x[\"sent_emb\"]:\r\n li.append(spatial.distance.cosine(item,x[\"quest_emb\"][0]))\r\n return li\r\n\r\ndef pred_idx(distances):\r\n return np.argmin(distances)\r\n\r\ndef 
predictions(train):\r\n \r\n train[\"cosine_sim\"] = train.apply(cosine_sim, axis = 1)\r\n train[\"diff\"] = (train[\"quest_emb\"] - train[\"sent_emb\"])**2\r\n train[\"euclidean_dis\"] = train[\"diff\"].apply(lambda x: list(np.sum(x, axis = 1)))\r\n del train[\"diff\"]\r\n \r\n print(\"cosine start\")\r\n \r\n train[\"pred_idx_cos\"] = train[\"cosine_sim\"].apply(lambda x: pred_idx(x))\r\n train[\"pred_idx_euc\"] = train[\"euclidean_dis\"].apply(lambda x: pred_idx(x))\r\n \r\n return train\r\n\r\npredicted = predictions(test)\r\n\r\npredicted[\"cosine_sim\"][0]\r\n\r\npredicted[\"euclidean_dis\"][0]\r\n\r\nques=[]\r\nanswer_cos=[]\r\nanswer_euc=[]\r\ncosine=[]\r\neuc=[]\r\nfor i in range(len(predicted)):\r\n answer_cos.append(predicted.loc[i,'original_sentences'][predicted.loc[i,'pred_idx_cos']])\r\n answer_euc.append(predicted.loc[i,'original_sentences'][predicted.loc[i,'pred_idx_euc']])\r\n ques.append(predicted.loc[i,'questions'])\r\n cosine.append(predicted.loc[i,'cosine_sim'][predicted.loc[i,'pred_idx_cos']])\r\n euc.append(predicted.loc[i,'euclidean_dis'][predicted.loc[i,'pred_idx_euc']])\r\n \r\ndf=pd.DataFrame()\r\ndf['Question']=ques\r\ndf['Answer_Cos']=answer_cos\r\ndf['Cosine Sim']=cosine\r\ndf['Answer_Euc']=answer_euc\r\ndf['Euclidean Dis']=euc\r\n\r\ndf.to_csv(r'..\\Results\\Grade5_Answers.csv')","sub_path":"Code/unsupervised_processed - level 1.py","file_name":"unsupervised_processed - level 1.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"240835513","text":"from src.utils.config import config\nimport json\n# import uuid\nimport requests\n\n\n_NAMESPACE = \"WS\"\n_VER_NAMESPACE = \"WSVER\"\n_SAMPLE_NAMESPACE = \"SMP\"\n# versioned and non-versioned index have same version\n_SAMPLE_SET_INDEX_VERSION = 1\n_SAMPLE_SET_INDEX_NAME = 'sample_set_' + str(_SAMPLE_SET_INDEX_VERSION)\n_VER_SAMPLE_SET_INDEX_NAME = 'sample_set_version_' + str(_SAMPLE_SET_INDEX_VERSION)\n# versioned and non-versioned index have same version\n_SAMPLE_INDEX_VERSION = 1\n_SAMPLE_INDEX_NAME = 'sample_' + str(_SAMPLE_INDEX_VERSION)\n# _VER_SAMPLE_INDEX_NAME = 'sample_version_' + str(_SAMPLE_INDEX_VERSION)\n\n\ndef _get_sample(sample_info):\n \"\"\" Get sample from SampleService\n sample_info - dict containing 'id' and 'version' of a sample\n \"\"\"\n headers = {\"Authorization\": config()['ws_token']}\n params = {\n \"id\": sample_info['id']\n }\n if sample_info.get('version'):\n params['version'] = sample_info['version']\n payload = {\n \"method\": \"SampleService.get_sample\",\n \"id\": \"\", # str(uuid.uuid4()),\n \"params\": [params],\n \"version\": \"1.1\"\n }\n resp = requests.post(url=config()['sample_service_url'], headers=headers, data=json.dumps(payload))\n if not resp.ok:\n raise RuntimeError(f\"Returned from sample service with status {resp.status_code} - {resp.text}\")\n resp_json = resp.json()\n if resp_json.get('error'):\n raise RuntimeError(f\"Error from SampleService - {resp_json['error']}\")\n sample = resp_json['result'][0]\n return sample\n\n\ndef _flatten_meta(meta, prefix=None):\n \"\"\" Flattens metadata fields in a Sample object. Fields are concatenated into a\n single string field to save into an Elasticsearch index\n meta - Sample Metadata to be flattened\n prefix - (optional) prefix for the metadata values. 
default=None\n \"\"\"\n new_meta = {}\n for key in meta:\n if prefix:\n val = prefix + \":\"\n else:\n val = \"\"\n if \"value\" in meta[key]:\n val += str(meta[key]['value'])\n if \"units\" in meta[key]:\n val += \";\" + str(meta[key]['units'])\n new_meta[key] = val\n return new_meta\n\n\ndef _combine_meta(meta, flattened_meta, idx):\n \"\"\" Combine newly flattened metadata with existing metadata. This function is designed to keep the indexing\n of the different metadata fields consistent for each node within the sample node tree s.t. all the\n fields in index (idx) 0 will be from item 0 in the node tree. Empty string (\"\") entries are placeholders,\n added simply so that the indexing of all fields lines up.\n meta - existing metadata.\n flattened_meta - newly flattened metadata.\n idx - current index of node_tree.\n \"\"\"\n for key in flattened_meta:\n if key in meta:\n meta[key] += [\"\" for _ in range(idx - len(meta[key]))] + [flattened_meta[key]]\n else:\n meta[key] = [\"\" for _ in range(idx)] + [flattened_meta[key]]\n return meta\n\n\ndef index_sample_set(obj_data, ws_info, obj_data_v1):\n \"\"\"Indexer for KBaseSets.SampleSet object type\"\"\"\n info = obj_data['info']\n if not obj_data.get('data'):\n raise Exception(\"no data in object\")\n data = obj_data['data']\n workspace_id = info[6]\n object_id = info[0]\n version = info[4]\n sample_set_id = f\"{_NAMESPACE}::{workspace_id}:{object_id}\"\n ver_sample_set_id = f\"{_VER_NAMESPACE}::{workspace_id}:{object_id}:{version}\"\n\n sample_set_index = {\n \"_action\": \"index\",\n \"doc\": {\n \"description\": data[\"description\"],\n \"sample_ids\": [s['id'] for s in data['samples']],\n \"sample_names\": [s['name'] for s in data['samples']],\n \"sample_versions\": [s['version'] for s in data['samples']]\n },\n \"index\": _SAMPLE_SET_INDEX_NAME,\n \"id\": sample_set_id\n }\n yield sample_set_index\n ver_sample_set_index = dict(sample_set_index)\n ver_sample_set_index['index'] = _VER_SAMPLE_SET_INDEX_NAME\n ver_sample_set_index['id'] = ver_sample_set_id\n yield ver_sample_set_index\n\n for samp in data[\"samples\"]:\n # query the sample service for sample\n sample = _get_sample(samp)\n sample_id = f\"{_SAMPLE_NAMESPACE}::{sample['id']}:{sample['version']}\"\n # not sure on how we need to handle more than 1 node.\n if len(sample['node_tree']) == 1:\n meta_controlled = _flatten_meta(\n sample['node_tree'][0]['meta_controlled']\n )\n meta_user = _flatten_meta(\n sample['node_tree'][0]['meta_user']\n )\n meta_controlled['node_id'] = sample['node_tree'][0]['id']\n else:\n meta_controlled, meta_user = {}, {}\n for idx, node in enumerate(sample['node_tree']):\n meta_controlled = _combine_meta(\n meta_controlled,\n _flatten_meta(\n node['meta_controlled']\n ),\n idx\n )\n meta_user = _combine_meta(\n meta_user,\n _flatten_meta(\n node['meta_user']\n ),\n idx\n )\n meta_controlled['node_id'] = node['id']\n\n sample_index = {\n \"_action\": \"index\",\n \"doc\": {\n \"save_date\": sample['save_date'],\n \"sample_version\": sample['version'],\n \"name\": sample['name'],\n \"parent_id\": sample_set_id,\n **meta_user,\n **meta_controlled\n },\n \"index\": _SAMPLE_INDEX_NAME,\n \"id\": sample_id\n }\n yield sample_index\n","sub_path":"src/index_runner/es_indexers/sample_set.py","file_name":"sample_set.py","file_ext":"py","file_size_in_byte":5754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"10333880","text":"\"\"\"\nAuthor: Konstantin (k0nze) Lübeck\nLicense: BSD 3-Clause License\nCopyright (c) 
2021 Konstantin (k0nze) Lübeck\n\"\"\"\n\ntry:\n import Tkinter as Tk\n from Tkinter import ttk\nexcept ImportError: # ModuleNotFoundError exists only on Python 3; ImportError covers both 2 and 3\n import tkinter as Tk\n from tkinter import ttk\n\nclass IncludedSoftwareDialog(Tk.Toplevel):\n def __init__(self, master):\n Tk.Toplevel.__init__(self, master)\n\n self.minsize(190, 100)\n\n self.resizable(False, False)\n\n self.title(\"Included Software\")\n\n wrapper_frame = ttk.Frame(self)\n\n # Version\n python_label = ttk.Label(wrapper_frame, text=\"Python 3.9.6 - PSF License\")\n python_label.grid(row=0, column=0, columnspan=2)\n\n ffmpeg_label = ttk.Label(wrapper_frame, text=\"FFmpeg n4.4 - GPLv3 License\")\n ffmpeg_label.grid(row=1, column=0, columnspan=2)\n\n pillow_label = ttk.Label(wrapper_frame, text=\"Pillow 8.3.1 - HPND License\")\n pillow_label.grid(row=2, column=0, columnspan=2)\n\n azure_ttk_theme = ttk.Label(wrapper_frame, text=\"Azure TTK Theme 1.4.1 - LGPL v2.1\")\n azure_ttk_theme.grid(row=3, column=0, columnspan=2)\n\n # Close button\n close_button = ttk.Button(wrapper_frame, text=\"Close\", command=self.on_close).grid(row=4, column=0, columnspan=2, pady=10)\n\n wrapper_frame.grid(row=0, column=0, padx=10)\n\n #self.update()\n #print(self.winfo_height())\n #print(self.winfo_width())\n\n def on_close(self):\n self.destroy()\n\n\n","sub_path":"included_software_dialog.py","file_name":"included_software_dialog.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"6547164","text":"from datetime import datetime, date, time\nfrom datetime import timedelta, datetime, tzinfo\nclass GMT1(tzinfo):\n def utcoffset(self, dt):\n return timedelta(hours=1) + self.dst(dt)\n def dst(self, dt):\n # DST starts last Sunday in March\n d = datetime(dt.year, 4, 1)\n self.dston = d - timedelta(days=d.weekday() + 1)\n # DST ends last Sunday in October\n d = datetime(dt.year, 11, 1)\n self.dstoff = d - timedelta(days=d.weekday() + 1)\n if self.dston <= dt.replace(tzinfo=None) < self.dstoff:\n return timedelta(hours=1)\n else:\n return timedelta(0)\n def tzname(self,dt):\n return \"GMT +1\"\n \nclass GMT2(tzinfo):\n def utcoffset(self, dt):\n return timedelta(hours=2) + self.dst(dt)\n def dst(self, dt):\n d = datetime(dt.year, 4, 1)\n self.dston = d - timedelta(days=d.weekday() + 1)\n d = datetime(dt.year, 11, 1)\n self.dstoff = d - timedelta(days=d.weekday() + 1)\n if self.dston <= dt.replace(tzinfo=None) < self.dstoff:\n return timedelta(hours=1)\n else:\n return timedelta(0)\n def tzname(self,dt):\n return \"GMT +2\"\n \ngmt1 = GMT1()\n# Daylight Saving Time\ndt1 = datetime(2006, 11, 21, 16, 30, tzinfo=gmt1)\nprint(dt1.dst())\nprint(dt1.utcoffset())\ndt2 = datetime(2006, 6, 14, 13, 0, tzinfo=gmt1)\nprint(dt2.dst())\nprint(dt2.utcoffset())\n# Convert datetime to another time zone\ndt3 = dt2.astimezone(GMT2())\nprint(dt3)\nprint(dt2)\nprint(dt2.utctimetuple() == dt3.utctimetuple())\n\n","sub_path":"04/01/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"637064966","text":"# 2015-01-26 149 tests, 48 ms\nclass Solution(object):\n def minPatches(self, nums, n):\n \"\"\"\n :type nums: List[int]\n :type n: int\n :rtype: int\n \"\"\"\n i, patchCount, maxReach, numLength = 0, 0, 1, len(nums)\n while maxReach <= n:\n if i < numLength and nums[i] <= maxReach:\n maxReach += nums[i]\n i += 1\n else:\n maxReach <<= 1\n patchCount += 1 # add maxReach into nums\n return 
patchCount\n","sub_path":"LeetCode_Python_Accepted-chaoren/330_Patching_Array.py","file_name":"330_Patching_Array.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"320949576","text":"import mysklearn.myutils as myutils\nimport random\nimport numpy as np\nimport math\n\ndef train_test_split(X, y, test_size=0.33, random_state=None, shuffle=True):\n \"\"\"Split dataset into train and test sets (sublists) based on a test set size.\n\n Args:\n X(list of list of obj): The list of samples\n The shape of X is (n_samples, n_features)\n y(list of obj): The target y values (parallel to X)\n The shape of y is n_samples\n test_size(float or int): float for proportion of dataset to be in test set (e.g. 0.33 for a 2:1 split)\n or int for absolute number of instances to be in test set (e.g. 5 for 5 instances in test set)\n random_state(int): integer used for seeding a random number generator for reproducible results\n shuffle(bool): whether or not to randomize the order of the instances before splitting\n\n Returns:\n X_train(list of list of obj): The list of training samples\n X_test(list of list of obj): The list of testing samples\n y_train(list of obj): The list of target y values for training (parallel to X_train)\n y_test(list of obj): The list of target y values for testing (parallel to X_test)\n \n Note:\n Loosely based on sklearn's train_test_split(): https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html\n \"\"\"\n if random_state is not None:\n random.seed(random_state)\n if shuffle: \n newX = X\n newY = y\n for i in range(len(X)):\n index = random.randrange(0,len(X))\n newX[i], newX[index] = newX[index], newX[i]\n newY[i], newY[index] = newY[index], newY[i]\n X = newX\n y = newY\n randX = X\n randY = y\n if isinstance(test_size, float):\n split_index = int(len(X)*(1-test_size))\n else:\n split_index = len(X)-test_size\n return randX[:split_index], randX[split_index:], randY[:split_index], randY[split_index:]\n\ndef kfold_cross_validation(X, n_splits=5):\n \"\"\"Split dataset into cross validation folds.\n\n Args:\n X(list of list of obj): The list of samples\n The shape of X is (n_samples, n_features)\n n_splits(int): Number of folds.\n\n Returns:\n X_train_folds(list of list of int): The list of training set indices for each fold\n X_test_folds(list of list of int): The list of testing set indices for each fold\n\n Notes: \n The first n_samples % n_splits folds have size n_samples // n_splits + 1, \n other folds have size n_samples // n_splits, where n_samples is the number of samples\n (e.g. 11 samples and 4 splits, the sizes of the 4 folds are 3, 3, 3, 2 samples)\n Loosely based on sklearn's KFold split(): https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html\n \"\"\"\n split_range = math.ceil(len(X)/n_splits)\n test_data = []\n train_data = []\n start_test = 0\n for index in range(n_splits):\n test = []\n train = []\n for index in range(len(X)):\n if index >= start_test and index < start_test + split_range:\n test.append(index)\n else:\n train.append(index)\n test_data.append(test)\n train_data.append(train)\n start_test = start_test + split_range\n return train_data, test_data\n\ndef stratified_kfold_cross_validation(X, y, n_splits=5):\n \"\"\"Split dataset into stratified cross validation folds.\n\n Args:\n X(list of list of obj): The list of instances (samples). 
\n The shape of X is (n_samples, n_features)\n y(list of obj): The target y values (parallel to X). \n The shape of y is n_samples\n n_splits(int): Number of folds.\n \n Returns:\n X_train_folds(list of list of int): The list of training set indices for each fold.\n X_test_folds(list of list of int): The list of testing set indices for each fold.\n\n Notes: \n Loosely based on sklearn's StratifiedKFold split(): https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html#sklearn.model_selection.StratifiedKFold\n \"\"\"\n classification = []\n indexes = []\n for x in range(len(X)):\n if y[x] in classification:\n index = classification.index(y[x])\n indexes[index].append(x)\n else:\n classification.append(y[x])\n indexes += [[x]]\n partitioned = []\n for _ in range(n_splits):\n partitioned.append([])\n\n count = 0\n for classes in range(len(classification)):\n for index in indexes[classes]: \n partitioned[count].append(index)\n count+=1\n count = count%len(partitioned)\n X_train = []\n X_test = []\n \n for x in range(n_splits):\n test = []\n train = []\n for index in range(len(partitioned)):\n if index == x:\n test+=partitioned[index]\n else:\n train += partitioned[index]\n X_train.append(train)\n X_test.append(test)\n return X_train, X_test\n \n\ndef confusion_matrix(y_true, y_pred, labels):\n \"\"\"Compute confusion matrix to evaluate the accuracy of a classification.\n\n Args:\n y_true(list of obj): The ground_truth target y values\n The shape of y is n_samples\n y_pred(list of obj): The predicted target y values (parallel to y_true)\n The shape of y is n_samples\n labels(list of str): The list of all possible target y labels used to index the matrix\n\n Returns:\n matrix(list of list of int): Confusion matrix whose i-th row and j-th column entry \n indicates the number of samples with true label being i-th class \n and predicted label being j-th class\n\n Notes:\n Loosely based on sklearn's confusion_matrix(): https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html\n \"\"\"\n matrix = []\n row = []\n for x in range(len(labels)):\n row = []\n for x in range(len(labels)):\n row.append(0)\n matrix.append(row)\n \n for x in range(len(y_true)):\n i1 = labels.index(y_true[x])\n i2 = labels.index(y_pred[x])\n matrix[i1][i2] +=1\n return matrix \n","sub_path":"mysklearn/myevaluation.py","file_name":"myevaluation.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"458101042","text":"import collections\nimport csv\nimport itertools\nimport json\nimport logging\nimport os\nimport time\nfrom typing import Iterator\n\nimport fastavro\nimport numpy as np\nimport tensorflow as tf\n\nfrom gdmix.models.schemas import BAYESIAN_LINEAR_MODEL_SCHEMA\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nINTERCEPT = \"(INTERCEPT)\"\n\n\ndef try_write_avro_blocks(f, schema, records, suc_msg=None, err_msg=None):\n \"\"\"\n write a block into avro file. 
This is used continuously when the whole file does not fit in memory.\n\n :param f: file handle.\n :param schema: avro schema used by the writer.\n :param records: a set of records to be written to the avro file.\n :param suc_msg: message to print when write succeeds.\n :param err_msg: message to print when write fails.\n :return: none\n \"\"\"\n try:\n fastavro.writer(f, schema, records)\n if suc_msg:\n logger.info(suc_msg)\n except Exception as exp:\n if err_msg:\n logger.error(exp)\n logger.error(err_msg)\n raise\n\n\ndef load_linear_models_from_avro(model_file, feature_file):\n \"\"\"\n Load linear models from avro files.\n The models are in photon-ml format.\n Intercept is moved to the end of the coefficient array.\n :param model_file: Model avro file, photon-ml format\n :param feature_file: A file containing all features of the model (intercept excluded)\n :return:\n \"\"\"\n\n def get_one_model_weights(model_record, feature_map):\n \"\"\"\n Load a single model from avro record\n :param model_record: photon-ml LR model in avro record format\n :param feature_map: feature name to index map\n :return: a numpy array of the model coefficients, intercept is at the end. Elements are in np.float64.\n \"\"\"\n num_features = 0 if feature_map is None else len(feature_map)\n model_coefficients = np.zeros(num_features+1, dtype=np.float64)\n for ntv in model_record[\"means\"]:\n name, term, value = ntv['name'], ntv['term'], np.float64(ntv['value'])\n if name == INTERCEPT and term == '':\n model_coefficients[num_features] = value # Intercept at the end.\n elif feature_map is not None:\n feature_index = feature_map.get((name, term), None)\n if feature_index is not None: # Take only the features that in the current training dataset.\n model_coefficients[feature_index] = value\n return model_coefficients\n\n if feature_file is None:\n feature_map = None\n else:\n feature_map = get_feature_map(feature_file)\n with tf.io.gfile.GFile(model_file, 'rb') as fo:\n avro_reader = fastavro.reader(fo)\n return tuple(get_one_model_weights(record, feature_map) for record in avro_reader)\n\n\ndef add_dummy_weight(models):\n \"\"\"\n This function adds a dummy weight 0.0 to the first element of the weight vector.\n It should be only used for the intercept-only model where no feature is present.\n :param models: the models with intercept only.\n :return: models with zero prepend to the intercept.\n \"\"\"\n def process_one_model(model):\n model_coefficients = np.zeros(2, dtype=np.float64)\n model_coefficients[1] = model[0]\n return model_coefficients\n return tuple(process_one_model(m) for m in models)\n\n\ndef gen_one_avro_model(model_id, model_class, weight_indices, weight_values, bias, feature_list):\n \"\"\"\n generate the record for one LR model in photon-ml avro format\n :param model_id: model id\n :param model_class: model class\n :param weight_indices: LR weight vector indices\n :param weight_values: LR weight vector values\n :param bias: the bias/offset/intercept\n :param feature_list: corresponding feature names\n :return: a model in avro format\n \"\"\"\n record = {u'name': INTERCEPT, u'term': '', u'value': bias}\n records = {u'modelId': model_id, u'modelClass': model_class, u'means': [record], u'lossFunction': \"\"}\n if weight_indices is not None and weight_values is not None:\n for w_i, w_v in zip(weight_indices.flatten(), weight_values.flatten()):\n feat = feature_list[w_i]\n name, term = feat[0], feat[1]\n record = {u'name': name, u'term': term, u'value': w_v}\n records[u'means'].append(record)\n 
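# the intercept record stays first in 'means'; feature coefficients follow in feature_list order\n 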
return records\n\n\ndef export_linear_model_to_avro(model_ids,\n list_of_weight_indices,\n list_of_weight_values,\n biases,\n feature_file,\n output_file,\n model_log_interval=1000,\n model_class=\"com.linkedin.photon.ml.supervised.classification.LogisticRegressionModel\"):\n \"\"\"\n Export random effect logistic regression model in avro format for photon-ml to consume\n :param model_ids: a list of model ids used in generated avro file\n :param list_of_weight_indices: list of indices for entity-specific model weights\n :param list_of_weight_values: list of values for entity-specific model weights\n :param biases: list of entity bias terms\n :param feature_file: a file containing all the features, typically generated by avro2tf.\n :param output_file: full file path for the generated avro file.\n :param model_log_interval: write model every model_log_interval models.\n :param model_class: the model class defined by photon-ml.\n :return: None\n \"\"\"\n # STEP [1] - Read feature list\n feature_list = read_feature_list(feature_file) if feature_file else None\n\n # STEP [2] - Read number of features and models\n num_models = len(biases)\n logger.info(f\"To save {num_models} models.\")\n if feature_file:\n logger.info(f\"Found {len(feature_list)} features in {feature_file}\")\n\n # STEP [3]\n schema = fastavro.parse_schema(json.loads(BAYESIAN_LINEAR_MODEL_SCHEMA))\n\n def gen_records():\n if list_of_weight_indices is None or list_of_weight_values is None or feature_list is None:\n for i in range(num_models):\n yield gen_one_avro_model(str(model_ids[i]), model_class, None, None, biases[i], feature_list)\n else:\n for i in range(num_models):\n yield gen_one_avro_model(str(model_ids[i]), model_class, list_of_weight_indices[i],\n list_of_weight_values[i], biases[i], feature_list)\n batched_write_avro(gen_records(), output_file, schema, model_log_interval)\n logger.info(f\"dumped {num_models} models to avro file at {output_file}.\")\n\n\ndef read_feature_list(feature_file):\n \"\"\"\n Get feature names from the feature file.\n Note: intercept is not included here since it is not part of the raw data.\n :param feature_file: user provided feature file, each row is a \"name,term\" feature name\n :return: list of feature (name, term) tuple\n \"\"\"\n result = []\n with tf.io.gfile.GFile(feature_file) as f:\n f.seekable = lambda: False\n for row in csv.reader(f):\n assert len(row) == 2, f\"Each feature name should have exactly name and term only, but I got {row}.\"\n result.append(tuple(row))\n return result\n\n\ndef get_feature_map(feature_file):\n \"\"\"\n Get feature (name, term) -> index map.\n The index of a feature is the position of the feature in the file.\n The index starts from zero.\n :param feature_file: The file containing a list of features.\n :return: a dict of feature (name, term) and its index.\n \"\"\"\n return {feature: index for index, feature in enumerate(read_feature_list(feature_file))}\n\n\ndef read_json_file(file_path: str):\n \"\"\" Load a json file from a path.\n\n :param file_path: Path string to json file.\n :return: dict. 
The decoded json object.\n\n Raises IOError if path does not exist.\n Raises ValueError if load fails.\n \"\"\"\n\n if not tf.io.gfile.exists(file_path):\n raise IOError(f\"Path {file_path!r} does not exist.\")\n try:\n with tf.io.gfile.GFile(file_path) as json_file:\n return json.load(json_file)\n except Exception as e:\n raise ValueError(f\"Failed loading file {file_path!r}.\") from e\n\n\ndef copy_files(input_files, output_dir):\n \"\"\"\n Copy a list of files to the output directory.\n The destination files will be overwritten.\n :param input_files: a list of files\n :param output_dir: output directory\n :return: the list of copied files\n \"\"\"\n\n logger.info(\"Copy files to local\")\n if not tf.io.gfile.exists(output_dir):\n tf.io.gfile.mkdir(output_dir)\n start_time = time.time()\n copied_files = []\n for f in input_files:\n fname = os.path.join(output_dir, os.path.basename(f))\n tf.io.gfile.copy(f, fname, overwrite=True)\n copied_files.append(fname)\n logger.info(f\"Files copied to Local: {copied_files}\")\n logger.info(f\"--- {time.time() - start_time} seconds ---\")\n return copied_files\n\n\ndef namedtuple_with_defaults(typename, field_names, defaults=()):\n \"\"\"\n Namedtuple with default values is supported since 3.7, wrap it to be compatible with version <= 3.6\n :param typename: the type name of the namedtuple\n :param field_names: the field names of the namedtuple\n :param defaults: the default values of the namedtuple\n :return: namedtuple with defaults\n \"\"\"\n T = collections.namedtuple(typename, field_names)\n T.__new__.__defaults__ = (None,) * len(T._fields)\n prototype = T(**defaults) if isinstance(defaults, collections.Mapping) else T(*defaults)\n T.__new__.__defaults__ = tuple(prototype)\n return T\n\n\ndef batched_write_avro(records: Iterator, output_file, schema, write_frequency=1000, batch_size=1024):\n \"\"\" For the first block, the file needs to be opened in ‘w’ mode, while the\n rest of the blocks need the ‘a’ mode. This restriction makes it\n necessary to open the files at least twice, one for the first block,\n one for the remaining. So it’s not possible to put them into the\n while loop within a file context. 
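A typical call (assuming a parsed fastavro schema and an iterator of matching records): batched_write_avro(iter(records), output_file, schema). 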
\"\"\"\n f = None\n t0 = time.time()\n n_batch = 0\n logger.info(f\"Writing to {output_file} with batch size of {batch_size}.\")\n try:\n for batch in _chunked_iterator(records, batch_size):\n if n_batch == 0:\n with tf.io.gfile.GFile(output_file, 'wb') as f0: # Create the file in 'w' mode\n f0.seekable = lambda: False\n try_write_avro_blocks(f0, schema, batch, None, create_error_message(n_batch, output_file))\n f = tf.io.gfile.GFile(output_file, 'ab+') # reopen the file in 'a' mode for later writes\n f.seekable = f.readable = lambda: True\n f.seek(0, 2) # seek to the end of the file, 0 is offset, 2 means the end of file\n else:\n try_write_avro_blocks(f, schema, batch, None, create_error_message(n_batch, output_file))\n n_batch += 1\n if n_batch % write_frequency == 0:\n delta_time = time.time() - t0\n logger.info(f\"nbatch = {n_batch}, deltaT = {delta_time:0.2f} seconds, speed = {n_batch / delta_time :0.2f} batches/sec\")\n logger.info(f\"Finished writing to {output_file}.\")\n finally:\n f and f.close()\n\n\ndef _chunked_iterator(iterator: Iterator, chuck_size):\n while True:\n chunk_it = itertools.islice(iterator, chuck_size)\n try:\n first_el = next(chunk_it)\n yield itertools.chain((first_el,), chunk_it)\n except StopIteration:\n return\n\n\ndef create_error_message(n_batch, output_file) -> str:\n return f'An error occurred while writing batch #{n_batch} to path {output_file}'\n\n\ndef dataset_reader(iterator):\n # Iterate through TF dataset in a throttled manner\n # (Forking after the TensorFlow runtime creates internal threads is unsafe, use config provided in this\n # link -\n # https://github.com/tensorflow/tensorflow/issues/14442)\n with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(use_per_session_threads=True)) as sess:\n sess.run(iterator.initializer)\n while True:\n try:\n # Extract and process raw entity data\n yield sess.run(iterator.get_next())\n except tf.errors.OutOfRangeError:\n break\n\n\ndef get_inference_output_avro_schema(metadata, has_logits_per_coordinate, schema_params, has_weight=False):\n fields = [{'name': schema_params.uid_column_name, 'type': 'long'}, {'name': schema_params.prediction_score_column_name, 'type': 'float'},\n {'name': schema_params.label_column_name, 'type': ['null', 'int'], \"default\": None}]\n if has_weight or metadata.get(schema_params.weight_column_name) is not None:\n fields.append({'name': schema_params.weight_column_name, 'type': 'float'})\n if has_logits_per_coordinate:\n fields.append({'name': schema_params.prediction_score_per_coordinate_column_name, 'type': 'float'})\n return {'name': 'validation_result', 'type': 'record', 'fields': fields}\n","sub_path":"gdmix-trainer/src/gdmix/util/io_utils.py","file_name":"io_utils.py","file_ext":"py","file_size_in_byte":13220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"149814662","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom util import full_stack\nfrom util import exec_remote_command\nfrom util import build_context_script\nfrom dbaas_cloudstack.models import HostAttr as CS_HostAttr\nfrom workflow.steps.util.base import BaseStep\nfrom workflow.exceptions.error_codes import DBAAS_0020\nfrom workflow.steps.util import test_bash_script_error\nfrom workflow.steps.mongodb.util import build_mongodb_connect_string\nfrom workflow.steps.mongodb.util import build_remove_replica_set_members_script\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass RemoveInstancesReplicaSet(BaseStep):\n\n def __unicode__(self):\n return \"Removing 
instances from Replica Set...\"\n\n def do(self, workflow_dict):\n try:\n\n databaseinfra = workflow_dict['databaseinfra']\n target_instances = []\n\n for source_instance in workflow_dict['source_instances']:\n target_instances.append(source_instance.future_instance)\n\n connect_string = build_mongodb_connect_string(instances=target_instances,\n databaseinfra=databaseinfra)\n\n context_dict = {\n 'CONNECT_STRING': connect_string,\n 'SECUNDARY_ONE': \"{}:{}\".format(workflow_dict['source_instances'][0].address, workflow_dict['source_instances'][0].port),\n 'SECUNDARY_TWO': \"{}:{}\".format(workflow_dict['source_instances'][1].address, workflow_dict['source_instances'][1].port),\n 'ARBITER': \"{}:{}\".format(workflow_dict['source_instances'][2].address, workflow_dict['source_instances'][2].port),\n }\n\n script = test_bash_script_error()\n script += build_remove_replica_set_members_script()\n\n script = build_context_script(context_dict, script)\n output = {}\n\n host = workflow_dict['source_instances'][0].hostname\n cs_host_attr = CS_HostAttr.objects.get(host=host)\n return_code = exec_remote_command(server=host.address,\n username=cs_host_attr.vm_user,\n password=cs_host_attr.vm_password,\n command=script,\n output=output)\n LOG.info(output)\n if return_code != 0:\n raise Exception(str(output))\n\n return True\n except Exception:\n traceback = full_stack()\n\n workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)\n workflow_dict['exceptions']['traceback'].append(traceback)\n\n return False\n\n def undo(self, workflow_dict):\n LOG.info(\"Running undo...\")\n try:\n\n return True\n except Exception:\n traceback = full_stack()\n\n workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)\n workflow_dict['exceptions']['traceback'].append(traceback)\n\n return False\n","sub_path":"dbaas/workflow/steps/mongodb/region_migration/remove_old_instances_replica_set.py","file_name":"remove_old_instances_replica_set.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"96131784","text":"from utils import open_page, select_crossfit_workout, parse_wod, zenplanner_login,navigate_to_workout_page\nfrom selenium.webdriver.support.ui import Select\nimport json\nfrom datetime import date\n\n\nif __name__ == '__main__':\n login_url = 'https://crossfitjohnscreek.sites.zenplanner.com/login.cfm'\n\n # Login\n driver = open_page(login_url)\n zenplanner_login(driver)\n # Find the workout\n now_date = date.today().strftime('%Y-%m-%d')\n workout_link = driver.find_element_by_partial_link_text('Workouts')\n workout_link.click()\n day = driver.find_element_by_id('block_{}'.format(now_date))\n day.click()\n view = driver.find_element_by_link_text('View')\n view.click()\n # Choose the right session\n select = Select(driver.find_element_by_name('objectid'))\n select.select_by_visible_text('CrossFit - All levels')\n\n workout = driver.find_element_by_class_name('workout')\n wod_breakdown = parse_wod(workout)\n driver.close()\n\n print(json.dumps(wod_breakdown, indent=2)) ","sub_path":"daily_leaderboard_scraper.py","file_name":"daily_leaderboard_scraper.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"339503537","text":"# coding : utf-8\n# author : ilkyu lee\n# mail : xxjjvxb@gmail.com\n\nimport numpy as np\nimport pandas as pd\nimport glob\n\nclass data_manager(object):\n\n def __init__(self, batchsize=1):\n if batchsize 
== 1:\n print('batchsize is 1')\n\n def cifar10(self):\n train = \"../../../bigdata/cifar-10-batches-py/data_batch*\"\n test = \"../../../bigdata/cifar-10-batches-py/test_batch*\"\n\n self.trainList = glob.glob(train)\n self.testList = glob.glob(test)\n\n def getTrain(self):\n\n for each in self.trainList:\n print(each)\n\n yield X, Y\n\n def getTest(self):\n\n return X, Y\n\n\n\nif __name__ == \"__main__\":\n\n dm = data_manager()\n dm.cifar10()\n","sub_path":"data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"560754217","text":"\nimport acm\nimport RiskFactorCalculationTools\n\ndef configurationForSBAMeasure( path ):\n params = {}\n params[ acm.FSymbol(\"RiskFactorPathValues\") ] = StaticArray( list(path)[::-1] )\n config = acm.Sheet().Column().ConfigurationFromColumnParameterDefinitionNamesAndValues(params, None )\n return config\n \ndef BaseParametersForVectorItemDimension( populator ):\n extParams = {}\n if populator.ShiftType() == \"Absolute\":\n extParams[ acm.FSymbol(\"RiskFactorTopOnlyOverride\") ] = False\n return extParams\n \ndef StaticDictionary( dictionary ):\n return acm.FStaticDictionary( dictionary )\n\ndef StaticArray( arr ):\n return acm.FStaticArray( arr )\n \n\nclass RiskFactorDimensionPopulator(object):\n\n def __init__( self, rfColls, dimensionInformation, isVectorItem ):\n \n self.m_isVectorItem = isVectorItem\n self.m_dimensionInformation = dimensionInformation\n self.m_commonParams = { RiskFactorCalculationTools.s_riskFactorCollectionSym : rfColls,\n RiskFactorCalculationTools.s_shiftShapeSym : acm.FSymbol( self.ShiftShape() )\n }\n \n pe, pc = self.PerimeterInformation()\n \n if pe:\n self.m_commonParams[RiskFactorCalculationTools.s_perimeter] = pe\n self.m_commonParams[RiskFactorCalculationTools.s_perimeterCriteria] = pc\n\n def ShiftShape( self ):\n assert( 0 )\n\n def ShiftType( self ):\n assert( 0 )\n\n def ShiftDivisor( self, shiftSize ):\n assert( 0 )\n\n def PerimeterInformation( self ):\n assert( 0 )\n\n def ShiftSize( self ):\n assert( 0 )\n\n def ShiftSizeAndDivisor( self ):\n shiftSize = self.ShiftSize()\n return ( shiftSize, self.ShiftDivisor( shiftSize ) )\n\n def PopulateCommon( self ):\n shiftDict = dict( self.m_commonParams )\n shiftSize, shiftDivisor = self.ShiftSizeAndDivisor( )\n if shiftSize:\n shiftDict[RiskFactorCalculationTools.s_shiftSizeSym] = shiftSize\n if shiftDivisor:\n shiftDict[RiskFactorCalculationTools.s_shiftDivisorSym] = shiftDivisor\n shiftDict[ RiskFactorCalculationTools.s_shiftTypeSym] = acm.FSymbol( self.ShiftType() )\n return shiftDict\n \n def Populate(self, path, dimensions, instances, shiftDataTarget):\n points = [self.m_dimensionInformation.InstanceValue( instance ) for instance in instances]\n shifts = []\n labels = []\n pSet = set()\n for point in points:\n if not point in pSet:\n pSet.add( point )\n labels.append(point)\n self.SetPath( self.m_dimensionInformation, point )\n shiftDict = self.PopulateCommon()\n shiftDict[ RiskFactorCalculationTools.s_riskFactorPathValues ] = StaticArray( path[::-1] )\n shiftDict[ RiskFactorCalculationTools.s_riskFactorPathKeys ] = StaticArray( dimensions[::-1] )\n \n if str(self.ShiftType()) == \"Absolute\":\n shiftDict[ RiskFactorCalculationTools.s_topOnlyExtensionGroup ] = False\n \n if not self.IsVectorItemPopulator():\n shiftDict[self.m_dimensionInformation.m_id] = acm.FSymbol( point )\n shifts.append( [StaticDictionary( shiftDict )] )\n else:\n 
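# vector-item populators append the shift dictionary directly, without the per-point list wrapper\n 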
shifts.append( StaticDictionary( shiftDict ) )\n \n shiftDataTarget.AddShiftVector( [acm.FSymbol(i) for i in path], shifts, labels )\n\n def IsVectorItemPopulator( self ):\n return self.m_isVectorItem\n\n def SetPath( self, dimensionInformation, item ):\n assert( 0 )\n\n\ndef PopulateShiftDataRecursive( populator, instances, dependentsOn, dimensions, path, shiftData ):\n if len(dependentsOn) == 0:\n populator.Populate( path, dimensions, instances, shiftData )\n else:\n depDim = dependentsOn[0]\n nextDimensions = list(dimensions)\n nextDimensions.append( depDim.AsDictionary() )\n depDimItems = RiskFactorCalculationTools.FilteredInstancesByItemId( depDim, instances )\n for item in depDimItems.keys():\n populator.SetPath( depDim, item )\n newPath = list(path)\n newPath.append( item )\n PopulateShiftDataRecursive( populator, depDimItems[item], dependentsOn[1:], nextDimensions, newPath, shiftData )\n\n","sub_path":"Extensions/Default/FPythonCode/RiskFactorDimensionPopulator.py","file_name":"RiskFactorDimensionPopulator.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"253930574","text":"###############################################################################################33\n# Google calendar event sheduler\n# v0.2\n# + variable names refactored\n# + added github repo: \n# v0.1\n#\n\n\n#from __future__ import print_function\n#import apiclient.discovery\n#import apiclient.service\n#from apiclient.service import service\n\nfrom apiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\nimport datetime, json, csv\nimport rfc3339\nimport iso8601\nfrom pytz import timezone\n\n###############################################################################################33\n# Reading data from CSV file\n#\nsettings = json.load(open('settings.json'))\nevents = []\n\nwith open(settings['csv'], newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n events.append(row)\n\ndef findNextEvent(event):\n ret = None\n for e in events:\n if (e[settings['PROJECT_NAME']] == event[settings['PROJECT_NAME']]) and (e[settings['PREV_EVENT']] == event[settings['EVENT_NAME']]):\n ret = e\n return ret\n\n\n###############################################################################################33\n# Build ordered event list\n#\nscheduled_events = {}\ntime_max = \"00000000\"\ntime_min = \"99999999\"\nfor event in events:\n current_project = event[settings['PROJECT_NAME']]\n if current_project in scheduled_events:\n continue\n else:\n first_event = None\n for event_tmp in events:\n if (event_tmp[settings['START_DATE']] < time_min):\n time_min = event_tmp[settings['START_DATE']]\n if (event_tmp[settings['END_DATE']] > time_min):\n time_max = event_tmp[settings['END_DATE']]\n if (event_tmp[settings['PROJECT_NAME']] == current_project) and (event_tmp[settings['PREV_EVENT']] == \"\"):\n first_event = event_tmp\n if first_event == None:\n print(\"Error! 
Initial event not found!!!\")\n else:\n scheduled_events[current_project] = [first_event]\n next_event = findNextEvent(first_event)\n while next_event != None:\n scheduled_events[current_project].append(next_event)\n next_event = findNextEvent(next_event)\n\n###############################################################################################33\n# Setup the Calendar API\n#\n\nSCOPES = 'https://www.googleapis.com/auth/calendar'\nstore = file.Storage('credentials.json')\ncreds = store.get()\nif not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\n creds = tools.run_flow(flow, store)\nservice = build('calendar', 'v3', http=creds.authorize(Http()))\n\nglobal_min_date = datetime.datetime(int(time_min[0:4]), int(time_min[4:6]), int(time_min[6:8]))\nglobal_max_date = datetime.datetime(int(time_max[0:4]), int(time_max[4:6]), int(time_max[6:8]))\n\nprint(\"Get calendars:\")\npage_token = None\nwhile True:\n calendar_list = service.calendarList().list(pageToken=page_token).execute()\n for calendar in calendar_list['items']:\n print(\"id: {} summary: {} kind: {} etag: {}\".format(calendar['id'],calendar['summary'],calendar['kind'],calendar['etag']))\n page_token = calendar_list.get('nextPageToken')\n if not page_token:\n break\n\n\nevents_result = service.events().list(calendarId=settings[\"calendar_id\"],\n timeMin=global_min_date.isoformat() + 'Z', timeMax=global_max_date.isoformat() + 'Z',\n maxResults=100, singleEvents=True,\n orderBy='startTime').execute()\ncalendar_events = events_result.get('items', [])\n\ntime_zone = events_result['timeZone']\nprint(\"TIMEZONE: {}\".format(time_zone))\n\n###############################################################################################33\n# Main loop\n#\nlast_event_date = None\n\nfor project in scheduled_events:\n print(\"Start handling project:{}\".format(project))\n for event in scheduled_events[project]:\n print(\"===\\nevent:{}\\nglobal min: {} max: {}===\\n\".format(event, global_min_date, global_max_date))\n\n# update event list to include newly added events\n events_result = service.events().list(calendarId=settings[\"calendar_id\"],\n timeMin=global_min_date.isoformat() + 'Z', timeMax=global_max_date.isoformat() + 'Z',\n maxResults=100, singleEvents=True,\n orderBy='startTime').execute()\n calendar_events = events_result.get('items', [])\n\n event_earliest_day = event[settings['START_DATE']]\n event_latest_day = event[settings['END_DATE']]\n event_gap = int(event[settings['GAP']]) if event[settings['GAP']] != \"\" else 0\n print(\"event gap: {}\".format(event[settings['GAP']]))\n\n event_earliest_date = datetime.datetime(int(event_earliest_day[0:4]), int(event_earliest_day[4:6]), int(event_earliest_day[6:8]))\n event_latest_date = datetime.datetime(int(event_latest_day[0:4]), int(event_latest_day[4:6]), int(event_latest_day[6:8]))\n\n###############################################################################################33\n# Loop inside event Start/End dates\n#\n new_event_reached = False\n while event_earliest_date < event_latest_date and not new_event_reached:\n if event_earliest_date < global_min_date:\n event_earliest_date = global_min_date\n if last_event_date != None and event_gap > 0:\n print(\"Gap detected! 
Time shifted from: {}\".format(event_earliest_date))\n event_earliest_date = last_event_date + datetime.timedelta(days=event_gap)\n print(\"To: {}\".format(event_earliest_date))\n week_day = str(event_earliest_date.weekday())\n print(\"event earliest date: {} latest date: {}\".format(event_earliest_date, event_latest_date))\n print(\"week day:{}\\nsettings:{}\".format(week_day, settings['weekly_schedule']))\n if week_day in settings['weekly_schedule'] and len(settings['weekly_schedule'][week_day]) > 0:\n m = settings['weekly_schedule'][week_day]['start'].split(\":\")\n m = int(m[0]) * 60 + int(m[1])\n print(\"Minutes start: {}\".format(m))\n event_min_date = event_earliest_date + datetime.timedelta(minutes=m)\n event_start_date = event_min_date\n event_end_date = event_start_date + datetime.timedelta(hours=int(event[settings['HOURS']]))\n m = settings['weekly_schedule'][week_day]['end'].split(\":\")\n m = int(m[0]) * 60 + int(m[1])\n print(\"Minutes end: {}\".format(m))\n event_max_date = event_earliest_date + datetime.timedelta(minutes=m)\n\n print(\"event dates => min: {} start: {} end: {} max: {}\".format(event_min_date, event_start_date, event_end_date, event_max_date))\n print(\"start checking calendar events...\\n\")\n\n while event_end_date < event_max_date:\n all_events_old = True\n for calendar_event in calendar_events:\n# print(\"EVENT: {}\".format(calendar_event))\n if ('dateTime' not in calendar_event['start']):\n continue\n calendar_event_start_date = iso8601.parse_date(calendar_event['start']['dateTime']).replace(tzinfo=None)\n calendar_event_end_date = iso8601.parse_date(calendar_event['end']['dateTime']).replace(tzinfo=None) \n print(\"{} => start: {} end: {}\\n---\".format(calendar_event['summary'], calendar_event_start_date, calendar_event_end_date))\n# if calendar_event_end_date < event_earliest_date:\n if calendar_event_end_date < event_min_date:\n print(\"old event {} < {}\".format(calendar_event_end_date, event_earliest_date))\n if all_events_old and calendar_event != calendar_events[-1]:\n continue\n else:\n print(\"all events was OLD!!!\")\n else:\n all_events_old = False\n if (event_min_date <= event_start_date < event_max_date) and (event_min_date < event_end_date <= event_max_date):\n# if (event_start_date < calendar_event_start_date and event_end_date <= calendar_event_start_date) or (event_start_date >= calendar_event_end_date and event_end_date >= calendar_event_end_date):\n if (event_end_date <= calendar_event_start_date) or (event_start_date >= calendar_event_end_date):\n new_event = {\n 'summary': \"{}-{}\".format(event[settings['PROJECT_NAME']], event[settings['EVENT_NAME']]),\n 'start': {\n 'dateTime': rfc3339.rfc3339(timezone(time_zone).localize(event_start_date)),\n },\n 'end': {\n 'dateTime': rfc3339.rfc3339(timezone(time_zone).localize(event_end_date)),\n },\n }\n print(\"New Event generation: {} => s: {} e: {}\".format(new_event[\"summary\"], event_start_date, event_end_date))\n new_event = service.events().insert(calendarId=settings[\"calendar_id\"], body=new_event).execute()\n print('Event created: %s' % (new_event.get('htmlLink')))\n# event_start_date = event_end_date\n\n# shift global min date to latest event\n# global_min_date = last_event_date\n# global_min_date = event_end_date\n# exit from event loop\n event_earliest_date = event_end_date\n# exit from day loop\n event_end_date = event_max_date\n new_event_reached = True\n last_event_date = event_start_date.replace(hour=0, minute=0)\n global_min_date = last_event_date\n print(\"event dates => 
min: {} start: {} end: {} max: {}\".format(event_min_date, event_start_date, event_end_date, event_max_date))\n break\n# continue\n else:\n print(\"scheduled event start time overlaps with existing event\")\n if calendar_event_start_date.day > event_start_date.day:\n event_start_date = event_max_date\n print(\"calendar_event_start_date.day > event_start_date.day: jump to next day\")\n break\n event_start_date = calendar_event_end_date\n event_end_date = event_start_date + datetime.timedelta(hours=int(event[settings['HOURS']]))\n if event_end_date > event_max_date:\n event_start_date = event_max_date\n global_min_date = global_min_date + datetime.timedelta(days=1)\n print(\"no more space to fit scheduled event wihin currend day: jump to next day: {}\".format(global_min_date))\n break\n else:\n print(\"current dates out of day range! {0} <= {2} < {1} || {0} < {3} <= {1}\".format(\n event_min_date,event_max_date,event_start_date,event_end_date\n ))\n else:\n print(\"Non working day!!! Skipped\")\n# global_min_date = global_min_date + datetime.timedelta(days=1)\n event_earliest_date = event_earliest_date + datetime.timedelta(days=1)\n \n if not calendar_events:\n print('No upcoming events found.')\n","sub_path":"eventsheduler.py","file_name":"eventsheduler.py","file_ext":"py","file_size_in_byte":12358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"18782611","text":"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nimport vplot as vpl\nimport sys\nimport os\nimport subprocess\n\n\n# Check correct number of arguments\nif (len(sys.argv) != 2):\n print('ERROR: Incorrect number of arguments.')\n print('Usage: '+sys.argv[0]+' ')\n exit(1)\nif (sys.argv[1] != 'pdf' and sys.argv[1] != 'png'):\n print('ERROR: Unknown file format: '+sys.argv[1])\n print('Options are: pdf, png')\n exit(1)\n\nmpl.rcParams['figure.figsize'] = (6.5,6)\nmpl.rcParams['font.size'] = 10.0\n\n# Run simulations\nos.chdir('Lopez12CPL/auto')\nprint('Running Lopez12CPL-Auto.')\nsubprocess.call(['vplanet', 'vpl.in'])\n\nos.chdir('../Bondi')\nprint('Running Lopez12CPL-Bondi.')\nsubprocess.call(['vplanet', 'vpl.in'])\n\nos.chdir('../ELim')\nprint('Running Lopez12CPL-ELim.')\nsubprocess.call(['vplanet', 'vpl.in'])\n\nos.chdir('../RR')\nprint('Running Lopez12CPL-RR.')\nsubprocess.call(['vplanet', 'vpl.in'])\n\nos.chdir('../..')\n\n# Load data\ncplauto = vpl.GetOutput(\"./Lopez12CPL/Auto\")\ncplbondi = vpl.GetOutput(\"./Lopez12CPL/Bondi\")\ncplelim = vpl.GetOutput(\"./Lopez12CPL/ELim\")\ncplrr = vpl.GetOutput(\"./Lopez12CPL/RR\")\n\n# Plot\nfig, axes = plt.subplots(nrows=3, ncols=2)\n\ntimeauto = cplauto.auto.Time/1e6\ntimebondi = cplbondi.bondi.Time/1e6\ntimeelim = cplelim.el.Time/1e6\ntimerr = cplrr.rr.Time/1e6\n\n## Upper left: Envelope Mass ##\naxes[0,0].plot(timeauto, cplauto.auto.EnvelopeMass,color='k')\naxes[0,0].plot(timebondi, cplbondi.bondi.EnvelopeMass, color=vpl.colors.red)\naxes[0,0].plot(timeelim, cplelim.el.EnvelopeMass,color=vpl.colors.dark_blue)\naxes[0,0].plot(timerr, cplrr.rr.EnvelopeMass, color=vpl.colors.pale_blue)\n# Format\naxes[0,0].set_ylim(-0.02, 1.1)\naxes[0,0].set_ylabel(r\"Envelope Mass (M$_\\oplus$)\")\naxes[0,0].set_xlabel('Time (Myr)')\naxes[0,0].set_xlim(0,1)\n\n## Upper right: Radius ##\naxes[0,1].plot(timeauto, cplauto.auto.PlanetRadius, color='k')\naxes[0,1].plot(timebondi, cplbondi.bondi.PlanetRadius, color=vpl.colors.red)\naxes[0,1].plot(timeelim, cplelim.el.PlanetRadius, 
color=vpl.colors.dark_blue)\naxes[0,1].plot(timerr, cplrr.rr.PlanetRadius, color=vpl.colors.pale_blue)\n# Format\naxes[0,1].set_ylim(0,35)\naxes[0,1].set_ylabel(r'Radius (R$_\\oplus$)')\naxes[0,1].set_xlabel('Time (Myr)')\naxes[0,1].set_xlim(0,1)\n\n## Middle left: semi-major axis ##\naxes[1,0].plot(timeauto, cplauto.auto.SemiMajorAxis, color='k')\naxes[1,0].plot(timebondi, cplbondi.bondi.SemiMajorAxis, color=vpl.colors.red)\naxes[1,0].plot(timeelim, cplelim.el.SemiMajorAxis, color=vpl.colors.dark_blue)\naxes[1,0].plot(timerr, cplrr.rr.SemiMajorAxis, color=vpl.colors.pale_blue)\n# Format\naxes[1,0].set_ylim(0.0949,0.101)\naxes[1,0].set_ylabel('Semi-Major Axis (AU)')\naxes[1,0].set_xlabel('Time (Myr)')\naxes[1,0].set_xlim(0,1)\n\n## Middle Right: Eccentricity ##\naxes[1,1].plot(timeauto, cplauto.auto.Eccentricity, color='k')\naxes[1,1].plot(timebondi, cplbondi.bondi.Eccentricity, color=vpl.colors.red)\naxes[1,1].plot(timeelim, cplelim.el.Eccentricity, color=vpl.colors.dark_blue)\naxes[1,1].plot(timerr, cplrr.rr.Eccentricity, color=vpl.colors.pale_blue)\n# Format\naxes[1,1].set_ylim(0.05, 0.21)\naxes[1,1].set_ylabel('Eccentricity')\naxes[1,1].set_xlabel('Time (Myr)')\naxes[1,1].set_xlim(0,1)\n\n## Lower left: Rotation Period ##\naxes[2,0].plot(timeauto, cplauto.auto.RotPer, color='k')\naxes[2,0].plot(timebondi, cplbondi.bondi.RotPer,color=vpl.colors.red)\naxes[2,0].plot(timeelim, cplelim.el.RotPer, color=vpl.colors.dark_blue)\naxes[2,0].plot(timerr, cplrr.rr.RotPer,color=vpl.colors.pale_blue)\n# Format\naxes[2,0].set_xlabel(\"Time (yr)\")\naxes[2,0].set_ylim(0,12)\naxes[2,0].set_ylabel('Rotation Period (days)')\naxes[2,0].set_xlabel('Time (Myr)')\naxes[2,0].set_xlim(0,1)\n\n## Lower right: Obliquity ##\naxes[2,1].plot(timeauto, cplauto.auto.Obliquity, color='k',label='Auto')\naxes[2,1].plot(timebondi, cplbondi.bondi.Obliquity,color=vpl.colors.red,label='Bondi')\naxes[2,1].plot(timeelim, cplelim.el.Obliquity, color=vpl.colors.dark_blue,label='E-Lim')\naxes[2,1].plot(timerr, cplrr.rr.Obliquity,color=vpl.colors.pale_blue,label='RR-Lim')\n# Format\naxes[2,1].set_xlabel(\"Time (yr)\")\naxes[2,1].set_ylim(-1,100)\naxes[2,1].set_ylabel('Obliquity (degrees)')\naxes[2,1].set_xlabel('Time (Myr)')\naxes[2,1].set_xlim(0,1)\naxes[2,1].legend(loc=\"upper right\", fontsize=8, ncol=1)\n\nfor ax in axes.flatten():\n # Set rasterization\n ax.set_rasterization_zorder(0)\n\nfig.tight_layout()\nif (sys.argv[1] == 'pdf'):\n plt.savefig('Lopez12CPL.pdf', bbox_inches=\"tight\", dpi=200)\nif (sys.argv[1] == 'png'):\n plt.savefig('Lopez12CPL.png', bbox_inches=\"tight\", dpi=200)\nplt.close()\n","sub_path":"examples/HLossTides/makeplot.py","file_name":"makeplot.py","file_ext":"py","file_size_in_byte":4566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"137686199","text":"#!/bin/python3\n\nimport requests\nimport pandas\nfrom bs4 import BeautifulSoup\n\n\n# Updated URL: r = requests.get(\"http://www.pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/\", headers={'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'})\n\nbase_url = \"https://www.century21.com\"\n\n# Search site above for appropriate URL, the one below will not load correctly.\nr = requests.get(\"https://www.century21.com/real-estate/city-stateabbreviation/\")\ncontent = r.content\n\nsoup = BeautifulSoup(content, \"html.parser\")\n\nresults = soup.find_all('div', {'class':'results-label'})\ntotal_listings = results[0].find('strong').text.replace('(', 
'').replace(')', '')\n\nprint(\"Total listings: \" + total_listings + \"\\n\")\n\nproperties = soup.find_all('div',{'class':'property-card'}, limit=None)\nproperties_list = []\n\nfor item in properties:\n d = {}\n try:\n d['Price'] = properties[properties.index(item)].find('a',{'class':'listing-price'}).text.strip()\n except:\n d['Price'] = None\n try:\n d['Beds'] = properties[properties.index(item)].find('div',{'class':'property-beds'}).text.strip()\n except:\n d['Beds'] = None\n try:\n d['Baths'] = properties[properties.index(item)].find('div',{'class':'property-baths'}).text.strip()\n except:\n d['Baths'] = None\n try:\n d['SqFeet'] = properties[properties.index(item)].find('div',{'class':'property-sqft'}).text.strip()\n except:\n d['SqFeet'] = None\n try:\n d['Address'] = properties[properties.index(item)].find('div',{'class':'property-address'}).text.strip()\n except:\n d['Address'] = None\n try:\n d['City'] = properties[properties.index(item)].find('div',{'class':'property-city'}).text.strip()\n except:\n d['City'] = None\n try:\n d['URL'] = base_url + properties[properties.index(item)].find('a',{'class':'listing-price'})['href']\n except:\n d['URL'] = None\n \n \n \n \n \n \n\n # print(price)\n # print(beds)\n # print(baths)\n # print(sqfeet)\n # print(address)\n # print(city)\n # print(base_url + link)\n # print()\n properties_list.append(d)\n\ndf = pandas.DataFrame(properties_list)\ndf.to_csv(\"output.csv\")","sub_path":"web_scraping/real_estate_scraper.py","file_name":"real_estate_scraper.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"554388325","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport math\nimport numpy as np\nimport os\nimport scipy.io.wavfile as wav\nfrom matplotlib.pyplot import plot, show, title\nfrom lib import config\n\n\n# load a wav file into an array\ndef load_wav(filename):\n data = np.array([])\n if os.path.splitext(filename)[1] == '.wav':\n (_, data) = wav.read(filename)\n if len(data.shape) == 2: # if stereo, convert to mono\n data = data[:, 0]\n return data\n\n\ndef rms(series):\n return math.sqrt(sum(series ** 2) / series.size)\n\n\ndef normalize(series):\n return series / rms(series)\n\n\ndef extract_press(f, config, plot_wav=False):\n keypress_list = []\n\n data = load_wav(f)\n rem = len(data) % 441\n data = np.array(data[:len(data) - rem]) # drop the trailing segment shorter than 10ms\n\n sample_length = (44100 * config.keypress_win_length) / 1000\n\n energy = []\n for x in range(0, len(data) - 440):\n energy.append(np.sum(np.absolute(np.fft.fft(data[x:x + 440])))) # sum of FFT magnitudes represents energy\n\n energy = np.array(energy)\n energy_threshold = np.percentile(energy, config.keypress_event_threshold)\n # segmentation threshold: config.keypress_event_threshold is a percentile; energy above it indicates a keypress\n\n x = 0\n keypress_count = 0\n keypress_start_win_num = []\n\n past_x = - config.keystroke_min_interval - 1\n while x < energy.size:\n if energy[x] >= energy_threshold: # if the energy exceeds the threshold\n if x - past_x >= config.keystroke_min_interval: # if past the minimum interval\n # It is a keypress event (maybe)\n keypress = normalize(data[x:x + sample_length]) # normalize the data; sample_length is the length of one slice\n past_x = x\n keypress_list.append(keypress)\n keypress_count += 1\n keypress_start_win_num.append(x)\n\n x = past_x + config.keystroke_min_interval\n else:\n x += 1\n\n # print \"{} keystroke(s) detected.\".format(keypress_count)\n\n if plot_wav:\n energy = np.array(energy)\n if energy.any():\n X = np.array(keypress_start_win_num)\n Y = np.array([energy[x] for x in keypress_start_win_num])\n 
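# overlay the detected keypress onsets as red '+' markers on the energy curve\n 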
title('Energy')\n plot(energy)\n plot(X, Y, 'r+')\n show()\n\n return keypress_list\n\n\nif __name__ == '__main__':\n pwd = os.getcwd()\n father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + \".\") # 当前文件的父路径\n\n test_audio_path = father_path + '\\\\data\\\\test\\\\test4.wav'\n extract_press(test_audio_path, config, plot_wav=True)\n","sub_path":"lib/extractKeypresses.py","file_name":"extractKeypresses.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"56641557","text":"import argparse\nimport os\nimport os.path as osp\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nimport random\nimport pdb\nimport math\nfrom distutils.version import LooseVersion\n\nimport network.network as network\nimport utils.loss as loss\nimport utils.lr_schedule as lr_schedule\nimport dataset.preprocess as prep\nfrom dataset.dataloader import ImageList, ImageSourceList\n\n##test the model\ndef image_classification_test(loader, model, heuristic=False):\n start_test = True\n with torch.no_grad():\n iter_test = iter(loader[\"test\"])\n for i in range(len(loader['test'])):\n data = iter_test.next()\n inputs = data[0]\n labels = data[1]\n inputs = inputs.cuda()\n labels = labels.cuda()\n _, outputs ,_ = model(inputs,heuristic=heuristic) \n if start_test:\n all_output = outputs.float()\n all_label = labels.float()\n start_test = False\n else:\n all_output = torch.cat((all_output, outputs.float()), 0)\n all_label = torch.cat((all_label, labels.float()), 0)\n _, predict = torch.max(all_output, 1)\n accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])\n return accuracy\n\n##calculate the gaussianity\ndef nogauss(a):\n num = a.shape[1]\n std = torch.std(a, dim=1, keepdim=True).repeat(1,num)\n mean = torch.mean(a, dim=1, keepdim=True).repeat(1,num)\n cal = (a-mean)/std\n y = torch.mean(torch.pow(cal,4),1)-3*torch.pow(torch.mean(torch.pow(cal,2),1),2)\n return torch.mean(torch.abs(y))\n\ndef train_msda(config):\n ## set pre-process\n prep_dict = {}\n dsets = {}\n dset_loaders = {}\n data_config = config[\"data\"]\n prep_config = config[\"prep\"]\n prep_dict[\"source\"] = prep.image_target(**config[\"prep\"]['params'])\n prep_dict[\"target\"] = prep.image_target(**config[\"prep\"]['params'])\n prep_dict[\"test\"] = prep.image_test(**config[\"prep\"]['params'])\n\n ## prepare data\n train_bs = data_config[\"target\"][\"batch_size\"]\n test_bs = data_config[\"test\"][\"batch_size\"]\n config['data_list'].remove(data_config[\"target\"][\"list_path\"])\n domain_number = len(config['data_list'])\n\n dsets[\"source\"] = ImageSourceList(config['data_list'], \\\n transform=prep_dict[\"source\"], batch_number = train_bs )\n dset_loaders[\"source\"] = DataLoader(dsets[\"source\"], batch_size=train_bs*domain_number, \\\n shuffle=True, num_workers=10, drop_last=True)\n dsets[\"target\"] = ImageList(open(data_config[\"target\"][\"list_path\"]).readlines(), \\\n transform=prep_dict[\"target\"])\n dset_loaders[\"target\"] = DataLoader(dsets[\"target\"], batch_size=train_bs, \\\n shuffle=True, num_workers=2, drop_last=True)\n\n dsets[\"test\"] = ImageList(open(data_config[\"test\"][\"list_path\"]).readlines(), \\\n transform=prep_dict[\"test\"])\n dset_loaders[\"test\"] = DataLoader(dsets[\"test\"], batch_size=test_bs, \\\n shuffle=False, num_workers=4)\n\n ## set base 
network\n class_num = config[\"network\"][\"params\"][\"class_num\"]\n net_config = config[\"network\"]\n base_network = net_config[\"name\"](**net_config[\"params\"])\n base_network = base_network.cuda()\n\n ## add additional network for some methods\n ad_net = network.AdversarialNetwork( class_num, 1024, multi=domain_number +1)\n ad_net = ad_net.cuda()\n \n ## set optimizer\n parameter_list = base_network.get_parameters() + ad_net.get_parameters()\n optimizer_config = config[\"optimizer\"]\n optimizer = optimizer_config[\"type\"](parameter_list, \\\n **(optimizer_config[\"optim_params\"]))\n param_lr = []\n for param_group in optimizer.param_groups:\n param_lr.append(param_group[\"lr\"])\n schedule_param = optimizer_config[\"lr_param\"]\n lr_scheduler = lr_schedule.schedule_dict[optimizer_config[\"lr_type\"]]\n\n #multi gpu\n gpus = config['gpu'].split(',')\n if len(gpus) > 1:\n ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i,k in enumerate(gpus)])\n base_network = nn.DataParallel(base_network, device_ids=[int(i) for i,k in enumerate(gpus)])\n \n ## train \n len_train_source = len(dset_loaders[\"source\"])\n len_train_target = len(dset_loaders[\"target\"])\n for i in range(config[\"num_iterations\"]):\n #test\n if i % config[\"test_interval\"] == config[\"test_interval\"] - 1:\n base_network.train(False)\n temp_acc = image_classification_test(dset_loaders, base_network, heuristic=config[\"heuristic\"])\n temp_model = nn.Sequential(base_network)\n log_str = \"iter: {:05d}, precision: {:.5f}\".format(i, temp_acc)\n config[\"out_file\"].write(log_str+\"\\n\")\n config[\"out_file\"].flush()\n print(log_str)\n #save model\n if i % config[\"snapshot_interval\"] == 0 and i:\n torch.save(base_network.state_dict(), osp.join(config[\"output_path\"], \\\n \"iter_{:05d}_model.pth.tar\".format(i)))\n\n ## train one iter\n base_network.train(True)\n ad_net.train(True)\n loss_params = config[\"loss\"] \n optimizer = lr_scheduler(optimizer, i, **schedule_param)\n optimizer.zero_grad()\n\n #dataloader\n if i % len_train_source == 0:\n iter_source = iter(dset_loaders[\"source\"])\n if i % len_train_target == 0:\n iter_target = iter(dset_loaders[\"target\"])\n \n #network\n inputs_source, labels_source, label_domain = iter_source.next()\n inputs_target, _ = iter_target.next()\n inputs_source, inputs_target, labels_source, label_domain = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda(), label_domain.cuda()\n\n #network\n features_source, outputs_source, focal_source = base_network(inputs_source,heuristic=config[\"heuristic\"])\n features_target, outputs_target, focal_target = base_network(inputs_target,heuristic=config[\"heuristic\"])\n features = torch.cat((features_source, features_target), dim=0)\n outputs = torch.cat((outputs_source, outputs_target), dim=0)\n focals = torch.cat((focal_source,focal_target),dim=0)\n softmax_out = nn.Softmax(dim=1)(outputs)\n\n #similarity\n sim_source = torch.sum(outputs_source *focal_source,1)/torch.sqrt(torch.sum(torch.pow(outputs_source,2),1))/torch.sqrt(torch.sum(torch.pow(focal_source,2),1))\n sim_target = torch.sum(outputs_target *focal_target,1)/torch.sqrt(torch.sum(torch.pow(outputs_target,2),1))/torch.sqrt(torch.sum(torch.pow(focal_target,2),1))\n relate_source = torch.mean(torch.abs(sim_source))\n relate_target = torch.mean(torch.abs(sim_target))\n relate_all = relate_source + relate_target\n\n # calculate the theta value\n #theta = torch.acos(torch.cat((sim_source,sim_target)))\n #m_theta = torch.mean(theta)\n #s_theta = 
torch.std(theta)\n\n #calculate the gaussian\n gaussian = torch.abs(nogauss(outputs) - nogauss(outputs+focals))\n\n #loss calculation\n transfer_loss, mean_entropy, heuristic = loss.HDA_MSDA([softmax_out,focals,label_domain], ad_net, network.calc_coeff(i))\n classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)\n total_loss = loss_params[\"trade_off\"] * transfer_loss + classifier_loss + config[\"heuristic\"] * heuristic #+ config['gauss'] * gaussian\n\n if i % (config[\"print_num\"]) == 0:\n log_str = \"iter:{:05d},transfer:{:.5f},classifier:{:.5f},heuristic:{:.5f},relate:{:.5f},gaussian:{:.5f}\".format(i, transfer_loss, classifier_loss, heuristic, relate_all, gaussian)\n config[\"out_file\"].write(log_str+\"\\n\")\n config[\"out_file\"].flush()\n print(log_str)\n\n total_loss.backward()\n optimizer.step()\n","sub_path":"scripts/train_msda.py","file_name":"train_msda.py","file_ext":"py","file_size_in_byte":8025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"291127884","text":"# -*- coding: utf-8 -*-\n\"\"\" This module contains the functions to help in analysis of data\"\"\"\nimport pstats\nimport sys\nimport pandas as pd\nfrom IPython.display import HTML\nfrom pytools.prog.decorators import deprecated\n\nDEFAULT_OUTPUT = sys.stdout\n\n# Constant of result dict\nSHAPE = \"SHAPE\"\nDESCRIPTION = \"DESCRIPTION\"\nINDEX = 'INDEX'\n\nHTML_MODEL = \"\"\"\n
    <h3>Taille des données</h3>\n
    <ul>\n
    <li>Nombre de lignes : {nb_lig}</li>\n
    <li>Nombre de colonnes : {nb_col}</li>\n
    </ul>\n
    <h3>Description</h3>\n
    {description}\n
    \n\"\"\"\n\ndef _html_formatter(analysis_dict, html_model=HTML_MODEL):\n \"\"\"\n Formatter function \n :param analysis_dict: Analysis result with \n {\n SHAPE: size of frame, \n DESCRIPTION: data's description,\n } \n :type analysis_dict: dict\n :param html_model: Model for HTML\n :type html_model: str\n :return: HTML format\n :rtype: str\n \"\"\"\n return html_model.format(\n nb_lig = analysis_dict[SHAPE][0],\n nb_col = analysis_dict[SHAPE][1],\n description=analysis_dict[DESCRIPTION].style.hide(axis='index').format({\n 'TYPE': lambda l: ', '.join(l) if isinstance(l, list) else l,\n 'FILLING': \"{:.2%}\"\n }).to_html()\n )\n\ndef _type_list(series):\n \"\"\"\n This shadow function return the unique type not None list.\n \n For identify the no-None objet, it uses the ``pandas.notnull`` function\n :param series: Series objets who contains data to analysis \n :type series: iterative object\n :return: unique no-None type list\n :rtype: list\n \"\"\"\n if not isinstance(series, pd.Series):\n raise TypeError(f\"_type_list : le type attendu est pandas.Series. Le type est {type(series)}\")\n _result = series[series.notnull()].map(lambda x: type(x).__name__).unique()\n if len(_result)==0:\n return ''\n elif len(_result)==1:\n return _result[0]\n else:\n return list(_result)\n\ndef _min_value(series, type_list=None):\n \"\"\"\n Défine the min value of the series :\n - for 'str' type it's the min lenght of the string value\n - for 'int' or 'float' type, it's the min value of the serie\n for other type, this function return 'None'\n :param series: \n :type series: \n :param type_list: \n :type type_list: \n :return: \n :rtype: \n \"\"\"\n if type_list is None:\n type_list = _type_list(series)\n \n if isinstance(type_list, str) and type_list=='str':\n return min(series[series.notnull()].map(len))\n elif isinstance(type_list, str) and (type_list=='int' or type_list=='float'):\n return series[series.notnull()].min()\n else:\n return None\n\ndef _max_value(series, type_list=None):\n \"\"\"\n Défine the max value of the series :\n - for 'str' type it's the max lenght of the string value\n - for 'int' or 'float' type, it's the max value of the serie\n for other type, this function return 'None'\n :param series: \n :type series: \n :param type_list: \n :type type_list: \n :return: \n :rtype: \n \"\"\"\n if type_list is None:\n type_list = _type_list(series)\n\n if isinstance(type_list, str) and type_list == 'str':\n return max(series[series.notnull()].map(len))\n elif isinstance(type_list, str) and (type_list == 'int' or type_list == 'float'):\n return series[series.notnull()].max()\n else:\n return None\n\n@deprecated(\"The new location is pytool.dataframe.analysis\")\ndef summarize(dataset=None, size_head=80, output = DEFAULT_OUTPUT, html_formatter=_html_formatter):\n \"\"\"\n This function returns a summary of information about the DataFrame.\n The informations is printing to standard output and are :\n - The column name\n - The type of data\n - The count of not null- data and its percentage\n - The head of unique values\n :param dataset: Object to analysis\n :type dataset: pandas.DataFrame\n :param size_head: max lenght of header of data's example\n :type size_head: int\n :param output: \n IO Object : writable object (ie. 
object with `write` function with string argument\n 'HTML' : output in HTML format\n 'DICT' : Analysis result : dictionnary with \n {\n SHAPE: size of frame, \n DESCRIPTION: data's description,\n INDEX: index's description\n } \n :return: None if IO Object else type of selected output \n \"\"\"\n if isinstance(dataset, pd.DataFrame):\n _result = {\n SHAPE: dataset.shape,\n DESCRIPTION: pd.DataFrame(columns=['NAME', 'TYPE', 'COUNT', 'FILLING', 'MIN', 'MAX', 'SAMPLE'], index=dataset.columns),\n INDEX: pd.DataFrame(columns=['NAME', 'TYPE', 'N_UNIQUE', 'SAMPLE'], index=dataset.index.names),\n }\n # Description des données\n for col in dataset.columns:\n col_notnull_size = len(dataset[~dataset[col].isnull()][col])\n _result[DESCRIPTION].at[col, 'NAME'] = col\n _result[DESCRIPTION].at[col, 'TYPE'] = _type_list(dataset.loc[:,col])\n _result[DESCRIPTION].at[col, 'COUNT'] = col_notnull_size\n _result[DESCRIPTION].loc[col, 'FILLING'] = col_notnull_size/len(dataset) if len(dataset)>0 else 0\n _result[DESCRIPTION].loc[col, 'MIN'] = _min_value(dataset.loc[:,col], _result[DESCRIPTION].loc[col, 'TYPE'])\n _result[DESCRIPTION].loc[col, 'MAX'] = _max_value(dataset.loc[:,col], _result[DESCRIPTION].loc[col, 'TYPE'])\n _result[DESCRIPTION].loc[col, 'SAMPLE'] = str(dataset[dataset[col].notnull()][col].unique())[1:-1][:size_head]\n \n # Description de l'index\n for name in dataset.index.names:\n _result[INDEX].loc[name, 'NAME'] = name\n _result[INDEX].loc[name, 'TYPE'] = dataset.index.get_level_values(name).map(lambda x: type(x).__name__).unique()\n _result[INDEX].loc[name, 'N_UNIQUE'] = dataset.index.get_level_values(name).nunique()\n _result[INDEX].loc[name, 'N_UNIQUE'] = str(dataset.index.get_level_values(name).unique())[1:-1][:size_head]\n else:\n raise TypeError(f\"Le type d'objet à analyser doit être au format `pandas.DataFrame`.\\nLe type d'objet en entrée est de type {type(dataset)}\")\n\n if isinstance(output, str):\n if output=='DICT':\n return _result \n elif output=='HTML':\n return HTML(_html_formatter(_result))\n else:\n print(f\"Shape : {_result[SHAPE]}\", file=output)\n for idx, row in _result[DESCRIPTION].iterrows():\n print(\"{col_name:<10} : {col_type:<10} {col_limite} : {col_size} : [{col_value}]\"\n .format(col_name=row.NAME,\n col_type=', '.join(row.TYPE) if isinstance(row.TYPE, list) else row.TYPE,\n col_limite=f\"[{int(row.MIN)}, {int(row.MAX)}]\" if pd.notnull(row.MIN) else '',\n col_size=f\"{row.COUNT} ({row.FILLING:0.2%})\",\n col_value=row.SAMPLE\n )\n , file=output\n )\n return None","sub_path":"pytools/console/data_analysis.py","file_name":"data_analysis.py","file_ext":"py","file_size_in_byte":7206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"27363699","text":"from straxen.misc import TimeWidgets\n\n\ndef test_widgets():\n tw = TimeWidgets()\n wig = tw.create_widgets()\n start, end = tw.get_start_end()\n\n assert isinstance(start, int) and isinstance(end, int), \"Should have returned unix time in ns as integer!\"\n assert end > start, \"By default end should be larger than start\"\n\n # Now manually change time zone and compare:\n wig.children[0].children[0].value = 1\n start_utc, end_utc = tw.get_start_end()\n\n h_in_ns_unix = 60*60*10**9\n unix_conversion_worked = start_utc - start == h_in_ns_unix or start_utc - start == 2 * h_in_ns_unix\n assert unix_conversion_worked\n unix_conversion_worked = start_utc - end == h_in_ns_unix or start_utc - end == 2 * h_in_ns_unix\n assert unix_conversion_worked\n\n\ndef 
test_change_in_fields():\n tw = TimeWidgets()\n wig = tw.create_widgets()\n start, end = tw.get_start_end()\n\n # Modify the nano-second value:\n wig.children[1].children[2].value = '20'\n wig.children[2].children[2].value = '20'\n\n start20, end20 = tw.get_start_end()\n assert start20 - start == 20, 'Start nano-second field did not update.'\n assert end20 - end == 20, 'End nano-second field did not update.'\n\n # Modify Minutes:\n time = wig.children[1].children[1].value\n minutes = int(time[-2:])\n minutes *= 60*10**9\n wig.children[1].children[1].value = time[:-2] + '00' # .value is a string \"HH:MM\"\n\n start00, _ = tw.get_start_end()\n assert start20 - start00 == minutes, 'Time field did not update its value!'\n","sub_path":"tests/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"230101959","text":"#######\r\n# Hello! Thanks for reading my application.\r\n# I do not have microsoft office installed on my home machine so did not feel comfortable submitting an Excel workbook.\r\n# Instead, here's a small python script to complete the task.\r\n# The output is a pdf file that you can find here: \r\n# And a notebook going through the code is here:\r\n# Note that because the output is a PDF and there are 170 countries/regions, this script may take up to 2 minutes.\r\n# Please feel free to contact me at pmcbr@sas.upenn.edu.\r\n#######\r\n\r\n\r\nimport pandas as pd\r\nfrom pandas.tseries.offsets import MonthEnd\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\n\r\n# data source\r\nsource = 'https://www.treasury.gov/resource-center/data-chart-center/tic/Documents/slt1d_globl.csv'\r\n\r\n# read in raw data\r\ntic = pd.read_csv(source, skiprows=13, skipfooter=10, \r\n names = ('Country Name', 'Country Code', 'End of Month', \r\n 'Total US Long-Term Securities', 'US Treasury', \r\n 'US Agency Bonds', 'US Corporate and Other Bonds', \r\n 'US Corporate Stocks'), \r\n engine='python', thousands=',', na_values = 'n.a.')\r\n\r\n\r\n# add native date formatting\r\ntic['End of Month'] = pd.to_datetime(tic['End of Month']) + MonthEnd(1)\r\ntic.set_index(['Country Name', 'End of Month'], inplace = True)\r\n\r\n\r\ndef table_plots(country, df):\r\n \"\"\"\r\n country is a string used to title the plots\r\n df should contain appropriate data\r\n returns matplotlib.pyplot figure for printing\r\n \"\"\"\r\n fig, ax = plt.subplots(3,2, figsize = (18, 12))\r\n fig.suptitle(country, fontsize=20)\r\n for i, x in enumerate(ax.ravel()):\r\n\r\n if i == 0:\r\n x.table(cellText=list(zip(list(df.iloc[0,1:].index), list(df.iloc[0,1:].values))), \r\n colLabels=['', str(df.index[0])[:-12]],\r\n loc='center')\r\n x.xaxis.set_visible(False) \r\n x.yaxis.set_visible(False)\r\n x.set_title('Most Recent Data (millions USD)')\r\n continue\r\n x.grid(True)\r\n x.plot(df.iloc[:,i].dropna())\r\n x.set_title(df.iloc[:,i].name)\r\n\r\n return fig\r\n\r\n# list of country names\r\ncountries = list(tic.index.levels[0])\r\n\r\n# print to pdf\r\nwith PdfPages('tic_update.pdf') as pdf:\r\n for c in countries:\r\n\r\n fig = table_plots(c, tic.loc[c])\r\n pdf.savefig(fig)\r\n\r\n plt.close()\r\n\r\n\r\n","sub_path":"mcbrearty_tic.py","file_name":"mcbrearty_tic.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"425425846","text":"from __future__ import 
absolute_import\nfrom __future__ import print_function\nimport numpy as np\nimport pdb\nimport sys\nimport random\nimport os \nos.environ['CUDA_VISIBLE_DEVICES'] = '4,5,6'\nfrom keras.datasets import cifar100\nfrom keras.models import Model, Sequential\nfrom keras.utils import get_file\nfrom keras.layers import Input, Flatten, Dense, Dropout, Lambda, Conv2D, MaxPooling2D, InputLayer\nfrom keras.optimizers import RMSprop\nfrom keras import backend as K\nsys.path.insert(1, \"../\")\nfrom load_data import oxford_images, oxford_labels, oxford_categories\nfrom sklearn.model_selection import train_test_split\nfrom PIL import Image\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg16 import preprocess_input\nWEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'\nweights_path = get_file('vgg16_weights.h5', WEIGHTS_PATH_NO_TOP)\n\nnum_classes = len(oxford_categories)\nepochs = 50\n\ndef euclidean_distance(vects):\n x, y = vects\n sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)\n return K.sqrt(K.maximum(sum_square, K.epsilon()))\n\ndef eucl_dist_output_shape(shapes):\n shape1, shape2 = shapes\n return (shape1[0], 1)\n\ndef contrastive_loss(y_true, y_pred):\n margin = 1\n square_pred = K.square(y_pred)\n margin_square = K.square(K.maximum(margin - y_pred, 0))\n return K.mean(y_true * square_pred + (1 - y_true) * margin_square)\n\ndef create_pairs(x, digit_indices):\n pairs = []\n labels = []\n temp = min([len(digit_indices[d]) for d in range(num_classes)]) - 1\n for d in range(num_classes):\n for i in range(temp):\n z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]\n pairs += [[x[z1], x[z2]]]\n inc = random.randrange(1, num_classes)\n dn = (d + inc) % num_classes\n z1, z2 = digit_indices[d][i], digit_indices[dn][i]\n pairs += [[x[z1], x[z2]]]\n labels += [1, 0]\n return np.array(pairs), np.array(labels)\n\n\ndef create_base_network(input_shape):\n model = Sequential()\n model.add(InputLayer(input_shape=(150, 150, 3)))\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1'))\n model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2'))\n model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_maxpool'))\n\n model.load_weights(weights_path)\n\n for layer in model.layers:\n layer.trainable = False\n\n for layer in model.layers[-4:]:\n layer.trainable = True\n\n return model\n\n\n\ndef compute_accuracy(y_true, y_pred):\n pred = y_pred.ravel() < 0.5\n return 
np.mean(pred == y_true)\n\ndef accuracy(y_true, y_pred):\n return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))\n\n# # create train and test \n# x_train, x_test, y_train, y_test = train_test_split(oxford_images, oxford_labels, test_size=0.2, stratify=oxford_labels, random_state=6)\n\n# # resize train dataset images to 224 x 224 x 3\n# x_train_resized = []\n# for tr in x_train:\n# if len(tr.shape) == 2: # grayscale image\n# temp = np.stack((tr,tr,tr), axis=2)\n# x_train_resized.append(np.array(Image.fromarray(temp).resize((224, 224), Image.ANTIALIAS)))\n# else: # RGB image\n# x_train_resized.append(np.array(Image.fromarray(tr).resize((224, 224), Image.ANTIALIAS)))\n# x_train_resized = np.array(x_train_resized)\n\n# # resize test dataset images to 224 x 224 x 3\n# x_test_resized = []\n# for tr in x_test:\n# if len(tr.shape) == 2: # grayscale image\n# temp = np.stack((tr,tr,tr), axis=2)\n# x_test_resized.append(np.array(Image.fromarray(temp).resize((224, 224), Image.ANTIALIAS)))\n# else: # RGB image\n# x_test_resized.append(np.array(Image.fromarray(tr).resize((224, 224), Image.ANTIALIAS)))\n# x_test_resized = np.array(x_test_resized)\n\n# y_train = np.array(y_train)\n# y_test = np.array(y_test)\n# np.save(\"./oxford_train_224.npy\", x_train_resized)\n# np.save(\"./oxford_train_label.npy\", y_train)\n# np.save(\"./oxford_test_224.npy\", x_test_resized)\n# np.save(\"./oxford_test_label.npy\", y_test)\n\nx_train_resized = np.load(\"./oxford_train_224.npy\")\ny_train = np.load(\"./oxford_train_label.npy\")\nx_test_resized = np.load(\"./oxford_test_224.npy\")\ny_test = np.load(\"./oxford_test_label.npy\")\n\n\n\n# preprocess input wi\nx_train_normalized = preprocess_input(x_train_resized)\nx_test_normalized = preprocess_input(x_test_resized)\ninput_shape = x_train_normalized.shape[1:]\n\n# create training+test positive and negative pairs\nclass_indices = [np.where(y_train == i)[0] for i in range(num_classes)]\ntemp = min([len(class_indices[d]) for d in range(num_classes)])\ntr_pairs, tr_y = create_pairs(x_train_normalized, class_indices)\n\nclass_indices = [np.where(y_test == i)[0] for i in range(num_classes)]\nte_pairs, te_y = create_pairs(x_test_normalized, class_indices)\n\n# network definition\ncnn = create_base_network(input_shape)\n\ninput_a = Input(shape=input_shape)\ninput_b = Input(shape=input_shape)\n\n# because we re-use the same instance `base_network`,\n# the weights of the network\n# will be shared across the two branches\nprocessed_a = cnn(input_a)\nprocessed_b = cnn(input_b)\n\ndistance = Lambda(euclidean_distance,\n output_shape=eucl_dist_output_shape)([processed_a, processed_b])\n\nmodel = Model([input_a, input_b], distance)\n\n# train\nmodel.compile(loss=contrastive_loss, optimizer=\"adam\", metrics=[accuracy])\nmodel.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,\n batch_size=32,\n epochs=epochs,\n validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))\n\n# compute final accuracy on training and test sets\ny_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])\ntr_acc = compute_accuracy(tr_y, y_pred)\ny_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])\nte_acc = compute_accuracy(te_y, y_pred)\n\nprint('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))\nprint('* Accuracy on test set: %0.2f%%' % (100 * 
te_acc))\n\n\nmodel.save('../../models/oxford/siamese_vgg_50epochs.h5')\n\npdb.set_trace()\n","sub_path":"scripts/deep_learning/oxford/siamese_vgg.py","file_name":"siamese_vgg.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"329161964","text":"'''class Solution:\n def countCharacters(self, words: List[str], chars: str) -> int::\n res = 0\n for word in words:\n lens = len(word)\n temp = 0\n for i in word:\n if word.count(i) <= chars.count(i):\n temp += 1\n else:\n break\n if temp == lens:\n res += len(word)\n return res'''\nwords = [\"cat\",\"bt\",\"hat\",\"tree\"]\nchars = \"atach\"\nres = 0\nfor word in words:\n lens = len(word) #下面的for loop中与temp储存值进行比较 相等说明所有字符都在chars中数量也符合\n temp = 0\n for i in word:\n if word.count(i) <= chars.count(i): #.count() 函数的使用 求该字符的和\n temp += 1 #+1表示该数位满足 \n else:\n break #提高速度 不符合就出这个for loop 进行下一个word\n if temp == lens:\n res += len(word) #符合就增加字符长度\nprint(res)","sub_path":"Leetcode1160.拼写单词.py","file_name":"Leetcode1160.拼写单词.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"605157081","text":"from google.cloud import aiplatform\n\nPROJECT_ID = 'gcp-practice-0123'\nPREBUILT_SERVING_CONTAINER = \"us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest\"\npipeline_root_path = 'gs://gcp-practice-0123-18jun2023/test-kfp/kfp/'\n\nmlops_pipeline_job = aiplatform.PipelineJob(display_name = \"model-deployment-pipeline-test-json\",\n pipeline_root = pipeline_root_path,\n template_path = \"gs://gcp-practice-0123-18jun2023/docker-kfp-test/kfp-json/mlopsdeploypipeline-1.json\",\n #template_path = \"E:\\\\Python-code\\\\vertex ai notes\\\\json\\\\mlopsdeploypipeline-1.json\"\n project = PROJECT_ID,\n parameter_values={\"project_in\":PROJECT_ID,\n \"endpoint_display_name_in\":\"mlopsDeployPipeline-1\",\n \"model_gcs_path_in\":'gs://gcp-practice-0123-18jun2023/custom-trained-model/model/aiplatform-custom-training-2023-01-22-18:07:17.227/model/',\n \"serving_container_in\":PREBUILT_SERVING_CONTAINER,\n \"bigquery_source_in\":\"bq://gcp-practice-0123.dataset2.california_housing_test\",\n \"bigquery_destination_prefix_in\":'bq://gcp-practice-0123.dataset2',\n \"predictions_format_in\":\"bigquery\",\n \"machine_type_in\":\"e2-standard-4\",\n \"bucket_uri_in\":\"gs://gcp-practice-0123-18jun2023/custom-trained-model/training-data\"\n }\n \n )\n\n#mlops_pipeline_job.submit(service_account='sa-nonprod-corp-1cdh-214e-01@nonprod-corp-1cdh-214e.iam.gserviceaccount.com')\nmlops_pipeline_job.submit()\n","sub_path":"cloud-build/src/kfp-batch-prediction/execute/execute_pipeline.py","file_name":"execute_pipeline.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"210371973","text":"import os, sys\r\nimport json\r\nfrom neuroner import neuromodel\r\n\r\n\r\nclass HiddenPrints:\r\n # Prevent printing\r\n def __enter__(self):\r\n self._original_stdout = sys.stdout\r\n sys.stdout = open(os.devnull, 'w')\r\n\r\n def __exit__(self, exc_type, exc_val, exc_tb):\r\n sys.stdout.close()\r\n sys.stdout = self._original_stdout\r\n\r\n\r\ndataset = 'i2b2_2014_deid'\r\n# 'conll2003'\r\n# 'example_unannotated_texts'\r\n# 'i2b2_2014_deid'\r\n\r\nmodel = 'i2b2_2014_glove_spacy_bioes'\r\n\r\n\r\n# 'conll_2003_en'\r\n# 'i2b2_2014_glove_spacy_bioes'\r\n# 'i2b2_2014_glove_stanford_bioes'\r\n# 
'mimic_glove_spacy_bioes'\r\n# 'mimic_glove_stanford_bioes'\r\n\r\n\r\ndef entity_detect(sentence):\r\n # print(\"Building model\")\r\n with HiddenPrints():\r\n neuromodel.fetch_data(dataset)\r\n neuromodel.fetch_model(model)\r\n nn = neuromodel.NeuroNER(train_model=False, use_pretrained_model=True)\r\n\r\n # print(\"predicting\")\r\n entities = nn.predict(sentence)\r\n return entities\r\n\r\n\r\nwith open(\"parsed_json.json\", 'r') as load_f:\r\n load_dict = json.load(load_f)\r\nnewJson = {}\r\nfor i in load_dict.keys():\r\n strx = ' '\r\n strxNew = strx.join(load_dict.get(i))\r\n newJson[i] = entity_detect(strxNew)\r\nwith open(\"entities_json.json\", \"w\") as f:\r\n # json.dump(dict_, f)\r\n json.dump(newJson, f, indent=2, sort_keys=True, ensure_ascii=False)\r\n","sub_path":"Assignment1/NeuroNER.py","file_name":"NeuroNER.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"273199117","text":"#!/usr/bin/env python3\nfrom ev3dev.ev3 import *\nimport time\nimport atexit\nimport itertools\n\ndef exit_handler():\n\tfor i in range(3):\n\t\tRoutine.motors[i].stop(stop_action='brake')\n\tRoutine.motors[0].polarity = 'normal'\n\nclass Routine():\n\n\tmotors = None\n\tS = 1/1000 # nozzle speed cm/ms\n\tCx = 1/0.00138675 # ticks x-motor rotation per cm traveled\n\tCy = 1/0.0115625 # ticks y-motor rotation per cm traveled\n\tC = (Cx,Cy,Cy)\n\n\tdef __init__(self, lines):\n\t\traw_units = [ list(y) for x, y in \n\t\t\titertools.groupby(lines, lambda z: z == 'unit break') if not x ]\n\n\t\tself.queue = [ DrawingUnit(self, x) for x in raw_units ]\n\n\t\tself.startRotation = self.queue[0].positions[0] # cm\n\t\tself.startRotation = [ self.startRotation[i]/Routine.C[i] for i in range(2) ] # ticks\n\n\t\tself.connectUnits()\n\n\t\tself.report_data = []\n\n\tdef connectUnits(self):\n\t\t\"\"\"Generates transition attribute for units that are to be printed sequentially.\"\"\"\n\t\tunits = self.queue\n\t\tif not len(units) > 1:\n\t\t\treturn\n\t\tfor i in range(len(units)-1):\n\t\t\tif units[i].stop_action == 1:\n\t\t\t\tpoints = [ units[i].positions[-1], units[i+1].positions[0] ]\n\t\t\t\tunits[i].transition = units[i].cleanData(points=points)\n\t\t\t\tunits[i].transition.pop(0)\n\n\tdef executeCommand(self, command):\n\t\t\"\"\"\n\t\tExecutes the command generated by genCommand() using run_timed().\n\t\t\"\"\"\n\t\tRoutine.motors[0].speed_sp = command[0]\n\t\tRoutine.motors[0].time_sp = command[2]\n\t\tRoutine.motors[1].speed_sp = command[1]\n\t\tRoutine.motors[1].time_sp = command[2]\n\t\tRoutine.motors[2].speed_sp = command[1]\n\n\t\t# start moving x-motor\n\t\tRoutine.motors[0].run_timed()\n\t\t# start moving y-motors\n\t\tRoutine.motors[1].run_timed()\n\t\tRoutine.motors[2].run_forever()\n\t\t# stop 2nd y-motor when first y-motor stops\n\t\tRoutine.motors[1].wait_while('running')\t\n\t\tRoutine.motors[2].stop()\n\n\t\tmotor_positions = [Routine.motors[i].position/Routine.C[i] for i in range(3)] # current motor positions, cm\n\t\tmotor_positions[1] = (motor_positions[1] + motor_positions.pop())*0.5 # averaging two y-motor positions\n\n\t\tself.report_data[-1] += motor_positions\n\n\tdef executeCommand_dep(self, command):\n\t\t\"\"\"\n\t\tExecutes the command generated by genCommand() using run_to_abs_pos().\n\n\t\tThis method was depracated because using run_timed() allows better control over the simultaneous\n\t\tstart and stop of the motors and allows us to better maintain a constant nozel speed 
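\n\t\tNote: moveToNext() below still consumes the same [B, v] command format that genCommand_dep() produces, as seen in the main loop at the bottom of this script.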
\n\t\t\"\"\"\n\n\t\t# break command into next position and velocity vector B and v\n\t\tB, v = command\n\t\t# start moving x-motor\n\t\tRoutine.motors[0].run_to_abs_pos(position_sp=B[0], speed_sp=v[0], stop_action='coast')\n\t\t# start moving y-motors\n\t\tRoutine.motors[1].run_to_abs_pos(position_sp=B[1], speed_sp=v[1], stop_action='coast')\n\t\tRoutine.motors[2].run_to_abs_pos(position_sp=B[1], speed_sp=v[1], stop_action='coast')\n\t\t# wait for x-motor to stop - not sure if this is necessary\n\t\tRoutine.motors[0].wait_while('running')\n\n\t\t# self.report_data[-1] += [Routine.motors[i].position/Routine.C[i] for i in range(2)]\n\n\tdef moveToNext(self, command):\n\t\t\"\"\" Moves nozzle to starting position of next unit.\"\"\"\n\n\t\tx_speed = 500 # ticks/s\n\t\ty_speed = 100 # ticks/s\n\n\t\tB,v = command\n\n\t\tRoutine.motors[0].position_sp = B[0]\n\t\tRoutine.motors[1].position_sp = B[1]\n\n\t\tRoutine.motors[0].speed_sp = x_speed\n\t\tRoutine.motors[1].speed_sp = y_speed\n\t\tRoutine.motors[2].speed_sp = y_speed\n\n\t\tfor x in Routine.motors:\n\t\t\tx.stop_action = 'brake'\n\n\n\t\tRoutine.motors[0].run_to_abs_pos()\n\t\tRoutine.motors[1].run_to_abs_pos()\n\t\tRoutine.motors[2].run_forever()\n\t\tRoutine.motors[1].wait_while('running')\n\t\tRoutine.motors[2].stop()\n\t\tRoutine.motors[0].wait_while('running')\n\n\t\tRoutine.motors[0].run_to_abs_pos()\n\t\tRoutine.motors[1].run_to_abs_pos()\n\t\tRoutine.motors[2].run_forever()\n\t\tRoutine.motors[1].wait_while('running', timeout=500)\n\t\tRoutine.motors[2].stop()\n\t\tRoutine.motors[0].wait_while('running', timeout=2000)\n\n\t\tmotor_positions = [Routine.motors[i].position/Routine.C[i] for i in range(3)] # current motor positions, cm\n\t\tmotor_positions[1] = (motor_positions[1] + motor_positions.pop())*0.5 # averaging two y-motor positions\n\n\t\tself.report_data[-1] += motor_positions\n\n\nclass DrawingUnit():\n\n\tdef __eq__(self, other):\n\t\t# override default equality implementation, enable checking for duplicate units\n\t\tif isinstance(other, DrawingUnit):\n\t\t\treturn self.__dict__ == other.__dict__\n\t\treturn False\n\n\tdef __init__(self, routine, raw_unit_data):\n\t\t# stop_action = 0 -> stop on completion\n\t\t# stop_action = 1 -> continue to next drawing unit\n\t\tunit_sections = [ list(y) for x, y in \n\t\t\t\titertools.groupby(raw_unit_data, lambda z: z == '---') if not x ]\t\t\n\t\tself.stop_action = int(unit_sections[0][0].split(',')[1])\n\n\t\tself.positions = []\n\t\tfor p in unit_sections[2]:\n\t\t\tself.positions.append([ float(x) for x in p.split(',') ])\n\n\t\tself.transition = []\n\n\t\tself.routine = routine\n\n\t\tself.cleanData()\n\n\tdef genCommand(self, n=None, B=None):\n\t\t\"\"\"Generates a command to move from current position to point B using run_timed()\"\"\"\n\n\t\t# if n is not None, then n is an int\n\t\t# if n != None, generates a command to move from current position to n+1th position\n\t\t# if B != None, then B is a position (in cm) and a command is generated to move from \n\t\t# the current position to B\n\t\t# cm -> centimeters\n\t\t# ms -> milliseconds\n\t\tA = [ Routine.motors[i].position for i in range(3) ] # current location of motors in ticks\n\t\tA[1] = (A[1] + A.pop())*0.5 # averaging position of the two y motors\n\t\tA = [ A[i]/Routine.C[i] for i in range(2) ] # current position, cm\n\t\t\n\t\tif n!= None:\n\t\t\tB = self.positions[n + 1] # next location, cm\n\n\t\tp = [ B[i] - A[i] for i in range(2) ] # position vector, cm\n\t\tmp = (p[0]**2 + p[1]**2)**0.5 # magnitude of position 
vector, cm\n\t\tt = mp/Routine.S # execution time, ms\n\t\tv = [ Routine.S*p[i]*1000*Routine.C[i]/mp for i in range(2) ] # velocity vector, tick/s\n\t\tself.routine.report_data.append(A + B)\n\t\treturn [v[0], v[1], t]\n\n\tdef genCommand_dep(self, n=None, B=None):\n\t\t\"\"\"Generates a command to move from current position to point B using run_to_abs_pos()\"\"\"\n\n\t\t# if n is not None, then n is an int\n\t\t# if n != None, generates a command to move from current position to n+1th position\n\t\t# if B != None, then B is a position (in cm) and a command is generated to move from \n\t\t# the current position to B\n\t\t# cm -> centimeters\n\t\t# ms -> milliseconds\n\t\tA = [ Routine.motors[i].position for i in range(3) ] # current location of motors in ticks\n\t\tA[1] = (A[1] + A.pop())*0.5 # averaging position of the two y motors\n\t\tA = [ A[i]/Routine.C[i] for i in range(2) ] # current position, cm\n\t\t\n\t\tif n!= None:\n\t\t\tB = self.positions[n + 1] # next location, cm\n\n\t\tp = [ B[i] - A[i] for i in range(2) ] # position vector, cm\n\t\tmp = (p[0]**2 + p[1]**2)**0.5 # magnitude of position vector, cm\n\t\tt = mp/Routine.S # execution time, ms\n\t\tv = [ Routine.S*p[i]*1000*Routine.C[i]/mp for i in range(2) ] # velocity vector, tick/s\n\n\t\tself.routine.report_data.append(A + B)\n\n\t\tB = [ B[i]*Routine.C[i] for i in range(2) ] # next location, ticks\n\n\n\t\treturn [B, v]\n\n\tdef cleanData(self, points=None):\n\t\t\"\"\"\n\t\tTakes any 2 points > 1.5cm apart in self.positions and adds equidistant points in between.\n\t\t\n\t\tWhen using run_timed(), the motors are too innaccurate over long distances. If two points are 5cm apart, during\n\t\texecution the location of the nozzle will have a significant error moving from the 1st point to the 2nd.\n\t\tThis function will add points inbetween the points that are 5cm apart so that during execution, at intervals <1.5cm\n\t\tthe script will check to see where the nozzle currently is and correct itself accordingly, reducing error that\n\t\toccurs over long distances. 
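\n\t\tFor example, a 5 cm vector gives num_divisions = int(5 / 1.5) + 1 = 4, so four equidistant sub-vectors of 1.25 cm each are appended in its place.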
\n\t\t\"\"\"\n\n\t\tcleaned_positions = []\n\n\t\tif points is None:\n\t\t\tpositions = self.positions\n\t\telse:\n\t\t\tpositions = points\n\n\t\tfor n in range(len(positions) - 1):\n\t\t\tA = positions[n]\n\t\t\tB = positions[n + 1]\n\t\t\tcleaned_positions.append(A)\n\n\t\t\tp = [ B[i] - A[i] for i in range(2) ] # position vector, cm\n\t\t\tmp = (p[0]**2 + p[1]**2)**0.5 # magnitude of position vector, cm\n\n\t\t\tif mp < 1.5:\n\t\t\t\tcontinue\n\n\t\t\tnum_divisions = int(mp / 1.5) + 1 # number of vectors to divide the current vector into\n\n\t\t\tm = mp/num_divisions # magnitude of each smaller position vector\n\t\t\td = [ p[i]/mp for i in range(2) ] # the directional vector (the unit vector of the position vector)\n\n\t\t\tstart = A\n\t\t\tfor j in range(num_divisions):\n\t\t\t\tend = [ m*d[i] + start[i] for i in range(2) ] # the location of the end of this division\n\t\t\t\tcleaned_positions.append(end)\n\t\t\t\tstart = end\n\n\t\tif points is None:\n\t\t\tself.positions = cleaned_positions\n\t\telse:\n\t\t\treturn cleaned_positions\t\t\n\n\natexit.register(exit_handler)\n\n# extracts routine that has just been sent over from computer\nwith open(\"pathData.csv\",\"r\") as f:\n\tlines = f.readlines()\n\tf.close()\n\nlines = [ x.strip() for x in lines ]\nroutine = Routine(lines)\t\n\nmX = MediumMotor('outA')\nmY1 = LargeMotor('outB')\nmY2 = LargeMotor('outC')\nmX.position = 0\nmY1.position = 0\nmY2.position = 0\n\n# change the below line??\nmX.polarity = 'inversed'\nRoutine.motors = [mX, mY1, mY2]\n\nwith open('comm.txt', \"w\") as f:\n\tf.write('moving to initial position')\n\tf.close()\n\nprint('moving to initial position')\n\n\nx_speed = 500 # ticks/s\ny_speed = 100 # ticks/s\n\n\nRoutine.motors[0].position_sp = Routine.startRotation[0]\nRoutine.motors[1].position_sp = Routine.startRotation[1]\n\nRoutine.motors[0].speed_sp = x_speed\nRoutine.motors[1].speed_sp = y_speed\nRoutine.motors[2].speed_sp = y_speed\n\nfor x in Routine.motors:\n\tx.stop_action = 'brake'\n\nRoutine.motors[0].run_to_abs_pos()\nRoutine.motors[1].run_to_abs_pos()\nRoutine.motors[2].run_forever()\nRoutine.motors[1].wait_while('running')\nRoutine.motors[2].stop()\nRoutine.motors[0].wait_while('running')\n\nRoutine.motors[0].run_to_abs_pos()\nRoutine.motors[1].run_to_abs_pos()\nRoutine.motors[2].run_forever()\nRoutine.motors[1].wait_while('running')\nRoutine.motors[2].stop()\nRoutine.motors[0].wait_while('running')\n\nwith open('comm.txt', \"w\") as f:\n\tf.write('ready to print. click \"start\" and begin extrusion')\n\tf.close()\ninput('ready to print. 
click \"start\" and begin extrusion')\nwith open('comm.txt', \"w\") as f:\n\tf.write('printing')\n\tf.close()\nprint('printing')\n\nfor u in range(len(routine.queue)):\n\tunit = routine.queue[u]\n\tfor x in Routine.motors:\n\t\tx.stop_action = 'coast'\n\tfor i in range(len(unit.positions-1)):\n\t\tcommand = unit.genCommand(n=i)\n\t\troutine.executeCommand(command)\n\tif unit.stop_action == 0 and u != len(routine.queue) - 1:\n\t\t# if we need to stop before going to the next unit\n\t\twith open('comm.txt', \"w\") as f:\n\t\t\tf.write('stop extrusion and press continue')\n\t\t\tf.close()\n\n\t\tinput('stop extrusion and press continue')\n\n\t\twith open('comm.txt', \"w\") as f:\n\t\t\tf.write('moving to starting position of next unit')\n\t\t\tf.close()\n\n\t\tprint('moving to starting position of next unit')\n\n\t\tcommand = unit.genCommand_dep(B=routine.queue[u+1].positions[0])\n\t\troutine.moveToNext(command)\n\n\t\twith open('comm.txt', \"w\") as f:\n\t\t\tf.write('begin extrusion and press continue')\n\t\t\tf.close()\n\n\t\tinput('begin extrusion and press continue')\n\n\telif unit.stop_action == 1 and u != len(routine.queue) - 1:\n\t\t# if we are going to continue directly to the next unit\n\n\t\tfor point in unit.transition:\n\t\t\tcommand = unit.genCommand(point)\n\t\t\troutine.executeCommand(command)\n\nwith open('comm.txt', \"w\") as f:\n\tf.write(\"print complete. please raise marker and then press end button\")\n\tf.close()\n\ninput(\"print complete. please raise marker and then press end button\")\n\n\nwith open('comm.txt', \"w\") as f:\n\tf.write(\"returning to initial position\")\n\tf.close()\n\nprint('returning to origin')\n\n# maybe adjust these speeds?\nRoutine.motors[0].position_sp = 0\nRoutine.motors[1].position_sp = 0\n\nRoutine.motors[0].speed_sp = x_speed\nRoutine.motors[1].speed_sp = y_speed\nRoutine.motors[2].speed_sp = y_speed\n\nfor x in Routine.motors:\n\tx.stop_action = 'brake'\n\nRoutine.motors[0].run_to_abs_pos()\nRoutine.motors[1].run_to_abs_pos()\nRoutine.motors[2].run_forever()\nRoutine.motors[1].wait_while('running')\nRoutine.motors[2].stop()\nRoutine.motors[0].wait_while('running')\n\nRoutine.motors[0].run_to_abs_pos()\nRoutine.motors[1].run_to_abs_pos()\nRoutine.motors[2].run_forever()\nRoutine.motors[1].wait_while('running')\nRoutine.motors[2].stop()\nRoutine.motors[0].wait_while('running')\n\nwith open('comm.txt', \"w\") as f:\n\tf.write(\"print complete\")\n\tf.close()\n\nprint('print complete')\n\nwith open('report.csv', 'w') as f:\n\tf.write('actual start,,end,,actual end\\n')\n\tfor cmd in routine.report_data:\n\t\tf.write('{},{},{},{},{},{}\\n'.format(*cmd))\n\tf.close()\nexit()\n","sub_path":"scripts_brick/execute_routine.py","file_name":"execute_routine.py","file_ext":"py","file_size_in_byte":12452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"502655001","text":"from platform import system\n\n# Checks if operating system is Windows as formatting for text doesn't work in Windows without windll.\nif \"wind\" in system().lower():\n try:\n from ctypes import windll\n kernel32 = windll.kernel32\n kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)\n except ImportError:\n windll = None\n print(\"\\n\\nImport error whilst attempting to import module[windll]. This should not cause a problem\"\n \" if using unix-based platforms. 
Contact author for help.\")\n\n\nclass Colours:\n \"\"\"Enum-styled selection of text formats\"\"\"\n\n BOLD = '\\033[1m'\n NOTIFY = '\\033[36m'\n PASS = '\\033[32m'\n WARNING = '\\033[33m'\n FAIL = '\\033[31m'\n END = '\\033[0m'\n\n\nclass TerminalLog:\n \"\"\"Prints styled text to console\"\"\"\n\n @staticmethod\n def bold(message):\n print(Colours.BOLD, message, Colours.END)\n\n @staticmethod\n def notify(message):\n print(Colours.NOTIFY, message, Colours.END)\n\n @staticmethod\n def confirm(message):\n print(Colours.PASS, message, Colours.END)\n\n @staticmethod\n def warning(message):\n print(Colours.WARNING, message, Colours.END)\n\n @staticmethod\n def fail(message):\n print(Colours.FAIL,\n message,\n '''\\n\\n===========================================================\n \\n ERROR: EXITING...\n \\n===========================================================''',\n Colours.END)\n\n @staticmethod\n def start():\n TerminalLog.bold(\"\\n\\nThis is NotSoFastQC!\\n\\tCreated by James Fox\\n\\n\")\n\n @staticmethod\n def complete():\n TerminalLog.notify(\n '''\\n\\n===========================================================\n \\n Analysis Complete. Thank you for using NotSoFastQC!\n \\n===========================================================''')\n","sub_path":"NotSoFastQC/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"272222043","text":"\n\"\"\"\nFADEOUT SOFTWARE \n\n**Disclaimer ** \nThe library is provided \"as-is\" without warranty\n\nNeither FadeOut Software (IT) Srl or any of its partners or agents shall be liable for any direct, indirect, incidental, special, exemplary, or consequential \ndamages (including, but not limited to, breach of expressed or implied contract; procurement of substitute goods or services; loss of use, data or profits; \nbusiness interruption; or damage to any equipment, software and/or data files) however caused and on any legal theory of liability, whether for contract, \ntort, strict liability, or a combination thereof (including negligence or otherwise) arising in any way out of the direct or indirect use of software, \neven if advised of the possibility of such risk and potential damage.\n\nFadeOut Software (IT) Srl uses all reasonable care to ensure that software products and other files that are made available are safe to use when installed, \nand that all products are free from any known software virus. For your own protection, you should scan all files for viruses prior to installation.\n\n\n# WASDI\n\nThis is WASPY, the WASDI Python lib.\n\nWASDI is an ESA GSTP Project sponsored by ASI in 2016. The system is a fully scalable and distributed Cloud based EO analytical platform. The system is cross-cloud and cross DIAS. \nWASDI is an operating platform that offers services to develop and deploy DIAS based EO on-line applications, designed \nto extract value-added information, made and distributed by EO-Experts without any specific IT/Cloud skills. \nWASDI offers as well to End-Users the opportunity to run EO applications both from a dedicated user-friendly interface \nand from an API based software interface, fulfilling the real-world business needs. 
\nEO-Developers can work using the WASDI Libraries in their usual programming languages and add to the platform these new blocks \nin the simplest possible way.\n\nNote:\nthe philosophy of safe programming is adopted as widely as possible, the lib will try to workaround issues such as\nfaulty input, and print an error rather than raise an exception, so that your program can possibly go on. Please check\nthe return statues\n\nVersion 0.6.0\nLast Update: 25/09/2020\n\nTested with: Python 2.7, Python 3.7\n\nCreated on 11 Jun 2018\n\n@author: p.campanella\n\"\"\"\nfrom time import sleep\n\nname = \"wasdi\"\n\nimport json\nimport os\nimport re\nimport time\nimport traceback\nimport zipfile\nimport requests\nimport getpass\nimport sys\nimport os.path\nimport inspect\n\n# Initialize \"Members\"\nm_sUser = None\nm_sPassword = None\n\nm_sActiveWorkspace = None\nm_sWorkspaceOwner = ''\nm_sWorkspaceBaseUrl = ''\n\nm_sParametersFilePath = None\nm_sSessionId = ''\nm_bValidSession = False\nm_sBasePath = None\n\nm_bDownloadActive = True\nm_bUploadActive = True\nm_bVerbose = True\nm_aoParamsDictionary = {}\n\nm_sMyProcId = ''\nm_sBaseUrl = 'http://www.wasdi.net/wasdiwebserver/rest'\nm_bIsOnServer = False\n\n\ndef printStatus():\n \"\"\"Prints status\n \"\"\"\n global m_sActiveWorkspace\n global m_sWorkspaceOwner\n global m_sWorkspaceBaseUrl\n global m_sParametersFilePath\n global m_sSessionId\n global m_sBasePath\n global m_bDownloadActive\n global m_bUploadActive\n global m_bVerbose\n global m_aoParamsDictionary\n global m_sMyProcId\n global m_sBaseUrl\n global m_bIsOnServer\n\n _log('')\n _log('[INFO] waspy.printStatus: user: ' + str(getUser()))\n _log('[INFO] waspy.printStatus: password: ***********')\n _log('[INFO] waspy.printStatus: session id: ' + str(getSessionId()))\n _log('[INFO] waspy.printStatus: active workspace: ' + str(getActiveWorkspaceId()))\n _log('[INFO] waspy.printStatus: workspace owner: ' + str(m_sWorkspaceOwner))\n _log('[INFO] waspy.printStatus: parameters file path: ' + str(getParametersFilePath()))\n _log('[INFO] waspy.printStatus: base path: ' + str(getBasePath()))\n _log('[INFO] waspy.printStatus: download active: ' + str(getDownloadActive()))\n _log('[INFO] waspy.printStatus: upload active: ' + str(getUploadActive()))\n _log('[INFO] waspy.printStatus: verbose: ' + str(getVerbose()))\n _log('[INFO] waspy.printStatus: param dict: ' + str(getParametersDict()))\n _log('[INFO] waspy.printStatus: proc id: ' + str(getProcId()))\n _log('[INFO] waspy.printStatus: base url: ' + str(getBaseUrl()))\n _log('[INFO] waspy.printStatus: is on server: ' + str(getIsOnServer()))\n _log('[INFO] waspy.printStatus: workspace base url: ' + str(getWorkspaceBaseUrl()))\n if m_bValidSession:\n _log('[INFO] waspy.printStatus: session is valid :-)')\n else:\n print('[ERROR] waspy.printStatus: session is not valid :-(' +\n ' ******************************************************************************')\n\n\ndef setVerbose(bVerbose):\n \"\"\"Sets verbosity\n\n :param bVerbose: False non verbose, True verbose\n :return:\n \"\"\"\n if bVerbose is None:\n print('[ERROR] waspy.setVerbose: passed None, won\\'t change' +\n ' ******************************************************************************')\n return\n if not isinstance(bVerbose, bool):\n print('[ERROR] waspy.setVerbose: passed non boolean, trying to convert' +\n ' ******************************************************************************')\n try:\n bVerbose = bool(bVerbose)\n except:\n print('[ERROR] waspy.setVerbose: cannot convert argument 
into boolean, won\\'t change' +\n ' ******************************************************************************')\n return\n\n global m_bVerbose\n m_bVerbose = bVerbose\n\n\ndef getVerbose():\n \"\"\"\n Get Verbose Flag\n :return: True or False\n \"\"\"\n global m_bVerbose\n return m_bVerbose\n\n\ndef getParametersDict():\n \"\"\"\n Get the full Params Dictionary\n :return: a dictionary containing the parameters\n \"\"\"\n global m_aoParamsDictionary\n return m_aoParamsDictionary\n\n\ndef setParametersDict(aoParams):\n \"\"\"\n Get the full Params Dictionary\n :param aoParams: dictionary of Parameters\n :return: a dictionary containing the parameters\n \"\"\"\n global m_aoParamsDictionary\n m_aoParamsDictionary = aoParams\n\n\ndef addParameter(sKey, oValue):\n \"\"\"\n Adds a parameter\n :param sKey: parameter key\n :param oValue: parameter value\n \"\"\"\n global m_aoParamsDictionary\n m_aoParamsDictionary[sKey] = oValue\n\n\ndef getParameter(sKey, oDefault=None):\n \"\"\"\n Gets a parameter using its key\n :param sKey: parameter key\n :param oDefault: Default value to return if parameter is not present\n :return: parameter value\n \"\"\"\n global m_aoParamsDictionary\n try:\n return m_aoParamsDictionary[sKey]\n except:\n return oDefault\n\n\ndef setUser(sUser):\n \"\"\"\n Sets the WASDI User\n :param sUser: WASDI UserID\n :return:\n \"\"\"\n global m_sUser\n m_sUser = sUser\n\n\ndef getUser():\n \"\"\"\n Get the WASDI User\n \"\"\"\n global m_sUser\n return m_sUser\n\n\ndef setPassword(sPassword):\n \"\"\"\n Set the WASDI Password\n \"\"\"\n global m_sPassword\n m_sPassword = sPassword\n\n\ndef getPassword():\n \"\"\"\n Get the WASDI Password\n \"\"\"\n global m_sPassword\n return m_sPassword\n\n\ndef setSessionId(sSessionId):\n \"\"\"\n Set the WASDI Session\n \"\"\"\n global m_sSessionId\n m_sSessionId = sSessionId\n\n\ndef setParametersFilePath(sParamPath):\n \"\"\"\n Set The Parameters JSON File Path\n :param sParamPath Local Path of the parameters file\n \"\"\"\n if sParamPath is None:\n print('[ERROR] waspy.setParametersFilePath: passed None as path, won\\'t change' +\n ' ******************************************************************************')\n return\n if len(sParamPath) < 1:\n print('[ERROR] waspy.setParametersFilePath: string passed has zero length, won\\'t change' +\n ' ******************************************************************************')\n return\n\n global m_sParametersFilePath\n m_sParametersFilePath = sParamPath\n\n\ndef getParametersFilePath():\n \"\"\"\n Get the local parameters file Path\n :return: local paramters file path\n \"\"\"\n global m_sParametersFilePath\n return m_sParametersFilePath\n\n\ndef getSessionId():\n \"\"\"\n Get the WASDI Session\n :return: Session Id [String]\n \"\"\"\n global m_sSessionId\n return m_sSessionId\n\n\ndef setBasePath(sBasePath):\n \"\"\"\n Set the local Base Path for WASDI\n :param sBasePath: local WASDI base Path. 
If not set, by default WASDI uses [USERHOME].wasdi\n \"\"\"\n global m_sBasePath\n m_sBasePath = sBasePath\n\n\ndef getBasePath():\n \"\"\"\n Get the local Base Path for WASDI\n :return: local base path for WASDI\n \"\"\"\n global m_sBasePath\n return m_sBasePath\n\n\ndef setBaseUrl(sBaseUrl):\n \"\"\"\n Set the WASDI API URL\n :param sBaseUrl: WASDI API URL\n \"\"\"\n global m_sBaseUrl\n m_sBaseUrl = sBaseUrl\n\n\ndef getBaseUrl():\n \"\"\"\n Get the WASDI API URL\n :return: WASDI API URL\n \"\"\"\n global m_sBaseUrl\n return m_sBaseUrl\n\n\ndef setWorkspaceBaseUrl(sWorkspaceBaseUrl):\n \"\"\"\n Set the Workspace specific API URL\n :param sWorkspaceBaseUrl: Workspace API URL\n \"\"\"\n global m_sWorkspaceBaseUrl\n m_sWorkspaceBaseUrl = sWorkspaceBaseUrl\n\n\ndef getWorkspaceBaseUrl():\n \"\"\"\n Get the Workspace API URL\n :return: Workspace API URL\n \"\"\"\n global m_sWorkspaceBaseUrl\n return m_sWorkspaceBaseUrl\n\n\ndef setIsOnServer(bIsOnServer):\n \"\"\"\n Set the Is on Server Flag: keep it false, as default, while developing\n :param bIsOnServer: set the flag to know if the processor is running on the server or on the local PC\n \"\"\"\n global m_bIsOnServer\n m_bIsOnServer = bIsOnServer\n\n\ndef getIsOnServer():\n \"\"\"\n Get the WASDI API URL\n :return: True if it is running on server, False if it is running on the local Machine\n \"\"\"\n global m_bIsOnServer\n return m_bIsOnServer\n\n\ndef setDownloadActive(bDownloadActive):\n \"\"\"\n When in development, set True to download locally files from Server.\n Set it to false to NOT donwload data. In this case the developer must check the availability of the files\n :param bDownloadActive: True (default) to activate autodownload. False to disactivate\n \"\"\"\n\n if bDownloadActive is None:\n print('[ERROR] waspy.setDownloadActive: passed None, won\\'t change' +\n ' ******************************************************************************')\n return\n\n global m_bDownloadActive\n m_bDownloadActive = bDownloadActive\n\n\ndef getDownloadActive():\n \"\"\"\n Get the Download Active Flag\n :return: True if auto download is active, False if it is not active \n \"\"\"\n global m_bDownloadActive\n return m_bDownloadActive\n\n\ndef setUploadActive(bUploadActive):\n \"\"\"\n When in development, set True to upload local files on Server.\n Set it to false to NOT upload data. 
In this case the developer must check the availability of the files\n :param bUploadActive: True to activate Auto Upload, False to disactivate auto upload\n \"\"\"\n\n if bUploadActive is None:\n print('[ERROR] waspy.setUploadActive: passed None, won\\'t change' +\n ' ******************************************************************************')\n return\n\n global m_bUploadActive\n m_bUploadActive = bUploadActive\n\n\ndef getUploadActive():\n \"\"\"\n Get the Upload Active Flag\n :return: True if Auto Upload is Active, False if it is NOT Active\n \"\"\"\n global m_bUploadActive\n return m_bUploadActive\n\n\ndef setProcId(sProcID):\n \"\"\"\n Own Proc Id\n :param sProcID: self processor identifier\n \"\"\"\n global m_sMyProcId\n m_sMyProcId = sProcID\n\n\ndef getProcId():\n \"\"\"\n Get the Own Proc Id\n :return: Own Processor Identifier\n \"\"\"\n global m_sMyProcId\n return m_sMyProcId\n\n\ndef setActiveWorkspaceId(sActiveWorkspace):\n \"\"\"\n Set the Active Workspace Id\n :param sActiveWorkpsace: Active Workspace Id\n \"\"\"\n global m_sActiveWorkspace\n m_sActiveWorkspace = sActiveWorkspace\n\n\ndef getActiveWorkspaceId():\n \"\"\"\n Get Active workspace Id\n :return: the WorkspaceId as a String, '' if there is any error\n \"\"\"\n global m_sActiveWorkspace\n return m_sActiveWorkspace\n\ndef refreshParameters():\n \"\"\"\n Refresh parameters, reading the file again\n \"\"\"\n _loadParams()\n\n\ndef init(sConfigFilePath=None):\n \"\"\"\n Init WASDI Library. Call it after setting user, password, path and url or use it with a config file\n :param sConfigFilePath: local path of the config file. In None or the file does not exists, WASDI will ask for login in the console\n :return: True if login was successful, False otherwise\n \"\"\"\n global m_sUser\n global m_sPassword\n global m_sBaseUrl\n global m_sSessionId\n global m_sBasePath\n global m_bValidSession\n\n sWname = None\n sWId = None\n m_bValidSession = False\n\n if sConfigFilePath is not None:\n bConfigOk, sWname, sWId = _loadConfig(sConfigFilePath)\n\n if bConfigOk is True:\n _loadParams()\n\n if m_sUser is None and m_sPassword is None:\n\n if (sys.version_info > (3, 0)):\n m_sUser = input('[INFO] waspy.init: Please Insert WASDI User:')\n else:\n m_sUser = raw_input('[INFO] waspy.init: Please Insert WASDI User:')\n\n m_sPassword = getpass.getpass(prompt='[INFO] waspy.init: Please Insert WASDI Password:', stream=None)\n\n m_sUser = m_sUser.rstrip()\n m_sPassword = m_sPassword.rstrip()\n\n if (sys.version_info > (3, 0)):\n sWname = input('[INFO] waspy.init: Please Insert Active Workspace Name (Enter to jump):')\n else:\n sWname = raw_input('[INFO] waspy.init: Please Insert Active Workspace Name (Enter to jump):')\n\n if m_sUser is None:\n print('[ERROR] waspy.init: must initialize user first, but None given' +\n ' ******************************************************************************')\n return False\n\n if m_sBasePath is None:\n if m_bIsOnServer is True:\n m_sBasePath = '/data/wasdi/'\n else:\n sHome = os.path.expanduser(\"~\")\n # the empty string at the end adds a separator\n m_sBasePath = os.path.join(sHome, \".wasdi\", \"\")\n\n if m_sSessionId != '':\n asHeaders = _getStandardHeaders()\n sUrl = m_sBaseUrl + '/auth/checksession'\n oResponse = requests.get(sUrl, headers=asHeaders)\n if (oResponse is not None) and (oResponse.ok is True):\n oJsonResult = oResponse.json()\n try:\n sUser = str(oJsonResult['userId'])\n if sUser == m_sUser:\n m_bValidSession = True\n else:\n m_bValidSession = False\n except:\n 
m_bValidSession = False\n else:\n m_bValidSession = False\n else:\n if m_sPassword is None:\n print('[ERROR] waspy.init: must initialize password first, but None given' +\n ' ******************************************************************************')\n return False\n\n asHeaders = {'Content-Type': 'application/json'}\n sUrl = m_sBaseUrl + '/auth/login'\n sPayload = '{\"userId\":\"' + m_sUser + '\",\"userPassword\":\"' + m_sPassword + '\" }'\n oResponse = requests.post(sUrl, data=sPayload, headers=asHeaders)\n\n if oResponse is None:\n print('[ERROR] waspy.init: cannot authenticate' +\n ' ******************************************************************************')\n m_bValidSession = False\n elif oResponse.ok is not True:\n print('[ERROR] waspy.init: cannot authenticate, server replied: ' + str(oResponse.status_code) +\n ' ******************************************************************************')\n m_bValidSession = False\n else:\n oJsonResult = oResponse.json()\n try:\n m_sSessionId = str(oJsonResult['sessionId'])\n _log('[INFO] waspy.init: returned session is: ' + str(m_sSessionId) + '\\n')\n if m_sSessionId is not None and m_sSessionId != '' and m_sSessionId != 'None':\n m_bValidSession = True\n else:\n m_bValidSession = False\n except:\n m_bValidSession = False\n\n if m_bValidSession is True:\n _log('[INFO] waspy.init: WASPY successfully initiated :-)')\n sW = getActiveWorkspaceId()\n if (sW is None) or (len(sW) < 1):\n if sWname is not None:\n openWorkspace(sWname)\n elif sWId is not None:\n openWorkspaceById(sWId)\n else:\n print('[ERROR] waspy.init: could not init WASPY :-(' +\n ' ******************************************************************************')\n\n printStatus()\n return m_bValidSession\n\n\ndef hello():\n \"\"\"\n Hello WASDI, to test the connection.\n :return: the hello message as Text\n \"\"\"\n global m_sBaseUrl\n\n sUrl = m_sBaseUrl + '/wasdi/hello'\n oResult = requests.get(sUrl)\n return oResult.text\n\ndef getWorkspaces():\n \"\"\"\n Get the List of user workspaces\n :return: an array of WASDI Workspace JSON Objects.\n Each Object is like this\n {\n \"ownerUserId\":STRING,\n \"sharedUsers\":[STRING],\n \"workspaceId\":STRING,\n \"workspaceName\":STRING\n }\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n\n asHeaders = _getStandardHeaders()\n\n sUrl = m_sBaseUrl + '/ws/byuser'\n\n oResult = requests.get(sUrl, headers=asHeaders)\n\n if (oResult is not None) and (oResult.ok is True):\n oJsonResult = oResult.json()\n return oJsonResult\n else:\n return None\n
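\n# Usage sketch (illustrative, names are examples): after a successful init() you can\n# enumerate your workspaces and activate one of them before working with products.\n#\n# if wasdi.init('./config.json'):\n# for oWorkspace in wasdi.getWorkspaces():\n# print(oWorkspace['workspaceName'], oWorkspace['workspaceId'])\n# wasdi.openWorkspace('MyWorkspace') # or createWorkspace('MyWorkspace') for a new one\n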
\ndef createWorkspace(sName=None):\n \"\"\"\n Create a new workspace and set it as the ACTIVE Workspace\n :param sName: Name of the workspace to create. None by default\n :return: Workspace Id as a String if it is a success, None otherwise\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n\n asHeaders = _getStandardHeaders()\n\n sUrl = m_sBaseUrl + '/ws/create'\n\n if sName is not None:\n sUrl = sUrl + \"?name=\" + sName\n\n oResult = requests.get(sUrl, headers=asHeaders)\n\n if (oResult is not None) and (oResult.ok is True):\n oJsonResult = oResult.json()\n\n openWorkspaceById(oJsonResult[\"stringValue\"])\n\n return oJsonResult[\"stringValue\"]\n else:\n return None\n\n\n\ndef deleteWorkspace(sWorkspaceId):\n \"\"\"\n Delete a workspace\n :param sWorkspaceId: Id of the workspace to delete\n :return: True if the deletion was a success, False otherwise\n \"\"\"\n asHeaders = _getStandardHeaders()\n \n if sWorkspaceId is None:\n print('[ERROR] waspy.deleteWorkspace: sWorkspaceId passed is None' +\n ' ******************************************************************************')\n return False\n\n bDeleteLayer = True\n bDeleteFile = True\n \n sActualWorkspaceId = getActiveWorkspaceId()\n \n openWorkspaceById(sWorkspaceId)\n \n try:\n sUrl = getWorkspaceBaseUrl() + '/ws/delete?sWorkspaceId='+sWorkspaceId+'&bDeleteLayer='+str(bDeleteLayer) + \"&bDeleteFile=\" + str(bDeleteFile)\n \n oResult = requests.delete(sUrl, headers=asHeaders)\n \n if (oResult is not None) and (oResult.ok is True):\n return True\n else:\n return False\n finally:\n openWorkspaceById(sActualWorkspaceId)\n\ndef getWorkspaceIdByName(sName):\n \"\"\"\n Get the Id of a Workspace from its name\n :param sName: Workspace Name\n :return: the WorkspaceId as a String, '' if there is any error\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n\n asHeaders = _getStandardHeaders()\n\n sUrl = m_sBaseUrl + '/ws/byuser'\n\n oResult = requests.get(sUrl, headers=asHeaders)\n\n if (oResult is not None) and (oResult.ok is True):\n oJsonResult = oResult.json()\n\n for oWorkspace in oJsonResult:\n try:\n if oWorkspace['workspaceName'] == sName:\n return oWorkspace['workspaceId']\n except:\n return ''\n\n return ''\n\n\ndef getWorkspaceOwnerByName(sName):\n \"\"\"\n Get the user Id of the owner of a Workspace from its name\n :param sName: Name of the workspace\n :return: the userId as a String, '' if there is any error\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n\n asHeaders = _getStandardHeaders()\n\n sUrl = m_sBaseUrl + '/ws/byuser'\n\n oResult = requests.get(sUrl, headers=asHeaders)\n\n if (oResult is not None) and (oResult.ok is True):\n oJsonResult = oResult.json()\n\n for oWorkspace in oJsonResult:\n try:\n if oWorkspace['workspaceName'] == sName:\n return oWorkspace['ownerUserId']\n except:\n return ''\n\n return ''\n\n\ndef getWorkspaceOwnerByWsId(sWsId):\n \"\"\"\n Get the user Id of the owner of a Workspace from the Workspace Id\n :param sWsId: Workspace Id\n :return: the userId as a String, '' if there is any error\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n\n asHeaders = _getStandardHeaders()\n\n sUrl = m_sBaseUrl + '/ws/byuser'\n\n oResult = requests.get(sUrl, headers=asHeaders)\n\n if (oResult is not None) and (oResult.ok is True):\n oJsonResult = oResult.json()\n\n for oWorkspace in oJsonResult:\n try:\n if oWorkspace['workspaceId'] == sWsId:\n return oWorkspace['ownerUserId']\n except:\n return ''\n\n return ''\n\n\ndef getWorkspaceUrlByWsId(sWsId):\n \"\"\"\n Get the Base Url of a Workspace from the Workspace Id\n :param sWsId: Workspace Id\n :return: the Workspace Base Url as a String, '' if there is any error\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n\n
asHeaders = _getStandardHeaders()\n\n sUrl = m_sBaseUrl + '/ws?sWorkspaceId=' + sWsId\n\n oResult = requests.get(sUrl, headers=asHeaders)\n\n if (oResult is not None) and (oResult.ok is True):\n oJsonResult = oResult.json()\n try:\n return oJsonResult['apiUrl']\n except:\n return ''\n\n return ''\n\n\ndef openWorkspaceById(sWorkspaceId):\n \"\"\"\n Open a workspace by Id\n :param sWorkspaceId: Workspace Id\n :return: the WorkspaceId as a String, '' if there is any error\n \"\"\"\n global m_sActiveWorkspace\n global m_sWorkspaceOwner\n global m_sWorkspaceBaseUrl\n\n m_sActiveWorkspace = sWorkspaceId\n m_sWorkspaceOwner = getWorkspaceOwnerByWsId(sWorkspaceId)\n m_sWorkspaceBaseUrl = getWorkspaceUrlByWsId(sWorkspaceId)\n\n if not m_sWorkspaceBaseUrl:\n m_sWorkspaceBaseUrl = getBaseUrl()\n\n return m_sActiveWorkspace\n\n\ndef openWorkspace(sWorkspaceName):\n \"\"\"\n Open a workspace by name\n :param sWorkspaceName: Workspace Name\n :return: the WorkspaceId as a String, '' if there is any error\n \"\"\"\n global m_sActiveWorkspace\n global m_sWorkspaceOwner\n global m_sWorkspaceBaseUrl\n\n m_sActiveWorkspace = getWorkspaceIdByName(sWorkspaceName)\n m_sWorkspaceOwner = getWorkspaceOwnerByName(sWorkspaceName)\n m_sWorkspaceBaseUrl = getWorkspaceUrlByWsId(m_sActiveWorkspace)\n if not m_sWorkspaceBaseUrl:\n m_sWorkspaceBaseUrl = getBaseUrl()\n\n return m_sActiveWorkspace\n\ndef getProductsByWorkspace(sWorkspaceName):\n \"\"\"\n Get the list of products in a workspace by Name\n :param sWorkspaceName: Name of the workspace\n :return: the list is an array of strings. Can be empty if there is any error\n \"\"\"\n\n sWorkspaceId = getWorkspaceIdByName(sWorkspaceName)\n return getProductsByWorkspaceId(sWorkspaceId)\n\n\ndef getProductsByWorkspaceId(sWorkspaceId):\n \"\"\"\n Get the list of products in a workspace by Id\n :param sWorkspaceId: Workspace Id\n :return: the list is an array of strings. Can be empty if there is any error\n \"\"\"\n global m_sBaseUrl\n global m_sActiveWorkspace\n\n m_sActiveWorkspace = sWorkspaceId\n asHeaders = _getStandardHeaders()\n payload = {'sWorkspaceId': sWorkspaceId}\n\n sUrl = m_sBaseUrl + '/product/byws'\n\n asProducts = []\n\n oResult = requests.get(sUrl, headers=asHeaders, params=payload)\n\n if oResult.ok is True:\n oJsonResults = oResult.json()\n\n for oProduct in oJsonResults:\n try:\n asProducts.append(oProduct['fileName'])\n except:\n continue\n\n return asProducts\n\n\ndef getProductsByActiveWorkspace():\n \"\"\"\n Get the list of products in the active workspace\n :return: the list is an array of strings. Can be empty if there is any error\n \"\"\"\n global m_sActiveWorkspace\n\n return getProductsByWorkspaceId(m_sActiveWorkspace)\n\n\ndef getPath(sFile):\n \"\"\"\n Get the Local File Path. If the file exists on WASDI and is needed, it is automatically downloaded.\n Returns the full local path where to read or write sFile\n :param sFile: name of the file\n :return: Local path where to read or write sFile\n \"\"\"\n\n if fileExistsOnWasdi(sFile) is True:\n return getFullProductPath(sFile)\n else:\n return getSavePath() + sFile\n
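\n\n# Usage sketch (illustrative file names): getPath() is the single entry point for both\n# reading and writing, so a processor does not need to care whether a product is local yet.\n#\n# sInputPath = wasdi.getPath('S1A_IW_GRDH_example.zip') # downloaded on demand if missing\n# sOutputPath = wasdi.getPath('flood_map.tif') # save path for a file not yet on WASDI\n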
\n\ndef getFullProductPath(sProductName):\n \"\"\"\n Get the full local path of a product given the product name. If auto download is active and the code is running locally, WASDI will download the image and keep the file on the local PC\n Use the output of this API to get the full path to open a file\n :param sProductName: name of the product to get the path open (WITH the final extension)\n :return: local path of the Product File\n \"\"\"\n global m_sBasePath\n global m_sActiveWorkspace\n global m_sUser\n global m_bIsOnServer\n global m_bDownloadActive\n global m_sWorkspaceOwner\n\n if m_bIsOnServer is True:\n sFullPath = '/data/wasdi/'\n else:\n sFullPath = m_sBasePath\n\n # Normalize the path and extract the name\n sProductName = os.path.basename(os.path.normpath(sProductName))\n sFullPath = os.path.join(sFullPath, m_sWorkspaceOwner, m_sActiveWorkspace, sProductName)\n\n # If we are on the local PC\n if m_bIsOnServer is False:\n # If the download is active\n if m_bDownloadActive is True:\n # If there is no local file\n if os.path.isfile(sFullPath) is False:\n # If the file exists on the server\n if fileExistsOnWasdi(sProductName) is True:\n # Download the file from WASDI\n print('[INFO] waspy.getFullProductPath: LOCAL WASDI FILE MISSING: START DOWNLOAD... PLEASE WAIT')\n _downloadFile(sProductName)\n print('[INFO] waspy.getFullProductPath: DOWNLOAD COMPLETED')\n else:\n try:\n # We are on the server and there is no local file\n if os.path.isfile(sFullPath) is False:\n # If the file exists on another node\n if fileExistsOnWasdi(sProductName) is True:\n # Download the file from WASDI\n wasdiLog('[WARNING] waspy.getFullProductPath: WASDI FILE ON ANOTHER NODE: START DOWNLOAD... PLEASE WAIT')\n _downloadFile(sProductName)\n wasdiLog('[WARNING] waspy.getFullProductPath: DOWNLOAD COMPLETED')\n except:\n wasdiLog('[ERROR] waspy.getFullProductPath: error downloading the file from the workspace node')\n \n\n return sFullPath\n\n\ndef getSavePath():\n \"\"\"\n Get the local base save path for a product. To save use this path + fileName. 
The path already includes '/' as the last char\n :return: local path to use to save files (with '/' as last char)\n \"\"\"\n global m_sBasePath\n global m_sActiveWorkspace\n global m_sUser\n\n if m_bIsOnServer is True:\n sFullPath = '/data/wasdi/'\n else:\n sFullPath = m_sBasePath\n\n # empty string at the end adds a final separator\n sFullPath = os.path.join(sFullPath, m_sWorkspaceOwner, m_sActiveWorkspace, \"\")\n\n return sFullPath\n\n\ndef getProcessStatus(sProcessId):\n \"\"\"\n Get the status of a Process\n :param sProcessId: Id of the process to query\n :return: the status or 'ERROR' if there was any error\n\n STATUS values are CREATED, RUNNING, STOPPED, DONE, ERROR, WAITING, READY\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n \n if sProcessId is None:\n wasdiLog('[ERROR] waspy.getProcessStatus: Passed None, expected a process ID' +\n ' ******************************************************************************')\n return \"ERROR\"\n\n if sProcessId == '':\n wasdiLog('[ERROR] waspy.getProcessStatus: Passed an empty string, expected a process ID' +\n ' ******************************************************************************')\n return \"ERROR\"\n\n asHeaders = _getStandardHeaders()\n payload = {'processObjId': sProcessId}\n\n sUrl = getWorkspaceBaseUrl() + '/process/getstatusbyid'\n\n oResult = requests.get(sUrl, headers=asHeaders, params=payload)\n\n sStatus = ''\n\n if (oResult is not None) and oResult.ok:\n try:\n sStatus = oResult.text\n except Exception as oE:\n wasdiLog('[ERROR] waspy.getProcessStatus: ' + str(oE))\n sStatus = 'ERROR'\n\n return sStatus\n
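\n\n# Usage sketch (sProcId is an illustrative process id): getProcessStatus can be polled\n# manually when waitProcess is too coarse, e.g. to log intermediate states.\n#\n# sStatus = wasdi.getProcessStatus(sProcId) # sProcId as returned by an asynch API\n# while sStatus not in ('DONE', 'ERROR', 'STOPPED'):\n# time.sleep(5)\n# sStatus = wasdi.getProcessStatus(sProcId)\n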
\n\ndef updateProcessStatus(sProcessId, sStatus, iPerc=-1):\n \"\"\"\n Update the status of a process\n :param sProcessId: Id of the process to update\n :param sStatus: Status of the process. Can be CREATED, RUNNING, STOPPED, DONE, ERROR, WAITING, READY\n :param iPerc: percentage of completion of the processor. Use -1 to ignore the percentage, or a value between 0 and 100 to set it.\n :return: the updated status as a String or '' if there was any problem\n \"\"\"\n\n if sProcessId is None:\n wasdiLog('[ERROR] waspy.updateProcessStatus: cannot update status, process ID is None' +\n ' ******************************************************************************')\n return ''\n elif sProcessId == '':\n return ''\n\n if sStatus is None:\n wasdiLog('[ERROR] waspy.updateProcessStatus: cannot update status, status is None' +\n ' ******************************************************************************')\n return ''\n elif sStatus not in {'CREATED', 'RUNNING', 'STOPPED', 'DONE', 'ERROR', 'WAITING', 'READY'}:\n wasdiLog(\n '[ERROR] waspy.updateProcessStatus: sStatus must be a string in: ' +\n '{CREATED, RUNNING, STOPPED, DONE, ERROR, WAITING, READY}' +\n ' ******************************************************************************')\n return ''\n\n if iPerc is None:\n wasdiLog('[ERROR] waspy.updateProcessStatus: percentage is None' +\n ' ******************************************************************************')\n return ''\n\n if iPerc < 0:\n if iPerc != -1:\n wasdiLog('[ERROR] waspy.updateProcessStatus: iPerc < 0 not valid' +\n ' ******************************************************************************')\n return ''\n else:\n _log('[INFO] waspy.updateProcessStatus: iPerc = -1 - Not considered')\n elif iPerc > 100:\n wasdiLog('[ERROR] waspy.updateProcessStatus: iPerc > 100 not valid' +\n ' ******************************************************************************')\n return ''\n\n global m_sBaseUrl\n global m_sSessionId\n\n asHeaders = _getStandardHeaders()\n payload = {'sProcessId': sProcessId, 'status': sStatus, 'perc': iPerc}\n\n sUrl = getWorkspaceBaseUrl() + '/process/updatebyid'\n\n oResult = requests.get(sUrl, headers=asHeaders, params=payload)\n\n sStatus = ''\n\n if (oResult is not None) and (oResult.ok is True):\n oJsonResult = oResult.json()\n try:\n sStatus = oJsonResult['status']\n except:\n sStatus = ''\n\n return sStatus\n
\n\ndef updateStatus(sStatus, iPerc=-1):\n \"\"\"\n Update the status of the running process\n :param sStatus: new status. Can be CREATED, RUNNING, STOPPED, DONE, ERROR, WAITING, READY\n :param iPerc: new Percentage. -1 by default, meaning: do not change the percentage. Use a value between 0 and 100 to set it.\n :return: the updated status as a String or '' if there was any problem\n \"\"\"\n try:\n\n if m_bIsOnServer is False:\n _log(\"[INFO] Running Locally, will not update status on server\")\n return sStatus\n\n return updateProcessStatus(getProcId(), sStatus, iPerc)\n except Exception as oEx:\n wasdiLog(\"[ERROR] waspy.updateStatus: exception \" + str(oEx))\n return ''\n\ndef waitProcess(sProcessId):\n \"\"\"\n Wait for a process to end\n :param sProcessId: Id of the process to wait for\n :return: output status of the process\n \"\"\"\n if sProcessId is None:\n _log('[ERROR] waspy.waitProcess: Passed None, expected a process ID' +\n ' ******************************************************************************')\n return \"ERROR\"\n\n if sProcessId == '':\n _log('[ERROR] waspy.waitProcess: Passed an empty string, expected a process ID' +\n ' ******************************************************************************')\n return \"ERROR\"\n\n # Put this processor in WAITING\n updateStatus(\"WAITING\")\n\n try:\n sStatus = ''\n\n while sStatus not in {\"DONE\", \"STOPPED\", \"ERROR\"}:\n sStatus = getProcessStatus(sProcessId)\n time.sleep(5)\n except:\n _log(\"[ERROR] Exception in waitProcess\")\n\n # Wait to be resumed\n _waitForResume()\n\n return sStatus\n\ndef waitProcesses(asProcIdList):\n \"\"\"\n Wait for a list of processes to finish.\n The list of processes is an array of strings, each being a proc id to wait for\n \n :param asProcIdList: list of strings, the procIds to wait for\n \n :return: list of strings with the same number of elements as the input, with the exit status of each process\n \"\"\"\n \n global m_sBaseUrl\n global m_sSessionId\n\n asHeaders = _getStandardHeaders()\n\n sUrl = getWorkspaceBaseUrl() + '/process/statusbyid'\n \n asReturnStatus = []\n \n # Check the input\n if asProcIdList is None:\n wasdiLog(\"[WARNING] waitProcesses asProcIdList is None, returning an empty list\")\n return asReturnStatus\n\n if not isinstance(asProcIdList, list):\n wasdiLog(\"[WARNING] waitProcesses asProcIdList is not a list, returning an empty list\")\n return asReturnStatus\n\n iTotalTime = 0\n\n # Put this process in WAITING\n updateStatus(\"WAITING\")\n \n bAllDone = False\n \n while not bAllDone:\n \n oResult = requests.post(sUrl, data=json.dumps(asProcIdList), headers=asHeaders)\n \n if (oResult is not None) and (oResult.ok is True):\n asResultStatus = oResult.json()\n asReturnStatus = asResultStatus\n \n bAllDone = True\n \n for sProcStatus in asResultStatus:\n if not (sProcStatus == \"DONE\" or sProcStatus == \"ERROR\" or sProcStatus == \"STOPPED\"):\n bAllDone = False\n break\n \n if not bAllDone:\n # Sleep a little bit\n time.sleep(5)\n # Trace the time spent waiting\n iTotalTime = iTotalTime + 5\n \n # Wait to be resumed\n _waitForResume()\n\n # Return the list of statuses\n return asReturnStatus\n
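\n\n# Usage sketch (illustrative processor name and params): waitProcesses is the typical\n# companion of the asynch APIs, turning a list of process ids into a list of final statuses.\n#\n# asProcIds = [wasdi.asynchExecuteProcessor('my_processor', {'param': 1}) for _ in range(3)]\n# asStatuses = wasdi.waitProcesses(asProcIds)\n# print(asStatuses) # e.g. ['DONE', 'DONE', 'ERROR']\n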
\n\ndef updateProgressPerc(iPerc):\n \"\"\"\n Update the actual progress Percentage of the processor\n :param iPerc: new Percentage. Use a value between 0 and 100 to set it.\n :return: updated status of the process or '' if there was any error\n \"\"\"\n try:\n _log('[INFO] waspy.updateProgressPerc( ' + str(iPerc) + ' )')\n if iPerc is None:\n wasdiLog('[ERROR] waspy.updateProgressPerc: Passed None, expected a percentage' +\n ' ******************************************************************************')\n return ''\n \n if 0 > iPerc or 100 < iPerc:\n wasdiLog('[WARNING] waspy.updateProgressPerc: passed ' + str(iPerc) + ', automatically resetting in [0, 100]')\n if iPerc < 0:\n iPerc = 0\n if iPerc > 100:\n iPerc = 100\n\n if m_bIsOnServer is False:\n _log(\"[INFO] Running locally, will not updateProgressPerc on server\")\n return \"RUNNING\"\n else: \n if (getProcId() is None) or (len(getProcId()) < 1):\n wasdiLog('[ERROR] waspy.updateProgressPerc: Cannot update progress: process ID is not known' +\n ' ******************************************************************************')\n return ''\n \n sStatus = \"RUNNING\"\n sUrl = getWorkspaceBaseUrl() + \"/process/updatebyid?sProcessId=\" + getProcId() + \"&status=\" + sStatus + \"&perc=\" + str(iPerc) + \"&sendrabbit=1\"\n asHeaders = _getStandardHeaders()\n oResponse = requests.get(sUrl, headers=asHeaders)\n sResult = \"\"\n if (oResponse is not None) and (oResponse.ok is True):\n oJson = oResponse.json()\n if (oJson is not None) and (\"status\" in oJson):\n sResult = str(oJson['status'])\n else:\n wasdiLog('[ERROR] waspy.updateProgressPerc: could not update progress' +\n ' ******************************************************************************')\n return sResult\n except Exception as oEx:\n wasdiLog(\"[ERROR] waspy.updateProgressPerc: exception \" + str(oEx))\n return ''\n\n\ndef setProcessPayload(sProcessId, data):\n \"\"\"\n Saves the Payload of a process\n :param sProcessId: Id of the process\n :param data: data to write in the payload. The suggestion is to use JSON\n :return: the updated status as a String or '' if there was any problem\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n\n try:\n asHeaders = _getStandardHeaders()\n payload = {'sProcessId': sProcessId, 'payload': json.dumps(data)}\n\n sUrl = getWorkspaceBaseUrl() + '/process/setpayload'\n\n oResult = requests.get(sUrl, headers=asHeaders, params=payload)\n\n sStatus = ''\n\n if (oResult is not None) and (oResult.ok is True):\n oJsonResult = oResult.json()\n try:\n sStatus = oJsonResult['status']\n except:\n sStatus = ''\n\n return sStatus\n except Exception as oEx:\n wasdiLog(\"[ERROR] waspy.setProcessPayload: exception \" + str(oEx))\n return ''\n
\ndef setPayload(data):\n \"\"\"\n Set the payload of the current running process.\n The payload is saved only when run on the Server. In local mode it is just printed.\n :param data: data to save in the payload. The suggestion is to use JSON\n :return: None\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n global m_sMyProcId\n global m_bIsOnServer\n\n if m_bIsOnServer is True:\n setProcessPayload(m_sMyProcId, data)\n else:\n _log('wasdi.setPayload( ' + str(data) + ' )')\n\n\ndef getProcessorPayload(sProcessObjId, bAsJson=False):\n \"\"\"\n Retrieves the payload\n :param sProcessObjId: a valid processor obj id\n :param bAsJson: flag to indicate whether the payload is a json object: if True, then a dictionary is returned\n :return: the processor payload if present, None otherwise\n \"\"\"\n try:\n if sProcessObjId is None:\n wasdiLog('[WARNING] waspy.getProcessorPayload: process obj id is None, aborting')\n return None\n sUrl = getWorkspaceBaseUrl() + '/process/payload'\n asParams = {'processObjId': sProcessObjId}\n asHeaders = _getStandardHeaders()\n oResponse = requests.get(url=sUrl, headers=asHeaders, params=asParams)\n if oResponse is None:\n wasdiLog('[ERROR] waspy.getProcessorPayload: response is None, failing')\n return None\n if oResponse.ok:\n if bAsJson:\n return oResponse.json()\n else:\n return oResponse.text\n else:\n wasdiLog('[ERROR] waspy.getProcessorPayload: response status not ok: ' + str(oResponse.status_code) + ': ' + str(oResponse.text))\n except Exception as oE:\n wasdiLog('[ERROR] waspy.getProcessorPayload: ' + str(oE))\n return None\n\n\ndef getProcessorPayloadAsJson(sProcessObjId):\n \"\"\"\n Retrieves the payload in json format using getProcessorPayload\n :param sProcessObjId: a valid processor obj id\n :return: the processor payload if present as a dictionary, None otherwise\n \"\"\"\n return getProcessorPayload(sProcessObjId, True)\n
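\n\n# Usage sketch (illustrative values and proc obj id): a processor usually saves its\n# summary as a JSON payload, which a caller can later read back as a dictionary.\n#\n# wasdi.setPayload({'floodedAreaKm2': 42.0, 'inputCount': 3}) # values are examples\n# aoPayload = wasdi.getProcessorPayloadAsJson(sSomeProcessObjId) # hypothetical id\n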
\n\ndef setSubPid(sProcessId, iSubPid):\n \"\"\"\n Saves the Sub Process PID of a process\n :param sProcessId: Id of the process\n :param iSubPid: PID of the physical sub process\n :return: the updated status as a String or '' if there was any problem\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n\n try:\n asHeaders = _getStandardHeaders()\n payload = {'sProcessId': sProcessId, 'subpid': iSubPid}\n\n sUrl = getWorkspaceBaseUrl() + '/process/setsubpid'\n\n oResult = requests.get(sUrl, headers=asHeaders, params=payload)\n\n sStatus = ''\n\n if (oResult is not None) and (oResult.ok is True):\n oJsonResult = oResult.json()\n try:\n sStatus = oJsonResult['status']\n except:\n sStatus = ''\n\n return sStatus\n except Exception as oEx:\n wasdiLog(\"[ERROR] waspy.setSubPid: exception \" + str(oEx))\n return ''\n\n\ndef saveFile(sFileName):\n \"\"\"\n Ingest a new file in the Active WASDI Workspace.\n The method takes a file saved in the workspace root (see getSavePath) not already added to the WS\n To work, be sure that the file is on the server\n :param sFileName: Name of the file to add to the workspace\n :return: Status of the operation\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n global m_sActiveWorkspace\n\n asHeaders = _getStandardHeaders()\n payload = {'file': sFileName, 'workspace': m_sActiveWorkspace}\n\n # sUrl = m_sBaseUrl + '/catalog/upload/ingestinws'\n sUrl = getWorkspaceBaseUrl() + '/catalog/upload/ingestinws'\n\n oResult = requests.get(sUrl, headers=asHeaders, params=payload)\n\n sProcessId = ''\n\n if (oResult is not None) and (oResult.ok is True):\n oJsonResult = oResult.json()\n try:\n if oJsonResult['boolValue'] is True:\n sProcessId = oJsonResult['stringValue']\n except:\n sProcessId = ''\n\n return sProcessId\n\n\ndef _downloadFile(sFileName):\n \"\"\"\n Download a file from WASDI\n :param sFileName: file to download\n :return: None\n \"\"\"\n\n _log('[INFO] waspy.downloadFile( ' + sFileName + ' )')\n\n global m_sBaseUrl\n global m_sSessionId\n global m_sActiveWorkspace\n\n asHeaders = _getStandardHeaders()\n payload = {'filename': sFileName}\n\n sUrl = getWorkspaceBaseUrl()\n sUrl += '/catalog/downloadbyname?'\n sUrl += 'filename='\n sUrl += sFileName\n sUrl += \"&workspace=\"\n sUrl += getActiveWorkspaceId()\n\n _log('[INFO] waspy.downloadFile: sending request to the configured url ' + sUrl)\n\n oResponse = requests.get(sUrl, headers=asHeaders, params=payload, stream=True)\n\n if (oResponse is not None) and (oResponse.status_code == 200):\n _log('[INFO] waspy.downloadFile: got ok result, downloading')\n sAttachmentName = None\n asResponseHeaders = oResponse.headers\n if asResponseHeaders is not None:\n if 'Content-Disposition' in asResponseHeaders:\n sContentDisposition = asResponseHeaders['Content-Disposition']\n sAttachmentName = sContentDisposition.split('filename=')[1]\n bLoop = True\n while bLoop is True:\n # Strip leading dots, leading/trailing slashes and quotes from the attachment\n # name, looping until no more trimming happens in a full pass\n bLoop = False\n if sAttachmentName[0] == '.':\n sAttachmentName = sAttachmentName[1:]\n bLoop = True\n if (sAttachmentName[0] == '/') or (sAttachmentName[0] == '\\\\'):\n sAttachmentName = sAttachmentName[1:]\n bLoop = True\n if (sAttachmentName[-1] == '/') or (sAttachmentName[-1] == '\\\\'):\n sAttachmentName = sAttachmentName[:-1]\n bLoop = True\n if (sAttachmentName[0] == '\\\"') or (sAttachmentName[0] == '\\''):\n sAttachmentName = sAttachmentName[1:]\n bLoop = True\n if (sAttachmentName[-1] == '\\\"') or (sAttachmentName[-1] == '\\''):\n sAttachmentName = sAttachmentName[:-1]\n bLoop = True\n sSavePath = getSavePath()\n sSavePath = os.path.join(sSavePath, sAttachmentName)\n
\n if not os.path.exists(os.path.dirname(sSavePath)):\n try:\n os.makedirs(os.path.dirname(sSavePath))\n except: # Guard against race condition\n print('[ERROR] waspy.downloadFile: cannot create File Path, aborting' +\n ' ******************************************************************************')\n return\n\n _log('[INFO] waspy.downloadFile: downloading local file ' + sSavePath)\n\n with open(sSavePath, 'wb') as oFile:\n for oChunk in oResponse:\n oFile.write(oChunk)\n _log('[INFO] waspy.downloadFile: download done, new file locally available ' + sSavePath)\n\n if (sAttachmentName is not None) and \\\n (sAttachmentName != sFileName) and \\\n sAttachmentName.lower().endswith('.zip'):\n sPath = getSavePath()\n _unzip(sAttachmentName, sPath)\n\n else:\n print('[ERROR] waspy.downloadFile: download error, server code: ' + str(oResponse.status_code) +\n ' ******************************************************************************')\n\n return\n\n\ndef wasdiLog(sLogRow):\n \"\"\"\n Write one row of Log\n :param sLogRow: text to log\n :return: None\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n global m_sActiveWorkspace\n\n sForceLogRow = str(sLogRow)\n\n if m_bIsOnServer:\n asHeaders = _getStandardHeaders()\n sUrl = getWorkspaceBaseUrl() + '/processors/logs/add?processworkspace=' + m_sMyProcId\n oResult = requests.post(sUrl, data=sForceLogRow, headers=asHeaders)\n if oResult is None:\n print('[WARNING] waspy.wasdiLog: could not log')\n elif oResult.ok is not True:\n print('[WARNING] waspy.wasdiLog: could not log, server returned: ' + str(oResult.status_code))\n else:\n _log(sForceLogRow)\n\n\ndef deleteProduct(sProduct):\n \"\"\"\n Delete a Product from a Workspace\n :param sProduct: Name of the product to delete (WITH EXTENSION)\n :return: True if the file has been deleted, False if there was any error\n \"\"\"\n global m_sBaseUrl\n global m_sSessionId\n global m_sActiveWorkspace\n\n if sProduct is None:\n print('[ERROR] waspy.deleteProduct: product passed is None' +\n ' ******************************************************************************')\n return False\n\n asHeaders = _getStandardHeaders()\n sUrl = getWorkspaceBaseUrl()\n sUrl += \"/product/delete?sProductName=\"\n sUrl += sProduct\n sUrl += \"&bDeleteFile=true&sWorkspaceId=\"\n sUrl += m_sActiveWorkspace\n sUrl += \"&bDeleteLayer=true\"\n oResult = requests.get(sUrl, headers=asHeaders)\n\n if oResult is None:\n wasdiLog('[ERROR] waspy.deleteProduct: deletion failed' +\n ' ******************************************************************************')\n return False\n elif oResult.ok is not True:\n wasdiLog('[ERROR] waspy.deleteProduct: deletion failed, server returned: ' + str(oResult.status_code) +\n ' ******************************************************************************')\n return False\n else:\n return oResult.ok\n\n\ndef searchEOImages(sPlatform, sDateFrom, sDateTo,\n fULLat=None, fULLon=None, fLRLat=None, fLRLon=None,\n sProductType=None, iOrbitNumber=None,\n sSensorOperationalMode=None, sCloudCoverage=None,\n sProvider=None):\n \"\"\"\n Search EO images\n\n :param sPlatform: satellite platform (S1 or S2)\n :param sDateFrom: initial date YYYY-MM-DD\n :param sDateTo: final date YYYY-MM-DD\n :param fULLat: Latitude of the Upper-Left corner\n :param fULLon: Longitude of the Upper-Left corner\n :param fLRLat: Latitude of the Lower-Right corner\n :param fLRLon: Longitude of the Lower-Right corner\n :param sProductType: type of EO product; If Platform = \"S1\" -> Accepts \"SLC\",\"GRD\", \"OCN\". 
If Platform = \"S2\" -> Accepts \"S2MSI1C\",\"S2MSI2Ap\",\"S2MSI2A\". Can be null.\n :param iOrbitNumber: orbit number\n :param sSensorOperationalMode: sensor operational mode\n :param sCloudCoverage: interval of allowed cloud coverage, e.g. \"[0 TO 22.5]\"\n :param sProvider: WASDI Data Provider to query. Null means default node provider\n :return: a list of results represented as a Dictionary with many properties. The dictionary has the \"fileName\" and \"relativeOrbit\" properties among the others \n \"\"\"\n aoReturnList = []\n\n if sPlatform is None:\n print('[ERROR] waspy.searchEOImages: platform cannot be None' +\n ' ******************************************************************************')\n return aoReturnList\n\n # todo support other platforms\n if (sPlatform != \"S1\") and (sPlatform != \"S2\"):\n wasdiLog('[ERROR] waspy.searchEOImages: platform must be S1 or S2. Received [' + sPlatform + ']' +\n ' ******************************************************************************')\n return aoReturnList\n\n if sPlatform == \"S1\":\n if sProductType is not None:\n if not (sProductType == \"SLC\" or sProductType == \"GRD\" or sProductType == \"OCN\"):\n wasdiLog(\"[ERROR] waspy.searchEOImages: Available Product Types for S1; SLC, GRD, OCN. Received [\" +\n sProductType +\n ' ******************************************************************************')\n return aoReturnList\n\n if sPlatform == \"S2\":\n if sProductType is not None:\n if not (sProductType == \"S2MSI1C\" or sProductType == \"S2MSI2Ap\" or sProductType == \"S2MSI2A\"):\n wasdiLog(\n \"[ERROR] waspy.searchEOImages: Available Product Types for S2; S2MSI1C, S2MSI2Ap, S2MSI2A. Received [\"\n + sProductType + \"]\" +\n ' ******************************************************************************')\n return aoReturnList\n\n if sDateFrom is None:\n wasdiLog(\"[ERROR] waspy.searchEOImages: sDateFrom cannot be None\" +\n ' ******************************************************************************')\n return aoReturnList\n\n # if (len(sDateFrom) < 10) or (sDateFrom[4] != '-') or (sDateFrom[7] != '-'):\n if not bool(re.match(r\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", sDateFrom)):\n wasdiLog(\"[ERROR] waspy.searchEOImages: sDateFrom must be in format YYYY-MM-DD\" +\n ' ******************************************************************************')\n return aoReturnList\n\n if sDateTo is None:\n wasdiLog(\"[ERROR] waspy.searchEOImages: sDateTo cannot be None\" +\n ' ******************************************************************************')\n return aoReturnList\n\n # if len(sDateTo) < 10 or sDateTo[4] != '-' or sDateTo[7] != '-':\n if not bool(re.match(r\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", sDateTo)):\n wasdiLog(\"[ERROR] waspy.searchEOImages: sDateTo must be in format YYYY-MM-DD\" +\n ' ******************************************************************************')\n return aoReturnList\n\n if sCloudCoverage is not None:\n # Force to be a String\n sCloudCoverage = str(sCloudCoverage)\n sCloudCoverage = sCloudCoverage.upper()\n\n # create query string:\n\n # platform name\n sQuery = \"( platformname:\"\n if sPlatform == \"S2\":\n sQuery += \"Sentinel-2 \"\n elif sPlatform == \"S1\":\n sQuery += \"Sentinel-1\"\n\n # If available add product type\n if sProductType is not None:\n sQuery += \" AND producttype:\" + str(sProductType)\n\n # If available Sensor Operational Mode\n if (sSensorOperationalMode is not None) and (sPlatform == \"S1\"):\n sQuery += \" AND sensoroperationalmode:\" + str(sSensorOperationalMode)\n\n # If 
available cloud coverage\n if (sCloudCoverage is not None) and (sPlatform == \"S2\"):\n sQuery += \" AND cloudcoverpercentage:\" + str(sCloudCoverage)\n\n # If available add the orbit number\n if iOrbitNumber is not None:\n if isinstance(iOrbitNumber, int):\n sQuery += \" AND relativeorbitnumber:\" + str(iOrbitNumber)\n else:\n print('[WARNING] waspy.searchEOImages: iOrbitNumber is ' + str(iOrbitNumber) + ', but it should be an integer')\n try:\n iTmp = int(iOrbitNumber)\n wasdiLog('[WARNING] waspy.searchEOImages: iOrbitNumber converted to: ' + str(iTmp))\n sQuery += \" AND relativeorbitnumber:\" + str(iTmp)\n except:\n wasdiLog('[WARNING] waspy.searchEOImages: could not convert iOrbitNumber to an int, ignoring it' +\n ' ******************************************************************************')\n\n # Close the first block\n sQuery += \") \"\n\n # Date Block: one group with both the begin and end position conditions\n sQuery += \"AND ( beginPosition:[\" + str(sDateFrom) + \"T00:00:00.000Z TO \" + str(sDateTo) + \"T23:59:59.999Z]\"\n sQuery += \" AND endPosition:[\" + str(sDateFrom) + \"T00:00:00.000Z TO \" + str(sDateTo) + \"T23:59:59.999Z]\"\n\n # Close the second block\n sQuery += \") \"\n\n # footprint polygon\n if (fULLat is not None) and (fULLon is not None) and (fLRLat is not None) and (fLRLon is not None):\n sFootPrint = \"( footprint:\\\"intersects(POLYGON(( \" + str(fULLon) + \" \" + str(fLRLat) + \",\" + \\\n str(fULLon) + \" \" + str(fULLat) + \",\" + str(fLRLon) + \" \" + str(fULLat) + \",\" + str(fLRLon) + \\\n \" \" + str(fLRLat) + \",\" + str(fULLon) + \" \" + str(fLRLat) + \")))\\\") AND \"\n sQuery = sFootPrint + sQuery\n\n sQueryBody = \"[\\\"\" + sQuery.replace(\"\\\"\", \"\\\\\\\"\") + \"\\\"]\"\n\n if sProvider is None:\n sProvider = \"ONDA\"\n\n sQuery = \"sQuery=\" + sQuery + \"&offset=0&limit=10&providers=\" + sProvider\n\n try:\n sUrl = getBaseUrl() + \"/search/querylist?\" + sQuery\n _log(\"[INFO] searchEOImages: Start Provider Query\")\n asHeaders = _getStandardHeaders()\n oResponse = requests.post(sUrl, data=sQueryBody, headers=asHeaders)\n _log(\"[INFO] searchEOImages: Query Done, starting conversion\")\n try:\n # populate the list from the response\n oJsonResponse = oResponse.json()\n _log(\"[INFO] searchEOImages: Conversion done\")\n aoReturnList = oJsonResponse\n except Exception as oEx:\n wasdiLog('[ERROR] waspy.searchEOImages: exception while trying to convert response into JSON object' +\n ' ******************************************************************************')\n return aoReturnList\n\n # For each result found\n for oSearchResult in aoReturnList:\n \n oSearchResult[\"fileName\"] = \"\"\n oSearchResult[\"relativeOrbit\"] = -1\n \n # Initialize the fileName property\n if oSearchResult[\"title\"] is not None:\n # Set the file name\n oSearchResult[\"fileName\"] = oSearchResult[\"title\"] + \".zip\"\n \n # Initialize the relative orbit\n if oSearchResult[\"properties\"] is not None:\n if oSearchResult[\"properties\"][\"relativeorbitnumber\"] is not None:\n # Set the relative Orbit\n oSearchResult[\"relativeOrbit\"] = oSearchResult[\"properties\"][\"relativeorbitnumber\"]\n \n return aoReturnList\n except Exception as oEx:\n wasdiLog('[ERROR] waspy.searchEOImages: an error occurred' +\n ' ******************************************************************************')\n wasdiLog(type(oEx))\n traceback.print_exc()\n wasdiLog(oEx)\n\n return aoReturnList\n
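\n\n# Usage sketch (illustrative dates and bounding box): a typical Sentinel-2 search over\n# an area of interest, limited by cloud coverage.\n#\n# aoImages = wasdi.searchEOImages('S2', '2020-01-01', '2020-01-31',\n# fULLat=46.0, fULLon=8.0, fLRLat=44.0, fLRLon=10.0,\n# sProductType='S2MSI1C', sCloudCoverage='[0 TO 30]')\n# for oImage in aoImages:\n# print(wasdi.getFoundProductName(oImage))\n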
\n\ndef getFoundProductName(aoProduct):\n \"\"\"\n Get the name of a product from a Dictionary returned by searchEOImages\n :param aoProduct: dictionary representing the product as returned by searchEOImages\n :return: product name or '' if there was any error\n \"\"\"\n if aoProduct is None:\n wasdiLog('[ERROR] waspy.getFoundProductName: product is None, aborting' +\n ' ******************************************************************************')\n return ''\n elif \"title\" not in aoProduct:\n wasdiLog('[ERROR] waspy.getFoundProductName: title not found in product, aborting' +\n ' ******************************************************************************')\n return ''\n else:\n return aoProduct['title']\n\n\ndef fileExistsOnWasdi(sFileName):\n \"\"\"\n Checks if a file already exists on WASDI or not\n :param sFileName: file name with extension\n :return: True if the file exists, False otherwise\n \"\"\"\n\n if sFileName is None:\n wasdiLog('[ERROR] waspy.fileExistsOnWasdi: file name must not be None' +\n ' ******************************************************************************')\n return False\n if len(sFileName) < 1:\n wasdiLog('[ERROR] waspy.fileExistsOnWasdi: File name too short' +\n ' ******************************************************************************')\n return False\n\n sSessionId = getSessionId()\n sActiveWorkspace = getActiveWorkspaceId()\n\n # note: 'checkdownloadavaialibitybyname' matches the server endpoint spelling\n sUrl = getWorkspaceBaseUrl()\n sUrl += \"/catalog/checkdownloadavaialibitybyname?token=\"\n sUrl += sSessionId\n sUrl += \"&filename=\"\n sUrl += sFileName\n sUrl += \"&workspace=\"\n sUrl += sActiveWorkspace\n\n asHeaders = _getStandardHeaders()\n oResult = requests.get(sUrl, headers=asHeaders)\n\n if oResult is None:\n wasdiLog('[ERROR] waspy.fileExistsOnWasdi: failed contacting the server' +\n ' ******************************************************************************')\n return False\n elif not oResult.ok and not 500 == oResult.status_code:\n wasdiLog('[ERROR] waspy.fileExistsOnWasdi: unexpected failure, server returned: ' + str(oResult.status_code) +\n ' ******************************************************************************')\n return False\n else:\n return oResult.ok\n\ndef getProductBBOX(sFileName):\n \"\"\"\n Gets the bounding box of a file\n :param sFileName: name of the file to query for the bounding box\n :return: Bounding Box, if available, as a comma separated String in the form SOUTH,WEST,EAST,NORTH\n \"\"\"\n\n sUrl = getBaseUrl()\n sUrl += \"/product/byname?sProductName=\"\n sUrl += sFileName\n sUrl += \"&workspace=\"\n sUrl += getActiveWorkspaceId()\n\n asHeaders = _getStandardHeaders()\n\n oResponse = requests.get(sUrl, headers=asHeaders)\n\n try:\n if oResponse is None:\n wasdiLog('[ERROR] waspy.getProductBBOX: cannot get bbox for product' +\n ' ******************************************************************************')\n elif oResponse.ok is not True:\n wasdiLog('[ERROR] waspy.getProductBBOX: cannot get bbox product, server returned: ' + str(\n oResponse.status_code) +\n ' ******************************************************************************')\n else:\n oJsonResponse = oResponse.json()\n if (\"bbox\" in oJsonResponse):\n return oJsonResponse[\"bbox\"]\n\n except:\n return \"\"\n\n return \"\"\n\n\ndef importProductByFileUrl(sFileUrl=None, sBoundingBox=None, sProvider=None):\n \"\"\"\n Imports a product from a Provider in WASDI, starting from the File URL.\n :param sFileUrl: url of the file to import\n :param sBoundingBox: declared bounding box of the file to import\n :param sProvider: WASDI Data Provider to use. Use None for the Default\n :return: execution status as a STRING. 
Can be DONE, ERROR, STOPPED.\n \"\"\"\n\n _log('[INFO] waspy.importProductByFileUrl( ' + str(sFileUrl) + ', ' + str(sBoundingBox) + ' )')\n\n sReturn = \"ERROR\"\n\n if sFileUrl is None:\n wasdiLog('[ERROR] waspy.importProductByFileUrl: cannot find a link to download the requested product' +\n ' ******************************************************************************')\n return sReturn\n\n if sProvider is None:\n sProvider = \"ONDA\"\n\n sUrl = getBaseUrl()\n sUrl += \"/filebuffer/download?sFileUrl=\"\n sUrl += sFileUrl\n sUrl += \"&sProvider=\" + sProvider\n sUrl += \"&sWorkspaceId=\"\n sUrl += getActiveWorkspaceId()\n\n if sBoundingBox is not None:\n sUrl += \"&sBoundingBox=\"\n sUrl += sBoundingBox\n\n if m_bIsOnServer:\n sUrl += \"&parent=\"\n sUrl += getProcId()\n\n asHeaders = _getStandardHeaders()\n\n oResponse = requests.get(sUrl, headers=asHeaders)\n if oResponse is None:\n wasdiLog('[ERROR] waspy.importProductByFileUrl: cannot import product' +\n ' ******************************************************************************')\n elif oResponse.ok is not True:\n wasdiLog('[ERROR] waspy.importProductByFileUrl: cannot import product, server returned: ' + str(\n oResponse.status_code) +\n ' ******************************************************************************')\n else:\n oJsonResponse = oResponse.json()\n if (\"boolValue\" in oJsonResponse) and (oJsonResponse[\"boolValue\"] is True):\n if \"stringValue\" in oJsonResponse:\n sProcessId = str(oJsonResponse[\"stringValue\"])\n sReturn = waitProcess(sProcessId)\n\n return sReturn\n\n\ndef asynchImportProductByFileUrl(sFileUrl=None, sBoundingBox=None, sProvider=None):\n \"\"\"\n Asynch Import of a product from a Provider in WASDI, starting from file URL\n :param sFileUrl: url of the file to import\n :param sBoundingBox: declared bounding box of the file to import\n :param sProvider: WASDI Data Provider. 
Use None for the default\n :return: ProcessId of the Download Operation or \"ERROR\" if there is any problem\n \"\"\"\n\n _log('[INFO] waspy.asynchImportProductByFileUrl( ' + str(sFileUrl) + ', ' + str(sBoundingBox) + ' )')\n\n sReturn = \"ERROR\"\n\n if sFileUrl is None:\n wasdiLog('[ERROR] waspy.asynchImportProductByFileUrl: cannot find a link to download the requested product' +\n ' ******************************************************************************')\n return sReturn\n\n if sProvider is None:\n sProvider = \"ONDA\"\n\n sUrl = getBaseUrl()\n sUrl += \"/filebuffer/download?sFileUrl=\"\n sUrl += sFileUrl\n sUrl += \"&sProvider=\"\n sUrl += sProvider\n sUrl += \"&sWorkspaceId=\"\n sUrl += getActiveWorkspaceId()\n if sBoundingBox is not None:\n sUrl += \"&sBoundingBox=\"\n sUrl += sBoundingBox\n\n if m_bIsOnServer:\n sUrl += \"&parent=\"\n sUrl += getProcId()\n\n asHeaders = _getStandardHeaders()\n\n oResponse = requests.get(sUrl, headers=asHeaders)\n if oResponse is None:\n wasdiLog('[ERROR] waspy.asynchImportProductByFileUrl: cannot import product' +\n ' ******************************************************************************')\n elif oResponse.ok is not True:\n wasdiLog('[ERROR] waspy.asynchImportProductByFileUrl: cannot import product, server returned: ' + str(\n oResponse.status_code) +\n ' ******************************************************************************')\n else:\n oJsonResponse = oResponse.json()\n if (\"boolValue\" in oJsonResponse) and (oJsonResponse[\"boolValue\"] is True):\n if \"stringValue\" in oJsonResponse:\n sReturn = str(oJsonResponse[\"stringValue\"])\n\n return sReturn\n\n\ndef importProduct(asProduct, sProvider=None):\n \"\"\"\n Imports a product from a Provider in WASDI starting from the object returned by searchEOImages\n :param asProduct: product dictionary as returned by searchEOImages\n :param sProvider: WASDI Data Provider. Use None for the default\n :return: execution status as a STRING. Can be DONE, ERROR, STOPPED.\n \"\"\"\n\n if asProduct is None:\n wasdiLog(\"[ERROR] waspy.importProduct: input asProduct is None\")\n return \"ERROR\"\n\n _log('[INFO] waspy.importProduct( ' + str(asProduct) + ' )')\n\n try:\n sBoundingBox = None\n sFileUrl = asProduct[\"link\"]\n if \"footprint\" in asProduct:\n sBoundingBox = asProduct[\"footprint\"]\n\n return importProductByFileUrl(sFileUrl, sBoundingBox, sProvider)\n except Exception as e:\n wasdiLog(\"[ERROR] waspy.importProduct: exception \" + str(e))\n return \"ERROR\"\n
\n\ndef asynchImportProduct(asProduct, sProvider=None):\n \"\"\"\n Asynch Import of a product from a Provider in WASDI starting from the object returned by searchEOImages\n :param asProduct: product dictionary as returned by searchEOImages\n :param sProvider: WASDI Data Provider. Use None for the default\n :return: ProcessId of the Download Operation or \"ERROR\" if there is any problem\n \"\"\"\n\n if asProduct is None:\n wasdiLog(\"[ERROR] waspy.asynchImportProduct: input asProduct is None\")\n return \"ERROR\"\n\n _log('[INFO] waspy.asynchImportProduct( ' + str(asProduct) + ' )')\n\n try:\n sBoundingBox = None\n sFileUrl = asProduct[\"link\"]\n if \"footprint\" in asProduct:\n sBoundingBox = asProduct[\"footprint\"]\n\n return asynchImportProductByFileUrl(sFileUrl, sBoundingBox, sProvider)\n except Exception as e:\n wasdiLog(\"[ERROR] waspy.asynchImportProduct: exception \" + str(e))\n return \"ERROR\"\n\n\ndef importProductList(aasProduct, sProvider=None):\n \"\"\"\n Imports a list of products from a Provider in WASDI starting from an array of objects returned by searchEOImages\n :param aasProduct: Array of product dictionaries as returned by searchEOImages\n :param sProvider: WASDI Data Provider. Use None for the default\n :return: execution status as an array of STRINGs, one for each product in input. Can be DONE, ERROR, STOPPED.\n \"\"\"\n\n if aasProduct is None:\n wasdiLog(\"[ERROR] waspy.importProductList: input aasProduct is None\")\n return \"ERROR\"\n\n _log('[INFO] waspy.importProductList( ' + str(aasProduct) + ' )')\n\n asReturnList = []\n\n # For each product in input\n for asProduct in aasProduct:\n try:\n # Get BBOX and Link from the dictionary\n sBoundingBox = None\n sFileUrl = asProduct[\"link\"]\n if \"footprint\" in asProduct:\n sBoundingBox = asProduct[\"footprint\"]\n\n # Start the download propagating the Asynch Flag\n sReturn = asynchImportProductByFileUrl(sFileUrl, sBoundingBox, sProvider)\n\n # Append the process id to the list\n asReturnList.append(sReturn)\n except Exception as e:\n wasdiLog(\"[ERROR] waspy.importProductList: exception \" + str(e))\n asReturnList.append(\"ERROR\")\n\n return waitProcesses(asReturnList)\n
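\n\n# Usage sketch (illustrative dates and product type): importProductList downloads every\n# result of a search and blocks until all transfers reach a final state.\n#\n# aoImages = wasdi.searchEOImages('S1', '2020-01-01', '2020-01-05', sProductType='GRD')\n# asStatuses = wasdi.importProductList(aoImages)\n# print(asStatuses) # one DONE/ERROR/STOPPED entry per product\n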
\n\ndef asynchImportProductList(aasProduct, sProvider=None):\n \"\"\"\n Asynch Import of a list of products from a Provider in WASDI starting from an array of objects returned by searchEOImages\n :param aasProduct: Array of product dictionaries as returned by searchEOImages\n :param sProvider: WASDI Data Provider. Use None for the default\n :return: array of the ProcessIds of the Download Operations. An element can be \"ERROR\" if there was any problem\n \"\"\"\n\n if aasProduct is None:\n wasdiLog(\"[ERROR] waspy.asynchImportProductList: input aasProduct is None\")\n return \"ERROR\"\n\n _log('[INFO] waspy.asynchImportProductList( ' + str(aasProduct) + ' )')\n\n asReturnList = []\n\n # For each product in input\n for asProduct in aasProduct:\n try:\n # Get BBOX and Link from the dictionary\n sBoundingBox = None\n sFileUrl = asProduct[\"link\"]\n if \"footprint\" in asProduct:\n sBoundingBox = asProduct[\"footprint\"]\n\n # Start the download propagating the Asynch Flag\n sReturn = asynchImportProductByFileUrl(sFileUrl, sBoundingBox, sProvider)\n # Append the process id to the list\n asReturnList.append(sReturn)\n except Exception as e:\n wasdiLog(\"[ERROR] waspy.asynchImportProductList: exception \" + str(e))\n asReturnList.append(\"ERROR\")\n\n # In ASYNCH MODE return the list of process Ids\n return asReturnList\n\ndef importAndPreprocess(aoImages, sWorkflow, sPreProcSuffix=\"_proc.tif\", sProvider=None):\n \"\"\"\n Imports in WASDI and applies a SNAP Workflow to an array of EO Images as returned by searchEOImages\n :param aoImages: array of images to import as returned by searchEOImages\n :param sWorkflow: name of the workflow to apply to each imported image\n :param sProvider: WASDI Data Provider. Use None for the default\n :param sPreProcSuffix: suffix to use for the name of the output of the workflows\n :return: None\n \"\"\"\n asOriginalFiles = []\n asPreProcessedFiles = []\n asRunningProcList = []\n \n asRunningDownloadList = []\n\n # For each image found\n for oImage in aoImages:\n\n # Get the file name\n sFile = oImage[\"title\"] + \".zip\"\n _log(\"[INFO] Importing Image \" + sFile)\n\n # Import in WASDI\n sImportProcId = asynchImportProduct(oImage, sProvider)\n \n if sImportProcId != \"ERROR\":\n asRunningDownloadList.append(sImportProcId)\n asOriginalFiles.append(sFile)\n \n # Flag to know if we are still waiting for a download\n bWaitingDownload = True\n \n # While there are downloads to wait for\n while bWaitingDownload:\n \n # Suppose they are done\n bWaitingDownload = False\n \n # For each running process\n for iImports in range(len(asRunningDownloadList)):\n \n # Get the status\n sImportProcId = asRunningDownloadList[iImports]\n \n if sImportProcId == \"DONE\" or sImportProcId == \"ERROR\" or sImportProcId == \"WAITING\":\n continue\n \n sImportStatus = getProcessStatus(sImportProcId)\n \n if sImportStatus == \"DONE\":\n # Yes, start the workflow\n sFile = asOriginalFiles[iImports]\n # Generate the output name\n sOutputFile = sFile.replace(\".zip\", sPreProcSuffix)\n\n _log(\"[INFO] \" + sFile + \" imported, starting workflow to get \" + sOutputFile)\n \n # Is it already there for any reason?\n if not fileExistsOnWasdi(sOutputFile):\n # No, start the workflow\n sProcId = asynchExecuteWorkflow(sFile, sOutputFile, sWorkflow)\n asRunningProcList.append(sProcId)\n asPreProcessedFiles.append(sOutputFile)\n \n asRunningDownloadList[iImports] = \"DONE\"\n elif sImportStatus == \"ERROR\" or sImportStatus == \"STOPPED\":\n asRunningDownloadList[iImports] = sImportStatus\n else:\n bWaitingDownload = True\n \n if bWaitingDownload:\n time.sleep(5)\n\n # Checkpoint: wait for all asynch workflows to finish\n _log(\"[INFO] All images imported, waiting for all workflows to finish\")\n waitProcesses(asRunningProcList)\n
\ndef asynchExecuteProcessor(sProcessorName, aoParams={}):\n \"\"\"\n Execute a WASDI processor asynchronously\n :param sProcessorName: WASDI processor name\n :param aoParams: a dictionary of parameters for the processor\n :return: processor ID\n \"\"\"\n \n global m_sActiveWorkspace\n\n _log('[INFO] waspy.asynchExecuteProcessor( ' + str(sProcessorName) + ', ' + str(aoParams) + ' )')\n\n if sProcessorName is None:\n wasdiLog('[ERROR] waspy.asynchExecuteProcessor: processor name is None, aborting' +\n ' ******************************************************************************')\n return ''\n elif len(sProcessorName) <= 0:\n wasdiLog('[ERROR] waspy.asynchExecuteProcessor: processor name empty, aborting' +\n ' ******************************************************************************')\n return ''\n if isinstance(aoParams, dict) is not True:\n wasdiLog('[ERROR] waspy.asynchExecuteProcessor: parameters must be a dictionary but it is not, aborting' +\n ' ******************************************************************************')\n return ''\n\n sEncodedParams = json.dumps(aoParams)\n asHeaders = _getStandardHeaders()\n aoWasdiParams = {'workspace': m_sActiveWorkspace,\n 'name': sProcessorName,\n 'encodedJson': sEncodedParams}\n\n if m_bIsOnServer:\n aoWasdiParams['parent'] = getProcId()\n\n sUrl = getBaseUrl() + \"/processors/run\"\n\n oResponse = requests.get(sUrl, headers=asHeaders, params=aoWasdiParams)\n \n if oResponse is None:\n wasdiLog('[ERROR] waspy.asynchExecuteProcessor: something broke when contacting the server, aborting' +\n ' ******************************************************************************')\n return ''\n elif oResponse.ok is True:\n _log('[INFO] waspy.asynchExecuteProcessor: API call OK')\n aoJson = oResponse.json()\n if \"processingIdentifier\" in aoJson:\n sProcessID = aoJson['processingIdentifier']\n return sProcessID\n else:\n wasdiLog('[ERROR] waspy.asynchExecuteProcessor: cannot extract processing identifier from response, aborting' +\n ' ******************************************************************************')\n else:\n wasdiLog('[ERROR] waspy.asynchExecuteProcessor: server returned status ' + str(oResponse.status_code) +\n ' ******************************************************************************')\n\n return ''\n
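\n\n# Usage sketch (illustrative processor name and params): chaining processors by launching\n# one asynchronously and then waiting on the returned process id.\n#\n# sProcId = wasdi.asynchExecuteProcessor('hellowasdiworld', {'NAME': 'WASDI'})\n# sFinalStatus = wasdi.waitProcess(sProcId)\n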
\n\ndef executeProcessor(sProcessorName, aoProcessParams):\n \"\"\"\n Executes a WASDI Processor asynchronously. The method retries up to three times if there is any problem.\n :param sProcessorName: WASDI processor name\n :param aoProcessParams: a dictionary of parameters for the processor\n :return: the Process Id if everything is ok, '' if there was any problem\n \"\"\"\n global m_sActiveWorkspace\n \n if sProcessorName is None:\n wasdiLog('[ERROR] waspy.executeProcessor: processor name is None, aborting' +\n ' ******************************************************************************')\n return ''\n elif len(sProcessorName) <= 0:\n wasdiLog('[ERROR] waspy.executeProcessor: processor name empty, aborting' +\n ' ******************************************************************************')\n return ''\n if isinstance(aoProcessParams, dict) is not True:\n wasdiLog('[ERROR] waspy.executeProcessor: parameters must be a dictionary but it is not, aborting' +\n ' ******************************************************************************')\n return ''\n \n # Prepare API headers and params\n sEncodedParams = json.dumps(aoProcessParams)\n \n asHeaders = _getStandardHeaders()\n \n sUrl = getBaseUrl() + '/processors/run?workspace=' + m_sActiveWorkspace + '&name='+sProcessorName\n \n if m_bIsOnServer:\n sUrl = sUrl + '&parent=' + getProcId()\n \n # Try up to three times\n iMaxRetry = 3\n \n for iAttempt in range(iMaxRetry):\n \n _log(\"[INFO] waspy.executeProcessor: execute Processor Attempt # \" + str(iAttempt+1))\n \n oResult = requests.post(sUrl, data=sEncodedParams, headers=asHeaders)\n \n if oResult is None:\n wasdiLog('[ERROR] waspy.executeProcessor: something broke when contacting the server')\n elif oResult.ok is True:\n _log('[INFO] waspy.executeProcessor: API call OK')\n aoJson = oResult.json()\n if \"processingIdentifier\" in aoJson:\n sProcessID = aoJson['processingIdentifier']\n return sProcessID\n else:\n wasdiLog('[ERROR] waspy.executeProcessor: cannot extract processing identifier from response, aborting')\n else:\n wasdiLog('[ERROR] waspy.executeProcessor: server returned status ' + str(oResult.status_code))\n \n wasdiLog(\"[ERROR]: Error triggering the new process.\")\n time.sleep(5)\n \n wasdiLog(\"[ERROR]: process not triggered, too many errors\")\n \n # If we exit from the cycle, we do not have any result for our client...\n return ''\n\n\n\ndef _uploadFile(sFileName):\n \"\"\"\n Uploads a file to WASDI\n :param sFileName: name of a file inside the working directory OR a path to the file RELATIVE to the working directory\n :return: True if succeeded, False otherwise\n \"\"\"\n\n _log('upload ' + sFileName)\n \n if getIsOnServer() is True:\n return True\n \n bResult = False\n try:\n if sFileName is None:\n wasdiLog('[ERROR] upload: the given file name is None, cannot upload')\n return False\n\n sFileProperName = os.path.basename(sFileName)\n\n sFullPath = getPath(sFileName)\n\n sUrl = getWorkspaceBaseUrl() + '/product/uploadfilebylib?workspace=' + getActiveWorkspaceId() + '&name=' + sFileProperName\n asHeaders = _getStandardHeaders()\n if 'Content-Type' in asHeaders:\n del (asHeaders['Content-Type'])\n\n oFiles = {'file': (sFileProperName, open(sFullPath, 'rb'))}\n\n _log('uploadFile: uploading file to wasdi...')\n\n oResponse = requests.post(sUrl, files=oFiles, headers=asHeaders)\n if oResponse.ok:\n _log('uploadFile: upload complete :-)')\n bResult = True\n else:\n wasdiLog('[ERROR] uploadFile: upload failed with code ' + str(oResponse.status_code) + ': ' + str(oResponse.text))\n\n except Exception as oE:\n wasdiLog('[ERROR] uploadFile: ' + str(oE))\n # finally:\n # os.chdir(getScriptPath())\n return bResult\n
addFileToWASDI(sFileName):\n    \"\"\"\n    Add a file to the wasdi workspace\n    :param sFileName: Name (with extension) of the file to add\n    :return: status of the operation\n    \"\"\"\n    return _internalAddFileToWASDI(sFileName, False)\n\n\ndef asynchAddFileToWASDI(sFileName):\n    \"\"\"\n    Triggers the ingestion of File Name in the workspace\n    :param sFileName: Name (with extension) of the file to add\n    :return: Process Id of the ingestion\n    \"\"\"\n    return _internalAddFileToWASDI(sFileName, True)\n\n\ndef subset(sInputFile, sOutputFile, dLatN, dLonW, dLatS, dLonE):\n    \"\"\"\n    Creates a Subset of an image.\n    :param sInputFile: Input file \n    :param sOutputFile: Output File\n    :param dLatN: Latitude North of the subset\n    :param dLonW: Longitude West of the subset\n    :param dLatS: Latitude South of the subset\n    :param dLonE: Longitude East of the subset\n    \"\"\"\n    _log('[INFO] waspy.subset( ' + str(sInputFile) + ', ' + str(sOutputFile) + ', ' +\n         str(dLatN) + ', ' + str(dLonW) + ', ' + str(dLatS) + ', ' + str(dLonE) + ' )')\n\n    if sInputFile is None:\n        wasdiLog('[ERROR] waspy.subset: input file must not be None, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    if len(sInputFile) < 1:\n        wasdiLog('[ERROR] waspy.subset: input file name must not have zero length, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    if sOutputFile is None:\n        wasdiLog('[ERROR] waspy.subset: output file must not be None, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    if len(sOutputFile) < 1:\n        wasdiLog('[ERROR] waspy.subset: output file name must not have zero length, aborting' +\n                 ' ******************************************************************************')\n        return ''\n\n    sUrl = m_sBaseUrl + \"/processing/geometric/subset?sSourceProductName=\" + sInputFile + \"&sDestinationProductName=\" + \\\n           sOutputFile + \"&sWorkspaceId=\" + m_sActiveWorkspace\n\n    if m_bIsOnServer:\n        sUrl += \"&parent=\"\n        sUrl += getProcId()\n\n    sSubsetSetting = \"{ \\\"latN\\\": \" + str(dLatN) + \", \\\"lonW\\\": \" + str(dLonW) + \", \\\"latS\\\": \" + str(dLatS) + \", \\\"lonE\\\": \" + str(dLonE) + \" }\"\n    asHeaders = _getStandardHeaders()\n    oResponse = requests.get(sUrl, data=sSubsetSetting, headers=asHeaders)\n    if oResponse is None:\n        wasdiLog('[ERROR] waspy.subset: cannot contact server' +\n                 ' ******************************************************************************')\n        return ''\n    if oResponse.ok is not True:\n        wasdiLog('[ERROR] waspy.subset: failed, server returned ' + str(oResponse.status_code) +\n                 ' ******************************************************************************')\n        return ''\n    else:\n        oJson = oResponse.json()\n        if oJson is not None:\n            if 'stringValue' in oJson:\n                sProcessId = oJson['stringValue']\n                return waitProcess(sProcessId)\n\n    return ''\n
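\n# Illustrative usage sketch (not part of the library): file names and\n# coordinates below are hypothetical placeholders.\n# sStatus = subset('S2_product.tif', 'S2_subset.tif', 46.0, 8.0, 45.0, 9.0)\n\n\ndef multiSubset(sInputFile, asOutputFiles, adLatN, adLonW, adLatS, adLonE, bBigTiff=False):\n    \"\"\"\n    Creates many subsets from an image. 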
MAX 10 TILES PER CALL\n    :param sInputFile: Input file \n    :param asOutputFiles: Array of Output File Names\n    :param adLatN: Array of Latitudes North of the subsets\n    :param adLonW: Array of Longitudes West of the subsets\n    :param adLatS: Array of Latitudes South of the subsets\n    :param adLonE: Array of Longitudes East of the subsets\n    \"\"\"\n\n    _log('[INFO] waspy.multiSubset( ' + str(sInputFile) + ', ' + str(asOutputFiles) + ', ' +\n         str(adLatN) + ', ' + str(adLonW) + ', ' + str(adLatS) + ', ' + str(adLonE) + ' )')\n\n    if sInputFile is None:\n        wasdiLog('[ERROR] waspy.multiSubset: input file must not be None, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    if len(sInputFile) < 1:\n        wasdiLog('[ERROR] waspy.multiSubset: input file name must not have zero length, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    if asOutputFiles is None:\n        wasdiLog('[ERROR] waspy.multiSubset: output files must not be None, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    if len(asOutputFiles) < 1:\n        wasdiLog('[ERROR] waspy.multiSubset: output file names must not have zero length, aborting' +\n                 ' ******************************************************************************')\n        return ''\n\n    if len(asOutputFiles) > 10:\n        wasdiLog('[ERROR] waspy.multiSubset: max allowed 10 tiles per call' +\n                 ' ******************************************************************************')\n        return ''\n\n    sUrl = m_sBaseUrl + \"/processing/geometric/multisubset?sSourceProductName=\" + sInputFile + \"&sDestinationProductName=\" + \\\n           sInputFile + \"&sWorkspaceId=\" + m_sActiveWorkspace\n\n    if m_bIsOnServer:\n        sUrl += \"&parent=\"\n        sUrl += getProcId()\n\n    aoBody = {}\n\n    aoBody[\"outputNames\"] = asOutputFiles\n    aoBody[\"latNList\"] = adLatN\n    aoBody[\"lonWList\"] = adLonW\n    aoBody[\"latSList\"] = adLatS\n    aoBody[\"lonEList\"] = adLonE\n    \n    if bBigTiff:\n        aoBody[\"bigTiff\"] = True\n    \n    sSubsetSetting = json.dumps(aoBody)\n    asHeaders = _getStandardHeaders()\n\n    oResponse = requests.post(sUrl, headers=asHeaders, data=sSubsetSetting)\n\n    if oResponse is None:\n        wasdiLog('[ERROR] waspy.multiSubset: cannot contact server' +\n                 ' ******************************************************************************')\n        return ''\n\n    if oResponse.ok is not True:\n        wasdiLog('[ERROR] waspy.multiSubset: failed, server returned ' + str(oResponse.status_code) +\n                 ' ******************************************************************************')\n        return ''\n    else:\n        oJson = oResponse.json()\n        if oJson is not None:\n            if 'stringValue' in oJson:\n                sProcessId = oJson['stringValue']\n                return waitProcess(sProcessId)\n\n    return ''\n
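\n# Illustrative usage sketch (not part of the library): names and bounds below\n# are hypothetical placeholders; at most 10 tiles are allowed per call.\n# sStatus = multiSubset('input.tif', ['tile_0.tif', 'tile_1.tif'],\n#                       [46.0, 45.0], [8.0, 8.0], [45.0, 44.0], [9.0, 9.0])\n\ndef getWorkflows():\n    \"\"\"\n    Get the list of workflows for the user\n    :return: None if there is an error; an array of WASDI Workflow JSON Objects if everything is ok. 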
The format is as follows:\n\n    {\n        \"description\":STRING,\n        \"name\": STRING,\n        \"workflowId\": STRING\n    }\n\n    \"\"\"\n    global m_sBaseUrl\n    global m_sSessionId\n\n    asHeaders = _getStandardHeaders()\n\n    sUrl = m_sBaseUrl + '/processing/getgraphsbyusr'\n\n    oResult = requests.get(sUrl, headers=asHeaders)\n\n    if (oResult is not None) and (oResult.ok is True):\n        oJsonResults = oResult.json()\n        return oJsonResults\n    else:\n        return None\n\ndef executeWorkflow(asInputFileNames, asOutputFileNames, sWorkflowName):\n    \"\"\"\n    Execute a SNAP Workflow available in WASDI (you can use WASDI to upload your SNAP Graph XML and use it remotely)\n    :param asInputFileNames: array of the inputs of the workflow. Must correspond to the number of inputs of the workflow.\n    :param asOutputFileNames: array of the outputs of the workflow. Must correspond to the number of outputs of the workflow.\n    :param sWorkflowName: Name of the workflow to run\n    :return: final status of the executed Workflow\n    \"\"\"\n    return _internalExecuteWorkflow(asInputFileNames, asOutputFileNames, sWorkflowName, False)\n\n\ndef asynchExecuteWorkflow(asInputFileNames, asOutputFileNames, sWorkflowName):\n    \"\"\"\n    Trigger the asynch execution of a SNAP Workflow available in WASDI (you can use WASDI to upload your SNAP Graph XML and use it remotely)\n    :param asInputFileNames: array of the inputs of the workflow. Must correspond to the number of inputs of the workflow.\n    :param asOutputFileNames: array of the outputs of the workflow. Must correspond to the number of outputs of the workflow.\n    :param sWorkflowName: Name of the workflow to run\n    :return: Process Id of the started workflow\n    \"\"\"\n    return _internalExecuteWorkflow(asInputFileNames, asOutputFileNames, sWorkflowName, True)\n\n\ndef asynchMosaic(asInputFiles, sOutputFile, iNoDataValue=None, iIgnoreInputValue=None):\n    \"\"\"\n    Start a mosaic out of a set of images in asynch way\n\n    :param asInputFiles: List of input files to mosaic\n    :param sOutputFile: Name of the mosaic output file\n    :param iNoDataValue: Value to use as noData. Use -1 to ignore\n    :param iIgnoreInputValue: Value to ignore from the input files of the mosaic. Use -1 to ignore\n    :return: Process ID if asynchronous execution, end status otherwise. An empty string is returned in case of failure\n    \"\"\"\n\n    return mosaic(asInputFiles, sOutputFile, iNoDataValue, iIgnoreInputValue, True)\n
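\n# Illustrative usage sketch (not part of the library): workflow and file names\n# below are hypothetical placeholders.\n# sStatus = executeWorkflow(['S1_input.zip'], ['S1_output.tif'], 'MyGraph')\n# sProcId = asynchExecuteWorkflow(['S1_input.zip'], ['S1_output.tif'], 'MyGraph')\n\n\ndef mosaic(asInputFiles, sOutputFile, iNoDataValue=None, iIgnoreInputValue=None, bAsynch=False):\n    \"\"\"\n    Creates a mosaic out of a set of images\n\n    :param asInputFiles: List of input files to mosaic\n    :param sOutputFile: Name of the mosaic output file\n    :param iNoDataValue: Value to use as noData. Use -1 to ignore\n    :param iIgnoreInputValue: Value to ignore from the input files of the mosaic. Use -1 to ignore\n    :param bAsynch: True to return after the triggering, False to wait for the process to finish\n    :return: Process ID if asynchronous execution, end status otherwise. 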
An empty string is returned in case of failure\n    \"\"\"\n    asBands = []\n    fPixelSizeX = -1.0\n    fPixelSizeY = -1.0\n    sCrs = None\n    fSouthBound = -1.0\n    fNorthBound = -1.0\n    fEastBound = -1.0\n    fWestBound = -1.0\n    sOverlappingMethod = \"MOSAIC_TYPE_OVERLAY\"\n    bShowSourceProducts = False\n    sElevationModelName = \"ASTER 1sec GDEM\"\n    sResamplingName = \"Nearest\"\n    bUpdateMode = False\n    bNativeResolution = True\n    sCombine = \"OR\"\n\n    _log('[INFO] waspy.mosaic( ' +\n         str(asInputFiles) + ', ' +\n         str(sOutputFile) + ', ' +\n         str(iNoDataValue) + ', ' +\n         str(iIgnoreInputValue) + ', ' +\n         str(bAsynch) + ' )'\n         )\n\n    if asInputFiles is None:\n        wasdiLog('[ERROR] waspy.mosaic: list of input files is None, aborting')\n        return ''\n    elif len(asInputFiles) <= 0:\n        wasdiLog('[ERROR] waspy.mosaic: list of input files is empty, aborting')\n        return ''\n\n    if sOutputFile is None:\n        wasdiLog('[ERROR] waspy.mosaic: name of output file is None, aborting')\n        return ''\n    elif isinstance(sOutputFile, str) is False:\n        wasdiLog('[ERROR] waspy.mosaic: output file name must be a string, but a ' + str(type(sOutputFile)) +\n                 ' was passed, aborting')\n        return ''\n    elif len(sOutputFile) <= 0:\n        wasdiLog('[ERROR] waspy.mosaic: output file name is empty, aborting')\n        return ''\n\n    sUrl = getBaseUrl() + \"/processing/geometric/mosaic?sDestinationProductName=\" + sOutputFile + \"&sWorkspaceId=\" + \\\n           getActiveWorkspaceId()\n\n    if m_bIsOnServer:\n        sUrl += \"&parent=\"\n        sUrl += getProcId()\n\n    sOutputFormat = \"GeoTIFF\"\n    if sOutputFile.endswith(\".dim\"):\n        sOutputFormat = \"BEAM-DIMAP\"\n    if sOutputFile.endswith(\".vrt\"):\n        sOutputFormat = \"VRT\"\n\n    if sCrs is None:\n        sCrs = _getDefaultCRS()\n\n    # todo check input type is appropriate\n    try:\n        aoMosaicSettings = {\n            'crs': sCrs,\n            'southBound': fSouthBound,\n            'eastBound': fEastBound,\n            'westBound': fWestBound,\n            'northBound': fNorthBound,\n            'pixelSizeX': fPixelSizeX,\n            'pixelSizeY': fPixelSizeY,\n            'noDataValue': iNoDataValue,\n            'inputIgnoreValue': iIgnoreInputValue,\n            'overlappingMethod': sOverlappingMethod,\n            'showSourceProducts': bShowSourceProducts,\n            'elevationModelName': sElevationModelName,\n            'resamplingName': sResamplingName,\n            'updateMode': bUpdateMode,\n            'nativeResolution': bNativeResolution,\n            'combine': sCombine,\n            'outputFormat': sOutputFormat,\n            'sources': asInputFiles,\n            'variableNames': asBands,\n            'variableExpressions': []\n        }\n    except:\n        wasdiLog('[ERROR] waspy.mosaic: cannot build DTO, please check your input. Aborting')\n        return ''\n\n    asHeaders = _getStandardHeaders()\n    oResponse = requests.post(sUrl, data=json.dumps(aoMosaicSettings), headers=asHeaders)\n    if oResponse is None:\n        wasdiLog('[ERROR] waspy.mosaic: cannot contact server, aborting')\n        return ''\n    if oResponse.ok is True:\n        asJson = oResponse.json()\n        if 'stringValue' in asJson:\n            sProcessId = str(asJson['stringValue'])\n            if bAsynch is False:\n                return waitProcess(sProcessId)\n            else:\n                return sProcessId\n    else:\n        wasdiLog('[ERROR] waspy.mosaic: server responded with status: ' + str(oResponse.status_code) + ', aborting')\n        return ''\n\n    return ''\n
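\n# Illustrative usage sketch (not part of the library): input names below are\n# hypothetical placeholders.\n# sStatus = mosaic(['tile_0.tif', 'tile_1.tif'], 'mosaic.tif', -1, -1)\n# sProcId = asynchMosaic(['tile_0.tif', 'tile_1.tif'], 'mosaic.vrt')\n\n\n\ndef copyFileToSftp(sFileName, bAsynch=None):\n    \"\"\"\n    Copy a file from a workspace to the WASDI user's SFTP Folder\n    \n    :param sFileName: File name (with extension, without path) to copy in the SFTP folder\n    :param bAsynch: True to return after the triggering, False to wait for the process to finish\n    :return: Process ID if asynchronous execution, end status otherwise. 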
An empty string is returned in case of failure \n \"\"\"\n \n _log('[INFO] waspy.copyFileToSftp( ' + str(sFileName) + ', ' + str(bAsynch) + ' )')\n\n if sFileName is None:\n wasdiLog('[ERROR] waspy.copyFileToSftp: file name is None, aborting' +\n ' ******************************************************************************')\n return ''\n if not isinstance(sFileName, str):\n wasdiLog('[WARNING] waspy.copyFileToSftp: file name is not a string, trying conversion' +\n ' ******************************************************************************')\n try:\n sFileName = str(sFileName)\n except:\n wasdiLog('[ERROR] waspy.copyFileToSftp: cannot convert file name into string, aborting' +\n ' ******************************************************************************')\n return ''\n if len(sFileName) < 1:\n wasdiLog('[ERROR] waspy.copyFileToSftp: file name has zero length, aborting' +\n ' ******************************************************************************')\n return ''\n\n if bAsynch is None:\n wasdiLog('[WARNING] waspy.copyFileToSftp: asynch flag is None, assuming False')\n bAsynch = False\n if not isinstance(bAsynch, bool):\n wasdiLog('[WARNING] waspy.copyFileToSftp: asynch flag is not a boolean, trying casting')\n try:\n bAsynch = bool(bAsynch)\n except:\n wasdiLog('[ERROR] waspy.copyFileToSftp: could not convert asynch flag into bool, aborting' +\n ' ******************************************************************************')\n return ''\n\n sResult = ''\n try:\n if getUploadActive() is True:\n sFilePath = os.path.join(getSavePath(), sFileName)\n if fileExistsOnWasdi(sFilePath) is False:\n _log('[INFO] waspy.copyFileToSftp: remote file is missing, uploading')\n try:\n _uploadFile(sFileName)\n _log('[INFO] waspy.moveFileToSftp: file uploaded, keep on working!')\n except:\n wasdiLog('[ERROR] waspy.copyFileToSftp: could not proceed with upload' +\n ' ******************************************************************************')\n\n sUrl = getWorkspaceBaseUrl() + \"/catalog/copytosfpt?file=\" + sFileName + \"&workspace=\" + getActiveWorkspaceId()\n\n if m_bIsOnServer:\n sUrl += \"&parent=\"\n sUrl += getProcId()\n\n asHeaders = _getStandardHeaders()\n oResponse = requests.get(url=sUrl, headers=asHeaders)\n if oResponse is None:\n wasdiLog('[ERROR] waspy.copyFileToSftp: cannot contact server' +\n ' ******************************************************************************')\n elif oResponse.ok is not True:\n wasdiLog('[ERROR] waspy.copyFileToSftp: failed, server replied ' + str(oResponse.status_code) +\n ' ******************************************************************************')\n else:\n oJson = oResponse.json()\n if 'stringValue' in oJson:\n bOk = bool(oJson['boolValue'])\n if bOk:\n sProcessId = str(oJson['stringValue'])\n if bAsynch is True:\n sResult = sProcessId\n else:\n sResult = waitProcess(sProcessId)\n else:\n wasdiLog('[ERROR] waspy.copyFileToSftp: impossible to move file in the user WASDI sftp folder')\n except:\n wasdiLog('[ERROR] waspy.copyFileToSftp: something broke alongside' +\n ' ******************************************************************************')\n\n return sResult\n\n\ndef getProcessorPath():\n \"\"\"\n Get the local path of the processor (where myProcessor.py is located)\n \"\"\"\n \n try: \n # get the caller's stack frame and extract its file path\n oFrameInfo = inspect.stack()[1]\n sCallerFilePath = oFrameInfo[1]\n # drop the reference to the stack frame to avoid reference cycles\n del oFrameInfo \n \n # make the path absolute\n 
sCallerFilePath = os.path.dirname(os.path.abspath(sCallerFilePath))\n sCallerFilePath = sCallerFilePath + \"/\"\n \n return sCallerFilePath\n except:\n return \"\"\n\ndef _log(sLog):\n \"\"\"\n Internal Log function\n :param sLog: text row to log\n \"\"\"\n global m_bVerbose\n\n if m_bVerbose:\n print(sLog)\n\n\ndef _getStandardHeaders():\n \"\"\"\n Get the standard headers for a WASDI API Call, setting also the session token\n :return: dictionary of headers to add to the REST API\n \"\"\"\n global m_sSessionId\n asHeaders = {'Content-Type': 'application/json', 'x-session-token': m_sSessionId}\n return asHeaders\n\n\ndef _loadConfig(sConfigFilePath):\n \"\"\"\n Loads configuration from given file\n :param sConfigFilePath: a string containing a path to the configuration file\n \"\"\"\n if sConfigFilePath is None:\n print('[ERROR] waspy._loadConfigParams: config parameter file name is None, cannot load config' +\n ' ******************************************************************************')\n return\n if sConfigFilePath == '':\n print('[ERROR] waspy._loadConfigParams: config parameter file name is empty, cannot load config' +\n ' ******************************************************************************')\n return\n\n sConfigFilePath = _normPath(sConfigFilePath)\n\n global m_sUser\n global m_sPassword\n global m_sParametersFilePath\n global m_sSessionId\n global m_sBasePath\n\n global m_bDownloadActive\n global m_bUploadActive\n global m_bVerbose\n\n try:\n # assume it is a JSON file\n sTempWorkspaceName = None\n sTempWorkspaceID = None\n with open(sConfigFilePath) as oJsonFile:\n oJson = json.load(oJsonFile)\n if \"USER\" in oJson:\n m_sUser = oJson[\"USER\"]\n if \"PASSWORD\" in oJson:\n m_sPassword = oJson[\"PASSWORD\"]\n if \"WORKSPACE\" in oJson:\n sTempWorkspaceName = oJson[\"WORKSPACE\"]\n sTempWorkspaceID = None\n elif \"WORKSPACEID\" in oJson:\n sTempWorkspaceID = oJson[\"WORKSPACEID\"]\n sTempWorkspaceName = None\n if \"BASEPATH\" in oJson:\n m_sBasePath = oJson[\"BASEPATH\"]\n if \"PARAMETERSFILEPATH\" in oJson:\n m_sParametersFilePath = oJson[\"PARAMETERSFILEPATH\"]\n m_sParametersFilePath = _normPath(m_sParametersFilePath)\n if \"DOWNLOADACTIVE\" in oJson:\n m_bDownloadActive = bool(oJson[\"DOWNLOADACTIVE\"])\n if \"UPLOADACTIVE\" in oJson:\n m_bUploadActive = bool(oJson[\"UPLOADACTIVE\"])\n if \"VERBOSE\" in oJson:\n m_bVerbose = bool(oJson[\"VERBOSE\"])\n if 'BASEURL' in oJson:\n setBaseUrl(oJson['BASEURL'])\n\n return True, sTempWorkspaceName, sTempWorkspaceID\n\n except Exception as oEx:\n wasdiLog('[ERROR] waspy._loadConfigParams: something went wrong' + \n ' ******************************************************************************')\n wasdiLog(str(oEx))\n return\n\n\ndef _loadParams():\n \"\"\"\n Loads parameters from file, if specified in configuration file\n \"\"\"\n global m_sParametersFilePath\n global m_aoParamsDictionary\n\n bParamLoaded = False\n if (m_sParametersFilePath is not None) and (m_sParametersFilePath != ''):\n try:\n if not os.path.isfile(m_sParametersFilePath):\n wasdiLog('[WARNING] _loadParams: parameters file not found')\n with open(m_sParametersFilePath) as oJsonFile:\n m_aoParamsDictionary = json.load(oJsonFile)\n bParamLoaded = True\n except Exception as oE:\n wasdiLog('[WARNING] _loadParams: could not open file due to: ' + str(oE))\n\n if not bParamLoaded:\n wasdiLog('[INFO] _loadParams: wasdi could not load param file. 
That is fine, you can still load it later, don\\'t worry')\n\n\ndef _unzip(sAttachmentName, sPath):\n \"\"\"\n Unzips a file\n :param sAttachmentName: filename to unzip\n :param sPath: both the path where the file is and where it must be unzipped\n :return: None\n \"\"\"\n _log('[INFO] waspy._unzip( ' + sAttachmentName + ', ' + sPath + ' )')\n if sPath is None:\n print('[ERROR] waspy._unzip: path is None' +\n ' ******************************************************************************')\n return\n if sAttachmentName is None:\n print('[ERROR] waspy._unzip: attachment to unzip is None' +\n ' ******************************************************************************')\n return\n\n try:\n sZipFilePath = os.path.join(sPath, sAttachmentName)\n zip_ref = zipfile.ZipFile(sZipFilePath, 'r')\n zip_ref.extractall(sPath)\n zip_ref.close()\n except:\n print('[ERROR] waspy._unzip: failed unzipping' +\n ' ******************************************************************************')\n\n return\n\n\ndef _waitForResume():\n if m_bIsOnServer:\n # Put this processor as READY\n updateStatus(\"READY\")\n\n try:\n # Wait for the WASDI Scheduler to resume us\n _log(\"[INFO] Waiting for the scheduler to resume this process\")\n sStatus = ''\n\n while sStatus not in {\"RUNNING\", \"DONE\", \"STOPPED\", \"ERROR\"}:\n sStatus = getProcessStatus(getProcId())\n time.sleep(5)\n\n _log(\"[INFO] Process Resumed, let's go!\")\n except:\n _log(\"Exception in the _waitForResume\")\n\n\ndef _normPath(sPath):\n \"\"\"\n Normalizes path by adjusting separator\n :param sPath: a path to be normalized\n :return: the normalized path\n \"\"\"\n\n if sPath is None:\n print('[ERROR] waspy._normPath: passed path is None' +\n ' ******************************************************************************')\n return None\n\n sPath = sPath.replace('/', os.path.sep)\n sPath = sPath.replace('\\\\', os.path.sep)\n\n return sPath\n\ndef _internalAddFileToWASDI(sFileName, bAsynch=None):\n _log('[INFO] waspy._internalAddFileToWASDI( ' + str(sFileName) + ', ' + str(bAsynch) + ' )')\n\n if sFileName is None:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: file name is None, aborting' +\n ' ******************************************************************************')\n return ''\n if not isinstance(sFileName, str):\n wasdiLog('[WARNING] waspy._internalAddFileToWASDI: file name is not a string, trying conversion' +\n ' ******************************************************************************')\n try:\n sFileName = str(sFileName)\n except:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: cannot convert file name into string, aborting' +\n ' ******************************************************************************')\n return ''\n if len(sFileName) < 1:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: file name has zero length, aborting' +\n ' ******************************************************************************')\n return ''\n\n if bAsynch is None:\n print('[WARNING] waspy._internalAddFileToWASDI: asynch flag is None, assuming False')\n bAsynch = False\n if not isinstance(bAsynch, bool):\n print('[WARNING] waspy._internalAddFileToWASDI: asynch flag is not a boolean, trying casting')\n try:\n bAsynch = bool(bAsynch)\n except:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: could not convert asynch flag into bool, aborting' +\n ' ******************************************************************************')\n return ''\n\n sResult = ''\n try:\n if m_bIsOnServer is False:\n if getUploadActive() is True:\n if 
fileExistsOnWasdi(sFileName) is False:\n _log('[INFO] waspy._internalAddFileToWASDI: remote file is missing, uploading')\n try:\n _uploadFile(sFileName)\n _log('[INFO] waspy._internalAddFileToWASDI: file uploaded, keep on working!')\n except:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: could not proceed with upload' +\n ' ******************************************************************************')\n else:\n try:\n # We are on the server: do I have the file?\n if os.path.exists(getPath(sFileName)) is True:\n # Does it exists on the target node?\n if _fileOnNode(sFileName) is False:\n wasdiLog('[WARNING] waspy._internalAddFileToWASDI: uploading the file to the workspace node')\n try:\n _uploadFile(sFileName)\n wasdiLog('[WARNING] waspy._internalAddFileToWASDI: file uploaded, keep on working!')\n except:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: could not proceed with upload' +\n ' ******************************************************************************')\n except:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: could not send the file the workspace node')\n \n\n sUrl = getWorkspaceBaseUrl() + \"/catalog/upload/ingestinws?file=\" + sFileName + \"&workspace=\" + getActiveWorkspaceId()\n\n if m_bIsOnServer:\n sUrl += \"&parent=\"\n sUrl += getProcId()\n\n asHeaders = _getStandardHeaders()\n oResponse = requests.get(url=sUrl, headers=asHeaders)\n if oResponse is None:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: cannot contact server' +\n ' ******************************************************************************')\n elif oResponse.ok is not True:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: failed, server replied ' + str(oResponse.status_code) +\n ' ******************************************************************************')\n else:\n oJson = oResponse.json()\n if 'stringValue' in oJson:\n bOk = bool(oJson['boolValue'])\n if bOk:\n sProcessId = str(oJson['stringValue'])\n if bAsynch is True:\n sResult = sProcessId\n else:\n sResult = waitProcess(sProcessId)\n else:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: impossible to ingest the file in WASDI')\n except:\n wasdiLog('[ERROR] waspy._internalAddFileToWASDI: something broke alongside' +\n ' ******************************************************************************')\n\n return sResult\n\ndef _internalExecuteWorkflow(asInputFileNames, asOutputFileNames, sWorkflowName, bAsynch=False):\n \"\"\"\n Internal call to execute workflow\n\n :param asInputFileNames: name of the file in input (string WITH extension) or array of strings of the files in input (WITH extension)\n :param asOutputFileNames: name of the file in output (string WITH extension) or array of strings of the files in output (WITH extension)\n :param sWorkflowName: name of the SNAP workflow uploaded in WASDI\n :param bAsynch: true to run asynch, false to run synch\n :return: processID if asynch, status of the executed process if synch, empty string in case of failure\n \"\"\"\n\n _log('[INFO] waspy._internalExecuteWorkflow( ' + str(asInputFileNames) + ', ' +\n str(asOutputFileNames) + ', ' + str(sWorkflowName) + ', ' + str(bAsynch) + ' )')\n\n # if we got only a single file input, let transform it in an array\n if not isinstance(asInputFileNames, list):\n asInputFileNames = [asInputFileNames]\n\n if not isinstance(asOutputFileNames, list):\n asOutputFileNames = [asOutputFileNames]\n\n if asInputFileNames is None:\n wasdiLog('[ERROR] waspy._internalExecuteWorkflow: input file names None, aborting' +\n ' 
******************************************************************************')\n        return ''\n    elif len(asInputFileNames) <= 0:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: no input file names, aborting' +\n                 ' ******************************************************************************')\n        return ''\n\n    if asOutputFileNames is None:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: output file names None, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    # elif len(asOutputFileNames) <= 0:\n    #     print('[ERROR] waspy._internalExecuteWorkflow: no output file names, aborting')\n    #     return ''\n\n    if sWorkflowName is None:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: workflow name is None, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    elif len(sWorkflowName) <= 0:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: workflow name too short, aborting' +\n                 ' ******************************************************************************')\n        return ''\n\n    sProcessId = ''\n    sUrl = getBaseUrl() + \"/processing/graph_id?workspace=\" + getActiveWorkspaceId()\n\n    if m_bIsOnServer:\n        sUrl += \"&parent=\"\n        sUrl += getProcId()\n\n    # get a list of workflows, with entries in this form:\n    # { \"description\":STRING,\n    #   \"name\": STRING,\n    #   \"workflowId\": STRING }\n    aoWorkflows = getWorkflows()\n    aoDictPayload = None\n    if aoWorkflows is None:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: workflow list is None, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    elif len(aoWorkflows) <= 0:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: workflow list is empty, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    else:\n        for asWorkflow in aoWorkflows:\n            if asWorkflow is not None:\n                if \"name\" in asWorkflow:\n                    if asWorkflow[\"name\"] == sWorkflowName:\n                        if \"workflowId\" in asWorkflow:\n                            aoDictPayload = {}\n                            aoDictPayload[\"description\"] = asWorkflow[\"description\"]\n                            aoDictPayload[\"name\"] = asWorkflow[\"name\"]\n                            aoDictPayload[\"workflowId\"] = asWorkflow[\"workflowId\"]\n                            break\n    if aoDictPayload is None:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: workflow name not found, aborting')\n        return ''\n\n    try:\n        aoDictPayload[\"inputFileNames\"] = asInputFileNames\n        aoDictPayload[\"outputFileNames\"] = asOutputFileNames\n    except:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: payload could not be generated, aborting' +\n                 ' ******************************************************************************')\n        return ''\n\n    _log('[INFO] waspy._internalExecuteWorkflow: about to HTTP POST to ' + str(sUrl) + ' with payload ' + str(\n        aoDictPayload))\n    asHeaders = _getStandardHeaders()\n    oResponse = requests.post(sUrl, headers=asHeaders, data=json.dumps(aoDictPayload))\n    if oResponse is None:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: communication with the server failed, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    elif oResponse.ok is True:\n        _log('[INFO] waspy._internalExecuteWorkflow: server replied OK')\n        asJson = oResponse.json()\n        if \"stringValue\" in asJson:\n            sProcessId = asJson[\"stringValue\"]\n        if bAsynch is True:\n            return sProcessId\n        else:\n            return waitProcess(sProcessId)\n    else:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: cannot find process ID in response, aborting' +\n                 ' ******************************************************************************')\n        return ''\n    else:\n        wasdiLog('[ERROR] waspy._internalExecuteWorkflow: server returned status ' + str(oResponse.status_code) +\n                 ' ******************************************************************************')\n        wasdiLog(oResponse.content)\n    return ''\n\n\ndef _fileOnNode(sFileName):\n    \"\"\"\n    Checks if a file already exists on the node of the workspace or not\n    :param sFileName: file name with extension\n    :return: True if the file exists, False otherwise\n    \"\"\"\n\n    if sFileName is None:\n        wasdiLog('[ERROR] waspy._fileOnNode: file name must not be None' +\n                 ' ******************************************************************************')\n        return False\n    if len(sFileName) < 1:\n        wasdiLog('[ERROR] waspy._fileOnNode: File name too short' +\n                 ' ******************************************************************************')\n        return False\n\n    sSessionId = getSessionId()\n    sActiveWorkspace = getActiveWorkspaceId()\n\n    sUrl = getWorkspaceBaseUrl()\n    sUrl += \"/catalog/fileOnNode?token=\"\n    sUrl += sSessionId\n    sUrl += \"&filename=\"\n    sUrl += sFileName\n    sUrl += \"&workspace=\"\n    sUrl += sActiveWorkspace\n\n    asHeaders = _getStandardHeaders()\n    oResult = requests.get(sUrl, headers=asHeaders)\n\n    if oResult is None:\n        wasdiLog('[ERROR] waspy._fileOnNode: failed contacting the server' +\n                 ' ******************************************************************************')\n        return False\n    elif not oResult.ok and oResult.status_code != 500:\n        wasdiLog('[ERROR] waspy._fileOnNode: unexpected failure, server returned: ' + str(oResult.status_code) +\n                 ' ******************************************************************************')\n        return False\n    else:\n        try:\n            oJsonResponse = oResult.json()\n            \n            # the response is a JSON dictionary, so read the flag with a key lookup\n            if 'boolValue' in oJsonResponse:\n                return bool(oJsonResponse['boolValue'])\n            else:\n                return False\n        except:\n            return False\n\ndef _getDefaultCRS():\n    return (\n        \"GEOGCS[\\\"WGS84(DD)\\\", \\r\\n\" +\n        \"    DATUM[\\\"WGS84\\\", \\r\\n\" +\n        \"        SPHEROID[\\\"WGS84\\\", 6378137.0, 298.257223563]], \\r\\n\" +\n        \"    PRIMEM[\\\"Greenwich\\\", 0.0], \\r\\n\" +\n        \"    UNIT[\\\"degree\\\", 0.017453292519943295], \\r\\n\" +\n        \"    AXIS[\\\"Geodetic longitude\\\", EAST], \\r\\n\" +\n        \"    AXIS[\\\"Geodetic latitude\\\", NORTH]]\"\n    )\n\n\nif __name__ == '__main__':\n    _log(\n        'WASPY - The WASDI Python Library. Include in your code for space development processors. 
Visit www.wasdi.net'\n )\n\n\n","sub_path":"libraries/waspy/wasdi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":116377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"81831284","text":"#!/usr/bin/env python\n\nimport unittest\nimport config\nimport app as nomsly\nfrom core import database\n\nclass FlaskTests(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\" this is ran once automatically before all tests \"\"\"\n database.connect(\n config.database[\"host\"],\n config.database[\"db\"],\n config.database[\"user\"],\n config.database[\"pass\"]\n )\n\n def setUp(self):\n \"\"\" ran before each test \"\"\"\n nomsly.app.config['TESTING'] = True\n self.app = nomsly.app.test_client()\n\n def testIndex(self):\n rv = self.app.get(\"/\")\n assert rv.data is not None\n assert rv.data != \"\"\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_routes.py","file_name":"test_routes.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"241499011","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Utilities module for python \"\"\"\n\nimport os\n\nimport fabric\nimport fabtools\nfrom fabric.api import env\nfrom pydiploy.decorators import do_verbose\n\n\n@do_verbose\ndef python_pkg(update=False):\n \"\"\" Installs python packages and pip \"\"\"\n\n fabtools.require.deb.packages([\n '%s' % 'python-dev' if env.remote_python_version < 3 else 'python%s-dev' % env.remote_python_version,\n 'python-pip'\n ], update=update)\n fabtools.require.python.install('pip', upgrade=True, use_sudo=True)\n\n\n@do_verbose\ndef application_dependencies(upgrade_pkg, staging=True):\n \"\"\" Installs application dependencies with requirements.txt files \"\"\"\n\n with fabtools.python.virtualenv(env.remote_virtualenv_dir):\n with fabric.api.cd(env.remote_current_path):\n requirements_file = os.path.join('requirements',\n '%s.txt' % env.goal) if staging else 'requirements.txt'\n # ugly fix for error when pip install fail and error raises while /home/user/.pip not writable\n pip_log = '%s/pip_error.log' % env.remote_home\n pip_cmd = 'pip --log-file %s' % pip_log\n if 'oracle_client_version' in env:\n oracle_dir = 'instantclient_%s' % '_'.join(\n env.oracle_client_version.split('.')[:2])\n oracle_root_path = os.path.join(\n env.oracle_remote_dir, oracle_dir)\n oracle_full_path = os.path.join(\n env.remote_home, oracle_root_path)\n pip_cmd = 'ORACLE_HOME=%s pip' % oracle_full_path\n\n # upgrade pip to latest version\n fabtools.require.python.install('pip', \n upgrade=True, \n use_sudo=True,\n user=env.remote_owner,\n pip_cmd=pip_cmd,\n quiet=True)\n\n fabtools.python.install_requirements(requirements_file,\n use_sudo=True,\n user=env.remote_owner,\n upgrade=upgrade_pkg,\n pip_cmd='%s --no-cache-dir' % pip_cmd,\n quiet=True)\n\n fabric.api.sudo(\n 'pip install --log-file %s --quiet -e .' 
% pip_log ,\n user=env.remote_owner,\n pty=False)\n","sub_path":"pydiploy/require/python/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"615327557","text":"binum = input(\"Enter a binary number: \")\nbinum = str(binum)\nlength = len(binum)\nvalue = 0\nabc = 0\nwhile length > 0:\n last = binum[length - 1]\n if last == \"1\":\n value = value + 2**abc\n elif last == \"0\":\n value = value\n else:\n raise Exception(\"This is not a binary number! Please enter a binary number!\")\n abc = abc + 1\n length = length - 1\nvalue = str(value)\nprint(binum + \" is \" + value + \".\")\n","sub_path":"Binary.py","file_name":"Binary.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"646242574","text":"import os\n\nversion = 0.3\n\njs_path = os.path.dirname(os.path.abspath(__file__)) + \"\\\\..\\\\js_script\\\\\"\n\ncommands = {\"proxy\":\"capture the api in capture_list\",\n\"set\":\"Set Options\",\n\"GET_SETTING\":\"get setting options\",\n\"exit\":\"Exit This Script\"}\n\nsettings_validation = {\"mode\":[\"hex\", \"string\"],\n\"capture_list\":[\"send\",\"recv\",\"wsasend\",\"wsarecv\",\"recvfrom\",\"wsarecvfrom\"],\n\"intercept\":[\"on\",\"off\"]\n}\n\nsettings = {\"mode\":\"hex\",\n\"capture_list\":[\"send\"],\n\"intercept\":\"off\"\n}\n\n\n\n\n\n","sub_path":"tcp_proxy_core/tcp_proxy_config.py","file_name":"tcp_proxy_config.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"160757510","text":"import argparse\nimport json\nimport git\nimport csv\nimport os\nimport pwd\nfrom pathlib import PurePath\nimport platform\n\nfrom py4j.java_gateway import JavaGateway, GatewayParameters\nimport datetime as dt\nfrom dateutil.tz import tzutc\n\nfrom nshm_toshi_client.rupture_generation_task import RuptureGenerationTask\nfrom nshm_toshi_client.general_task import GeneralTask\nfrom nshm_toshi_client.task_relation import TaskRelation\nimport time\n\n\nAPI_URL = os.getenv('NZSHM22_TOSHI_API_URL', \"http://127.0.0.1:5000/graphql\")\nAPI_KEY = os.getenv('NZSHM22_TOSHI_API_KEY', \"\")\nS3_URL = os.getenv('NZSHM22_TOSHI_S3_URL',\"http://localhost:4569\")\n\nclass RuptureSetBuilderTask():\n \"\"\"\n The python client for a RuptureSetBuildTask\n \"\"\"\n def __init__(self, job_args):\n\n self.use_api = job_args.get('use_api', False)\n\n #setup the java gateway binding\n gateway = JavaGateway(gateway_parameters=GatewayParameters(port=job_args['java_gateway_port']))\n app = gateway.entry_point\n self._builder = app.getAzimuthalRuptureSetBuilder()\n\n #get the root path for the task local data\n # root_folder = PurePath(os.getcwd())\n\n repos = [\"opensha\", \"nshm-nz-opensha\"]\n #repo_root = root_folder\n self._output_folder = PurePath(job_args.get('working_path')) #.joinpath('tmp').joinpath(dt.datetime.utcnow().isoformat().replace(':','-'))\n # os.mkdir(self._output_folder)\n\n #setup the csv (backup) task recorder\n self._writer = None #CSVResultWriter(open(self._output_folder.joinpath('results.csv'), 'w'), repos)\n self._repoheads = get_repo_heads(PurePath(job_args['root_folder']), repos)\n\n if self.use_api:\n headers={\"x-api-key\":API_KEY}\n self._ruptgen_api = RuptureGenerationTask(API_URL, S3_URL, None, with_schema_validation=True, headers=headers)\n self._general_api = GeneralTask(API_URL, S3_URL, 
None, with_schema_validation=True, headers=headers)\n self._task_relation_api = TaskRelation(API_URL, None, with_schema_validation=True, headers=headers)\n\n\n def ruptureSetMetrics(self):\n metrics = {}\n metrics[\"subsection_count\"] = self._builder.getSubSections().size()\n metrics[\"rupture_count\"] = self._builder.getRuptures().size()\n #metrics[\"possible_cluster_connections\"] = conf.getConnectionStrategy().getClusterConnectionCount()\n\n # # get info from the configuratiion\n conf = self._builder.getPlausibilityConfig()\n conf_diags = json.loads(conf.toJSON())\n conns = 0\n for cluster in conf_diags['connectionStrategy']['clusters']:\n conns += len(cluster.get('connections',[]))\n metrics[\"cluster_connections\"] = conns\n\n return metrics\n\n def run(self, task_arguments, job_arguments):\n\n # print(task_arguments)\n # print(job_arguments)\n\n t0 = dt.datetime.utcnow()\n\n environment = {\n \"host\": platform.node(),\n \"gitref_opensha\":self._repoheads['opensha'],\n \"gitref_nshm-nz-opensha\":self._repoheads['nshm-nz-opensha'] }\n\n if self.use_api:\n #create new task in toshi_api\n task_id = self._ruptgen_api.create_task(\n dict(created=dt.datetime.now(tzutc()).isoformat()),\n arguments=task_arguments,\n environment=environment\n )\n\n #link task tp the parent task\n self._task_relation_api.create_task_relation(job_arguments['general_task_id'], task_id)\n # #link task to the input datafile (*.XML)\n # self._ruptgen_api.link_task_file(task_id, crustal_id, 'READ')\n\n else:\n task_id = None\n\n # Run the task....\n ta = task_arguments\n ## for crustal\n self._builder \\\n .setMaxFaultSections(int(ta[\"max_sections\"]))\\\n .setMaxJumpDistance(float(ta[\"max_jump_distance\"]))\\\n .setPermutationStrategy(ta[\"connection_strategy\"])\\\n .setMaxSubSectionLength(float(ta[\"down_dip_width\"]))\\\n .setMinSubSectsPerParent(int(ta[\"min_sub_sects_per_parent\"]))\\\n .setMinSubSections(int(ta[\"min_sub_sections\"]))\\\n .setMaxCumulativeAzimuthChange(float(ta[\"max_cumulative_azimuth\"]))\\\n .setThinningFactor(float(ta[\"thinning_factor\"]))\\\n .setFaultModel(ta[\"fault_model\"])\n\n #name the output file\n outputfile = self._output_folder.joinpath(self._builder.getDescriptiveName()+ \".zip\")\n print(\"building %s started at %s\" % (outputfile, dt.datetime.utcnow().isoformat()), end=' ')\n\n self._builder \\\n .setNumThreads(int(job_arguments[\"java_threads\"]))\\\n .buildRuptureSet()\n\n #capture task metrics\n duration = (dt.datetime.utcnow() - t0).total_seconds()\n metrics = self.ruptureSetMetrics()\n\n #write the result\n self._builder .writeRuptureSet(str(outputfile))\n\n if self.use_api:\n #record the completed task\n done_args = {\n 'task_id':task_id,\n 'duration':duration,\n 'result':\"SUCCESS\",\n 'state':\"DONE\",\n }\n self._ruptgen_api.complete_task(done_args, metrics)\n\n #upload the task output\n self._ruptgen_api.upload_task_file(task_id, outputfile, 'WRITE', meta=task_arguments)\n\n #and the log files, why not\n java_log_file = self._output_folder.joinpath(f\"java_app.{job_arguments['java_gateway_port']}.log\")\n self._ruptgen_api.upload_task_file(task_id, java_log_file, 'WRITE')\n pyth_log_file = self._output_folder.joinpath(f\"python_script.{job_arguments['java_gateway_port']}.log\")\n self._ruptgen_api.upload_task_file(task_id, pyth_log_file, 'WRITE')\n\n print(\"; took %s secs\" % (dt.datetime.utcnow() - t0).total_seconds())\n\n\ndef get_repo_heads(rootdir, repos):\n result = {}\n for reponame in repos:\n repo = git.Repo(rootdir.joinpath(reponame))\n headcommit = 
repo.head.commit\n result[reponame] = headcommit.hexsha\n return result\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\")\n args = parser.parse_args()\n\n config_file = args.config\n f= open(config_file, 'r', encoding='utf-8')\n config = json.load(f)\n\n # maybe the JVM App is a little slow to get listening\n time.sleep(5)\n # Wait for some more time, scaled by taskid to avoid S3 consistency issue\n time.sleep(config['job_arguments']['task_id'] * 5)\n\n # print(config)\n task = RuptureSetBuilderTask(config['job_arguments'])\n task.run(**config)\n","sub_path":"src/python/automation/scaling/azimuthal_rupture_set_builder_task.py","file_name":"azimuthal_rupture_set_builder_task.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"505690550","text":"# ---------------------------------------------------------------\n# Validate the Model with Off-the-Shelf Face Recognition Results\n#\n# Author: Liangqi Li\n# Creating Date: Jul 1, 2018\n# Latest rectifying: Jul 16, 2018\n# ---------------------------------------------------------------\nimport os\nimport time\n\nimport json\nimport argparse\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nimport torch.backends.cudnn\n\nfrom __init__ import clock_non_return\nfrom utils import cuda_mode, normalize_matrix, save_result_txt\nfrom model import QRID\nfrom dataset import ECCVDataset, generate_probes, generate_negatives\nfrom reid.re_ranking import re_ranking\n\n\ndef parse_args():\n \"\"\"Parse input arguments\"\"\"\n\n parser = argparse.ArgumentParser(description='Validating')\n parser.add_argument('--net', default='res50', type=str)\n parser.add_argument('--trained_epoch', default=1, type=int)\n parser.add_argument('--split', default='test', type=str)\n parser.add_argument('--use_qal', default=0, type=str)\n parser.add_argument('--sim_weight', default=0, type=str)\n parser.add_argument('--use_rerank', default=1, type=str)\n parser.add_argument('--save_feat', default=0, type=str)\n parser.add_argument('--bs', default=64, type=int)\n parser.add_argument('--gpu_ids', default='0', type=str)\n parser.add_argument('--data_dir', default='', type=str)\n parser.add_argument('--out_dir', default='./output', type=str)\n\n args = parser.parse_args()\n\n return args\n\n\ndef sort1(mv_df, feats, casts, face_matching):\n \"\"\"\n Using candidates with face scores higher than the thresh as the probes,\n while others as the galleries.\n \"\"\"\n\n sorted_i_indices = []\n df_indices = mv_df.index.values\n\n for cast in casts:\n p_indices = generate_probes(mv_df, cast, face_matching[cast], 0.35)\n p_i_indices = [np.where(df_indices == idx)[0][0] for idx in p_indices]\n g_i_indices = list(set(range(mv_df.shape[0])) - set(p_i_indices))\n p_i_indices = np.array(p_i_indices)\n g_i_indices = np.array(g_i_indices)\n\n p_feats = feats[p_i_indices]\n g_feats = feats[g_i_indices]\n p_feats = normalize_matrix(p_feats)\n g_feats = normalize_matrix(g_feats)\n sim_mat = np.dot(p_feats, g_feats.T)\n\n if use_rerank:\n q_q_sim = np.dot(p_feats, p_feats.T)\n q_g_sim = sim_mat\n g_g_sim = np.dot(g_feats, g_feats.T)\n if sim_weight:\n q_q_sim = 1e10 ** q_q_sim\n q_q_sim /= np.max(q_q_sim)\n q_g_sim = 1e10 ** q_g_sim\n q_g_sim /= np.max(q_g_sim)\n g_g_sim = 1e10 ** g_g_sim\n g_g_sim /= np.max(g_g_sim)\n re_rank = re_ranking(\n q_g_sim, 
q_q_sim, g_g_sim, k1=10, k2=3, lambda_value=0.05)\n sim_mat = 1 - re_rank\n else:\n if sim_weight:\n # sim_mat = sim_mat ** 2 # / sim_mat.sum(0)\n sim_mat = 1e10 ** sim_mat\n\n sim = sim_mat.sum(0) / sim_mat.shape[0]\n orders = np.argsort(sim)[::-1]\n g_sorted_i_indices = g_i_indices[orders]\n cur_sorted_i_indices = np.hstack((p_i_indices, g_sorted_i_indices))\n sorted_i_indices.append(cur_sorted_i_indices)\n\n return np.vstack(sorted_i_indices)\n\n\ndef sort2(mv_df, feats, casts, face_matching):\n \"\"\"\n Using candidates with face scores higher than the thresh as the probes,\n using candidates with high scores belonging to other casts as the\n negatives, while leaving the rest as the galleries. Note that for each\n cast, the number of galleries is same.\n \"\"\"\n\n sorted_i_indices = []\n df_indices = mv_df.index.values\n\n probe_indices = [generate_probes(mv_df, cast, face_matching[cast], 0.35)\n for cast in casts]\n all_probe_indices = [x for xx in probe_indices for x in xx]\n g_indices = list(set(df_indices) - set(all_probe_indices))\n g_i_indices = [np.where(df_indices == idx)[0][0] for idx in g_indices]\n g_i_indices = np.array(g_i_indices)\n probe_i_indices = []\n neg_i_indices = []\n\n p_feats = []\n g_feats = feats[g_i_indices]\n\n for i in range(len(casts)):\n p_indices = probe_indices[i]\n p_i_indices = [np.where(df_indices == idx)[0][0] for idx in p_indices]\n n_i_indices = list(\n set(range(mv_df.shape[0])) - set(p_i_indices) - set(g_i_indices))\n p_i_indices = np.array(p_i_indices)\n n_i_indices = np.array(n_i_indices)\n probe_i_indices.append(p_i_indices)\n neg_i_indices.append(n_i_indices)\n\n p_feat = feats[p_i_indices]\n p_feat = normalize_matrix(p_feat).sum(0)\n p_feats.append(p_feat)\n\n p_feats = np.vstack(p_feats)\n p_feats = normalize_matrix(p_feats)\n g_feats = normalize_matrix(g_feats)\n sim_mat = np.dot(p_feats, g_feats.T)\n\n if use_rerank:\n q_q_sim = np.eye(len(casts))\n q_g_sim = sim_mat\n g_g_sim = np.dot(g_feats, g_feats.T)\n if sim_weight:\n q_q_sim = 1e10 ** q_q_sim\n q_q_sim /= np.max(q_q_sim)\n q_g_sim = 1e10 ** q_g_sim\n q_g_sim /= np.max(q_g_sim)\n g_g_sim = 1e10 ** g_g_sim\n g_g_sim /= np.max(g_g_sim)\n re_rank = re_ranking(\n q_g_sim, q_q_sim, g_g_sim, k1=10, k2=3, lambda_value=0.05)\n sim_mat = 1 - re_rank\n else:\n if sim_weight:\n # sim_mat = sim_mat ** 2 # / sim_mat.sum(0)\n sim_mat = 1e10 ** sim_mat\n\n orders = [np.argsort(sim)[::-1] for sim in sim_mat]\n for i in range(len(casts)):\n g_sorted_i_indices = g_i_indices[orders[i]]\n cur_sorted_i_indices = np.hstack(\n (probe_i_indices[i], g_sorted_i_indices, neg_i_indices[i]))\n sorted_i_indices.append(cur_sorted_i_indices)\n\n return np.vstack(sorted_i_indices)\n\n\ndef sort3(mv_df, feats, casts, face_matching):\n \"\"\"\n Using candidates with face scores higher than the thresh as the probes,\n using candidates with high scores belonging to other casts as the\n negatives, while leaving the rest as the galleries. 
Note that for each\n cast, the number of galleries is same.\n \"\"\"\n\n sorted_i_indices = []\n df_indices = mv_df.index.values\n\n probe_indices = [generate_probes(mv_df, cast, face_matching[cast], 0.35)\n for cast in casts]\n all_probe_indices = [x for xx in probe_indices for x in xx]\n\n # other_frac= 1 / 7\n # other_i_indices = np.arange(\n # int((1 - other_frac) * mv_df.shape[0]), mv_df.shape[0])\n\n g_indices = list(set(df_indices) - set(all_probe_indices))\n g_i_indices = [np.where(df_indices == idx)[0][0] for idx in g_indices]\n # g_i_indices = list(set(g_i_indices) - set(other_i_indices))\n g_i_indices = np.array(g_i_indices)\n probe_i_indices = []\n neg_i_indices = []\n\n g_feats = feats[g_i_indices]\n g_feats = normalize_matrix(g_feats)\n\n for i in range(len(casts)):\n p_indices = probe_indices[i]\n p_i_indices = [np.where(df_indices == idx)[0][0] for idx in p_indices]\n n_i_indices = list(\n set(range(mv_df.shape[0])) - set(p_i_indices) - set(g_i_indices))\n # p_i_indices = list(set(p_i_indices) - set(other_i_indices))\n # n_i_indices = list(set(n_i_indices) - set(other_i_indices))\n p_i_indices = np.array(p_i_indices)\n n_i_indices = np.array(n_i_indices)\n probe_i_indices.append(p_i_indices)\n neg_i_indices.append(n_i_indices)\n\n p_feat = feats[p_i_indices]\n p_feat = normalize_matrix(p_feat)\n\n sim_mat = np.dot(p_feat, g_feats.T)\n\n # q_q_sim = np.dot(p_feat, p_feat.T)\n # rows, cols = np.where(q_q_sim >= 0.8)\n # sims = dict()\n # del_rows = set()\n #\n # for row, col in zip(rows, cols):\n # if row == col:\n # continue\n # if row not in sims.keys():\n # sims[row] = [col]\n # else:\n # sims[row].append(col)\n #\n # for row in sims.keys():\n # if row in del_rows:\n # continue\n # del_rows.update(sims[row])\n #\n # del_rows = list(del_rows)\n # p_feat = np.delete(p_feat, del_rows, 0)\n # sim_mat = np.dot(p_feat, g_feats.T)\n\n if use_rerank:\n q_q_sim = np.dot(p_feat, p_feat.T)\n q_g_sim = sim_mat\n g_g_sim = np.dot(g_feats, g_feats.T)\n if sim_weight:\n q_q_sim = 1e10 ** q_q_sim\n q_q_sim /= np.max(q_q_sim)\n q_g_sim = 1e10 ** q_g_sim\n q_g_sim /= np.max(q_g_sim)\n g_g_sim = 1e10 ** g_g_sim\n g_g_sim /= np.max(g_g_sim)\n re_rank = re_ranking(\n q_g_sim, q_q_sim, g_g_sim, k1=20, k2=6, lambda_value=0.2)\n sim_mat = 1 - re_rank\n else:\n if sim_weight:\n # sim_mat = sim_mat ** 2 # / sim_mat.sum(0)\n sim_mat = 1e10 ** sim_mat\n\n sim = sim_mat.sum(0) / sim_mat.shape[0]\n orders = np.argsort(sim)[::-1]\n g_sorted_i_indices = g_i_indices[orders]\n cur_sorted_i_indices = np.hstack(\n (p_i_indices, g_sorted_i_indices, n_i_indices)).astype(np.int64)\n sorted_i_indices.append(cur_sorted_i_indices)\n\n return np.vstack(sorted_i_indices)\n\n\ndef sort4(mv_df, feats, casts, face_matching):\n \"\"\"\n Using candidates with face scores higher than the thresh1 as the probes,\n using candidates with face scores lower than the thresh2 as the\n negatives, while leaving the rest as the galleries. 
Note that for each\n cast, the number of galleries is different.\n \"\"\"\n\n sorted_i_indices = []\n df_indices = mv_df.index.values\n\n probe_indices = [generate_probes(mv_df, cast, face_matching[cast], 0.35)\n for cast in casts]\n neg_indices = [generate_negatives(mv_df, cast, face_matching[cast], 0.02)\n for cast in casts]\n\n for i in range(len(casts)):\n p_indices = probe_indices[i]\n n_indices = neg_indices[i]\n p_i_indices = [np.where(df_indices == idx)[0][0] for idx in p_indices]\n n_i_indices = [np.where(df_indices == idx)[0][0] for idx in n_indices]\n n_i_indices = list(set(n_i_indices) - set(p_i_indices))\n p_i_indices = np.array(p_i_indices)\n n_i_indices = np.array(n_i_indices)\n g_i_indices = list(\n set(range(mv_df.shape[0])) - set(p_i_indices) - set(n_i_indices))\n g_i_indices = np.array(g_i_indices)\n\n p_feats = feats[p_i_indices]\n g_feats = feats[g_i_indices]\n p_feats = normalize_matrix(p_feats)\n g_feats = normalize_matrix(g_feats)\n sim_mat = np.dot(p_feats, g_feats.T)\n\n if use_rerank:\n q_q_sim = np.dot(p_feats, p_feats.T)\n q_g_sim = sim_mat\n g_g_sim = np.dot(g_feats, g_feats.T)\n if sim_weight:\n q_q_sim = 1e10 ** q_q_sim\n q_q_sim /= np.max(q_q_sim)\n q_g_sim = 1e10 ** q_g_sim\n q_g_sim /= np.max(q_g_sim)\n g_g_sim = 1e10 ** g_g_sim\n g_g_sim /= np.max(g_g_sim)\n re_rank = re_ranking(\n q_g_sim, q_q_sim, g_g_sim, k1=10, k2=3, lambda_value=0.05)\n sim_mat = 1 - re_rank\n else:\n if sim_weight:\n # sim_mat = sim_mat ** 2 # / sim_mat.sum(0)\n sim_mat = 1e10 ** sim_mat\n\n sim = sim_mat.sum(0) / sim_mat.shape[0]\n orders = np.argsort(sim)[::-1]\n g_sorted_i_indices = g_i_indices[orders]\n cur_sorted_i_indices = np.hstack(\n (p_i_indices, g_sorted_i_indices, n_i_indices)).astype(np.int64)\n sorted_i_indices.append(cur_sorted_i_indices)\n\n return np.vstack(sorted_i_indices)\n\n\ndef validate(dataloader, net, mv_df, face_matching, movie):\n \"\"\"Validation Process\"\"\"\n\n feats = []\n casts = list(face_matching.keys())\n\n if save_feat:\n for data in dataloader:\n inputs, _ = data\n if use_cuda:\n inputs = inputs.cuda()\n\n feat = net(inputs, None, 0).numpy()\n feats.append(feat)\n torch.cuda.empty_cache()\n\n # Transform torch.Tensor to np.ndarray and save it\n feats = np.vstack(feats)\n extracted_feats[movie] = feats\n\n else:\n feats = extracted_feats[movie]\n\n sorted_i_indices = sort3(mv_df, feats, casts, face_matching)\n mv_ids = []\n for indices in sorted_i_indices:\n ids = [str(mv_df.iloc[idx]['id']) for idx in indices]\n ids = [movie + '_' + '0' * (4 - len(idx)) + idx for idx in ids]\n mv_ids.append(ids)\n\n return casts, mv_ids\n\n\n@clock_non_return\ndef main():\n\n opt = parse_args()\n split = opt.split\n global use_cuda\n use_cuda = cuda_mode(opt)\n torch.backends.cudnn.benchmark = True\n\n model_dir = opt.out_dir\n trained_model_path = os.path.join(model_dir, '{}_{}.pth'.format(\n opt.net, opt.trained_epoch))\n\n use_qal = opt.use_qal\n global sim_weight\n global use_rerank\n global save_feat\n sim_weight = opt.sim_weight\n use_rerank = opt.use_rerank\n save_feat = opt.save_feat\n feats_file = '{}_feats_{}.pkl'.format(split, opt.net)\n\n model = QRID(opt.net, use_qal, trained_model_path, is_train=False)\n model.eval()\n if use_cuda:\n model.cuda()\n\n # load trained model\n if use_qal:\n print('Loading model check point from {:s}'.format(trained_model_path))\n model.load_trained_model(torch.load(trained_model_path))\n\n # List transformations to be implemented on the images\n transform_list = [\n transforms.Resize((256, 128), interpolation=3),\n 
transforms.ToTensor(),\n        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n    data_transforms = transforms.Compose(transform_list)\n    print(transform_list)\n\n    # Load annotation file and face matching results\n    val_df = pd.read_csv(os.path.join(\n        opt.data_dir, '{}GalleriesDF.csv'.format(split)))\n    file_name = 'matching_{}_results.json'.format(split)\n    with open(os.path.join(opt.data_dir, file_name), 'r') as f:\n        face_matching = json.load(f)\n\n    movies = os.listdir(os.path.join(opt.data_dir, split))\n    final_results = dict()\n\n    global extracted_feats\n    if save_feat:\n        extracted_feats = dict()\n    else:\n        with open(feats_file, 'rb') as f:\n            extracted_feats = pickle.load(f)\n\n    # Go through all movies and all casts\n    for i, movie in enumerate(movies, 1):\n        start = time.time()\n        mv_df = val_df.query('movie==@movie')\n        cur_match = face_matching[movie]\n\n        dataset = ECCVDataset(\n            opt.data_dir, split, mv_df, transform=data_transforms)\n        dataloader = DataLoader(dataset, batch_size=opt.bs, num_workers=16)\n        casts, mv_ids = validate(dataloader, model, mv_df, cur_match, movie)\n\n        assert len(casts) == len(mv_ids)\n        for cast, ids in zip(casts, mv_ids):\n            pid = cast[:-4]\n            full_cast = movie + '_' + pid\n            assert len(ids) == mv_df.shape[0]\n            final_results[full_cast] = ids\n\n        end = time.time()\n        print('Movie {}/{} processed. Time cost: {:.2f}s'.format(\n            i, len(movies), end - start))\n\n    if save_feat:\n        with open(feats_file, 'wb') as f:\n            pickle.dump(extracted_feats, f, pickle.HIGHEST_PROTOCOL)\n\n    save_fname = '{}_result.txt'.format(split)\n    if os.path.exists(save_fname):\n        os.remove(save_fname)\n    save_result_txt(save_fname, final_results)\n\n\nif __name__ == '__main__':\n\n    main()\n","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":15729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"615261879","text":"import ep_statemachine\nimport machine\n\ndef setup(temps, thresh):\n    pump_pin = machine.Pin(14, machine.Pin.OUT)\n    warn_led = machine.Pin(17, machine.Pin.OUT)\n\n    pump_on = lambda: pump_pin.off()\n    pump_off = lambda: pump_pin.on()\n\n    warn_on = lambda: warn_led.off()\n    warn_off = lambda: warn_led.on()\n\n    def s_cold_enter():\n        pump_off()\n        warn_off()\n\n    def s_heatUp1_enter():\n        pump_on()\n        warn_off()\n\n    def s_heatUp2_enter():\n        pump_off()\n        warn_off()\n\n    def s_hot_enter():\n        pump_off()\n        warn_off()\n\n    def s_toHot_enter():\n        pump_on()\n        warn_on()\n\n    def s_muchToHot_enter():\n        pump_on()\n        warn_on()\n\n    s_cold = ep_statemachine.state(\"cold\", initial=True, entry_action=s_cold_enter) #1\n    s_heatUp1 = ep_statemachine.state(\"heatUp1\", entry_action=s_heatUp1_enter) #2\n    s_heatUp2 = ep_statemachine.state(\"heatUp2\", entry_action=s_heatUp2_enter) #3\n    s_hot = ep_statemachine.state(\"hot\", entry_action=s_hot_enter) #4\n    s_toHot = ep_statemachine.state(\"toHot\", entry_action=s_toHot_enter) #5\n    s_muchToHot = ep_statemachine.state(\"muchToHot\", entry_action=s_muchToHot_enter) #6\n\n    t_12 = ep_statemachine.transition(s_heatUp1, \"t_12\", # (T_Oven > T1) & T_Oven > T_TankU + dT1\n        lambda: (temps(\"T_Oven\")>thresh(\"T1\")) & (temps(\"T_Oven\")>temps(\"T_TankU\")+thresh(\"dT1\")) \n    )\n\n    t_23 = ep_statemachine.transition(s_heatUp2, \"t_23\", \n        lambda: (temps(\"T_Oven\")<thresh(\"T1\")) & (temps(\"T_Oven\")>temps(\"T_TankU\")+thresh(\"dT1\")) # T_Oven > T_TankU + dT1\n    )\n\n    t_cold = ep_statemachine.transition(s_cold, \"t_cold\", \n        lambda: (temps(\"T_Oven\")<thresh(\"T1\")) # T_Oven < T1\n    )\n\n    t_34 = ep_statemachine.transition(s_hot, \"t_34\", \n        lambda: (temps(\"T_TankL\")>thresh(\"T2\")) # T_TankL > T2\n    )\n\n    t_42 = 
ep_statemachine.transition(s_heatUp1, \"t_42\", # T_TankL < T2 - dT1 & T_Oven > T_TankU + dT1\n lambda: (temps(\"T_TankL\")<thresh(\"T2\")-thresh(\"dT1\")) & (temps(\"T_Oven\")>temps(\"T_TankU\")+thresh(\"dT1\")) \n )\n\n t_toHot = ep_statemachine.transition(s_toHot, \"t_toHot\", \n lambda: (temps(\"T_Oven\")>thresh(\"T3\")+thresh(\"dT2\")) # T_Oven > T3 + dT2\n )\n\n t_54 = ep_statemachine.transition(s_hot, \"t_54\", \n lambda: (temps(\"T_Oven\")<thresh(\"T3\")) # T_Oven < T3\n )\n\n t_56 = ep_statemachine.transition(s_muchToHot, \"t_56\", \n lambda: (temps(\"T_Oven\")>thresh(\"T4\")+thresh(\"dT2\")) # T_Oven > T4 + dT2\n )\n\n t_65 = ep_statemachine.transition(s_toHot, \"t_65\", \n lambda: (temps(\"T_Oven\")<thresh(\"T4\")) # T_Oven < T4\n )\n\n\tif new_count - old_count >= 0:\n\t\tlogger.info('increase %s funds' % (new_count - old_count))\n\telse:\n\t\tlogger.info('reduce %s funds' % (old_count - new_count))\n\nif __name__ == \"__main__\":\n\t#today = datetime.datetime.strftime(datetime.date.today(), '%Y-%m-%d')\n\tmain()","sub_path":"get_all_funds.py","file_name":"get_all_funds.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"242610548","text":"\"\"\"\n@author = 'Abdullah Waqar'\nWrite a program that allows the user to enter a series of numerical grades and then calculates an\naverage. Entering an integer of value 0 ends the input and produces the result. Use two separate\nfunctions (methods) to 1) input data and calculate the total of all grades entered and 2) calculate\nthe average.\n\"\"\"\n\ndef get_total():\n grade_sum = 0\n grade_count = 0\n\n while True:\n temp_input = int(input('Enter the number: '))\n if temp_input == 0: break\n grade_count += 1\n grade_sum += temp_input\n return get_average(grade_sum, grade_count)\n\ndef get_average(sum, count):\n return sum / count\n\nif __name__ == '__main__':\n print(get_total())","sub_path":"Algorithms/AI/lab03/numarical_grade/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"240388952","text":"# coding=utf-8\nfrom requests_html import HTMLSession\nfrom tqdm import tqdm\nimport os\nimport time\nimport re\n\n\nif not os.path.exists('./hparchive'):\n os.makedirs('./hparchive')\n\n\nusername = input('用户名: ')\npwd = input('密码: ')\n\ndata = {'username': username, 'password': pwd}\n\n\n\nuseragent = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Safari/605.1.15'}\nhpsession = HTMLSession()\nhpsession.headers.update(useragent)\n\nloginurl = 'https://www.hi-pda.com/forum/logging.php?action=login&loginsubmit=yes&inajax=1'\nhpsession.post(loginurl, data=data)\n\nfavlist = {}\n\n\n# Fetch the favourite thread titles and tids from the page, and determine the total number of favourite pages (maxpagenum)\ndef getfav(page=1):\n if (page == 1):\n favurl = 'https://www.hi-pda.com/forum/my.php?item=favorites&type=thread'\n else:\n favurl = 'https://www.hi-pda.com/forum/my.php?item=favorites&type=thread&page=' + \\\n str(page)\n\n fav = hpsession.get(favurl)\n \n tbodysel = '#wrap > div.main > div > div.threadlist.datalist > form > table > tbody'\n\n tbody = fav.html.find(tbodysel, first=True)\n favas = tbody.find('tr > th > a')\n\n for a in favas:\n tid = a.attrs['href'].split('tid=')[1].split('&')[0]\n favlist[tid] = a.text\n\n if (page == 1):\n maxpagesel = '#wrap > div.main > div > div.threadlist.datalist > form > table > tbody > tr:nth-child(76) > td:nth-child(3) > div > a:nth-last-child(2)'\n maxpageanchor = fav.html.find(maxpagesel, first=True)\n if (maxpageanchor is None):\n maxpage = 1\n else:\n maxpage = int(maxpageanchor.text.split('page=')[0])\n return maxpage\n\n\n\ndef genTOC():\n 
tocs = ''\n for i in favlist:\n tocs = tocs + '<a href=\"' + i + '-1.html\">' + favlist[i] + '</a>
    ' + '\n'\n \n with open('fav.html','w') as f:\n f.write(tocs) \n \n\n\n\n# Download the html of the thread with the given tid\ndef savethread(tid,page=1):\n rawurl = 'https://www.hi-pda.com/forum/viewthread.php?tid='\n if (page == 1):\n \n threadurl = rawurl + str(tid)\n \n else:\n threadurl = rawurl + str(tid) + '&extra=&page=' + str(page)\n\n\n # replacestr = 'viewthread.php?tid=' + str(tid) + '&extra=&page=' + str(page)\n \n r = hpsession.get(threadurl)\n # modhtml = r.html.html.replace(replacestr,str(tid) + '-' + str(page) + 'html')\n r1 = 'viewthread.php\\?tid=' + tid + '&extra=&page=(\\d+)'\n r2 = tid + r'-\\1.html'\n modhtml = re.sub(r1,r2,r.html.html)\n if (r.status_code != 200):\n print(favlist[tid])\n with open('./hparchive/' + str(tid) + '-' + str(page) + '.html','w',encoding='gb18030') as f:\n f.write(modhtml)\n \n hasnextpage = r.html.find('a.next')\n time.sleep(0.1)\n if (len(hasnextpage) > 1 ):\n savethread(tid,page=page+1)\n\n \n\n\n\nmaxpagenum = getfav(page=1)\n\nif maxpagenum > 1:\n for i in range(2, maxpagenum + 1):\n getfav(page=i)\n\n\n\nprint('一共' + str(len(favlist)) + '个收藏贴')\ngenTOC()\n\n\n\nfor tid in tqdm(favlist):\n savethread(tid)\n time.sleep(0.3)","sub_path":"hparchive.py","file_name":"hparchive.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"635161599","text":"from flask import g\nfrom flask_httpauth import HTTPTokenAuth\nfrom flask import make_response, jsonify\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\nserializer = Serializer('secret key here', expires_in=43200)\nauth = HTTPTokenAuth(scheme='Token')\n\n\n@auth.verify_token\ndef verify_token(token):\n    \"\"\"\n    Check whether the token is valid\n    :param token:\n    :return:\n    \"\"\"\n    g.username = None\n    try:\n        data = serializer.loads(token)\n    except:\n        return False\n    if 'username' in data:\n        g.username = data['username']\n        return True\n    return False\n\n\n@auth.error_handler\ndef unauthorized():\n    \"\"\"\n    Message returned when token validation fails\n    :return:\n    \"\"\"\n    return make_response(jsonify({\"result\": \"NT\", \"error\": \"Token已失效!请重新登陆!\"}), 401)\n","sub_path":"Source/collaboration_2/token_manage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"610153895","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\nstrPath = '/Users/lishaojun/Documents/dev/'\n\n\nfileA = 'taz_7_结果.xlsx'\nfileB = 'taz_8_加产业合计.xlsx'\n\n\ndf = pd.read_excel(strPath+fileA)\n\n\ndf = df.set_index('TAZ_ID')\ndf = df.sort_index()\n\ndf['Total'] = 0.0\n\nfor index, row in df.iterrows():\n    df.at[index,'Total'] = row['F4']+row['F5']+row['F6']+row['F7']+row['F8']+row['F9']+row['F10']+row['F11']+row['F12']+row['F13']+row['F14']+row['F15']+row['F16']+row['F17']+row['F18']+row['F19']+row['F20']+row['F21']+row['F22']\n    \ndf.to_excel(strPath+fileB)","sub_path":"liuyue/Script/tazarea8.py","file_name":"tazarea8.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"371741160","text":"\"\"\"Define a notification mechanism for all AppDaemon apps.\"\"\"\nfrom datetime import datetime, timedelta\nfrom enum import Enum\nfrom typing import Callable, List, Union  # pylint: disable=unused-import\nfrom uuid import UUID\n\nfrom core import Base\nfrom const import BLACKOUT_END, BLACKOUT_START, CONF_PEOPLE\nfrom people import Person\nfrom helpers.dt import time_is_between\n\n\nclass 
NotificationTypes(Enum):\n \"\"\"Define an enum for notification types.\"\"\"\n\n single = 1\n repeating = 2\n\n\n# pylint: disable=too-few-public-methods,too-many-instance-attributes\nclass Notification:\n \"\"\"Define a notification object.\"\"\"\n\n def __init__(self, kind, message, *, title=None, **kwargs):\n \"\"\"Initialize.\"\"\"\n self.blackout_end_time = kwargs.get('blackout_end_time')\n self.blackout_start_time = kwargs.get('blackout_start_time')\n self.cancel = None\n self.interval = kwargs.get('interval')\n self.kind = kind\n self.message = message\n self.target = kwargs.get('target')\n self.when = kwargs.get('when')\n\n if title:\n self.title = title\n else:\n self.title = ''\n\n self.data = kwargs.get('data')\n if self.data is None:\n self.data = {}\n self.data.setdefault('push', {})\n self.data['push'].setdefault(\n 'thread-id', '{0}_{1}'.format(title, message))\n\n def __eq__(self, other):\n \"\"\"Define method to compare notification objects.\"\"\"\n return self.__dict__ == other.__dict__\n\n\nclass NotificationManager(Base):\n \"\"\"Define an app to act as a system-wide notifier.\"\"\"\n\n def configure(self):\n \"\"\"Configure.\"\"\"\n self.registry = []\n\n self.listen_event(self._notifier_test_cb, 'NOTIFIER_TEST')\n\n def _adjust_for_blackout(self, notification: Notification) -> Notification:\n \"\"\"Reschedule a notification's schedule for outside of blackout.\"\"\"\n if self._in_blackout(notification):\n if notification.when:\n target_date = notification.when.date()\n active_time = notification.when.time()\n else:\n target_date = self.date()\n active_time = self.time()\n\n if active_time > self.parse_time(notification.blackout_end_time):\n target_date = target_date + timedelta(days=1)\n\n new_dt = datetime.combine(\n target_date, self.parse_time(notification.blackout_end_time))\n\n self.log(\n 'Rescheduling notification: {0}'.format(\n notification.title if notification.title else notification.\n message))\n self.log('New date/time: {0}'.format(new_dt))\n\n notification.when = new_dt\n else:\n notification.when = self.datetime() + timedelta(seconds=1)\n\n return notification\n\n def _get_targets(self, target: Union[str, list]) -> list:\n \"\"\"Get a list of targets based on input string.\"\"\"\n if isinstance(target, str):\n _targets = [target]\n else:\n _targets = target\n\n targets = [] # type: List[str]\n for item in _targets:\n split = item.split(' ') # type: ignore\n\n # 1. target='not Person'\n if split[0] == 'not' and split[1] in [\n person.first_name\n for person in self.global_vars[CONF_PEOPLE]\n ]:\n targets += [\n notifier for person in self.global_vars[CONF_PEOPLE]\n if person.first_name != split[1]\n for notifier in person.notifiers\n ]\n\n # 2. target='Person'\n elif split[0] in [person.first_name\n for person in self.global_vars[CONF_PEOPLE]]:\n targets += [\n notifier for person in self.global_vars[CONF_PEOPLE]\n if person.first_name == split[0]\n for notifier in person.notifiers\n ]\n\n else:\n try:\n # 3. target='home'\n targets += [\n notifier for person in getattr(\n self.presence_manager, 'whos_{0}'.format(item))()\n for notifier in person.notifiers\n ]\n except AttributeError:\n # 4. target='everyone'\n if item == 'everyone':\n targets += [\n notifier\n for person in self.global_vars[CONF_PEOPLE]\n for notifier in person.notifiers\n ]\n\n # 5. 
target='person_iphone'\n else:\n targets.append(item)\n\n return targets\n\n def _in_blackout(self, notification: Notification) -> bool:\n \"\"\"Determine whether a notification is set to send in blackout.\"\"\"\n if (not notification.blackout_start_time\n or not notification.blackout_end_time):\n return False\n\n if notification.when:\n return time_is_between(\n self, notification.when, notification.blackout_start_time,\n notification.blackout_end_time)\n\n return self.now_is_between(\n notification.blackout_start_time, notification.blackout_end_time)\n\n def _notifier_test_cb(\n self, event_name: str, data: dict, kwargs: dict) -> None:\n \"\"\"Run a test.\"\"\"\n try:\n kind = data['kind']\n message = data['message']\n except KeyError:\n self.error('Missing message and/or kind in notifier test')\n return\n\n _data = data.get('data', None)\n blackout_end_time = data.get('blackout_end_time', BLACKOUT_END)\n blackout_start_time = data.get('blackout_start_time', BLACKOUT_START)\n interval = data.get('interval', None)\n target = data.get('target', None)\n title = data.get('title', None)\n when = data.get('when', None)\n\n if kind == NotificationTypes.single.name:\n self.send(\n message,\n title=title,\n when=when,\n target=target,\n data=_data,\n blackout_start_time=blackout_start_time,\n blackout_end_time=blackout_end_time)\n elif kind == NotificationTypes.repeating.name:\n self.repeat(\n message,\n interval,\n title=title,\n when=when,\n target=target,\n data=_data,\n blackout_start_time=blackout_start_time,\n blackout_end_time=blackout_end_time)\n\n def _send_cb(self, kwargs: dict) -> None:\n \"\"\"Send a single (immediate or scheduled) notification.\"\"\"\n notification = kwargs['notification']\n\n # If an instance of a repeating notification occurs in the blackout,\n # we should cancel the entire series and resume when the blackout\n # lifts. 
Setting `notification.when` to `None` forces a check for\n # whether we're currently in the blackout, rather than a check for\n # whether the notification's original `when` is in the blackout.\n if notification.kind == NotificationTypes.repeating:\n notification.when = None\n if self._in_blackout(notification):\n notification.cancel()\n self.dispatch(notification)\n return\n\n for target in self._get_targets(notification.target):\n self.log(\n 'Sending notification to \"{0}\": {1}'.format(\n target, notification.title\n if notification.title else notification.message))\n\n self.call_service(\n 'notify/{0}'.format(target),\n message=notification.message,\n title=notification.title,\n data=notification.data)\n\n if notification.kind == NotificationTypes.single:\n self.registry.remove(notification)\n\n def create_omnifocus_task(self, title: str) -> None:\n \"\"\"Create a task in Aaron's omnifocus.\"\"\"\n self.notify(\n 'created on {0}'.format(str(self.datetime())),\n title=title,\n name='omnifocus')\n\n def create_persistent_notification(self, title: str, message: str) -> None:\n \"\"\"Create a notification in the HASS UI.\"\"\"\n self.call_service(\n 'persistent_notification/create', title=title, message=message)\n\n def dispatch(self, notification: Notification) -> Callable:\n \"\"\"Store and dispatch a notification, returning a cancel method.\"\"\"\n notification = self._adjust_for_blackout(notification)\n\n if not notification.target:\n notification.target = 'everyone'\n\n if notification.kind == NotificationTypes.single:\n handle = self.run_at(\n self._send_cb, notification.when, notification=notification)\n else:\n handle = self.run_every(\n self._send_cb,\n notification.when,\n notification.interval,\n notification=notification)\n\n def cancel(delete: bool = True) -> None:\n \"\"\"Define a method to cancel and return the notification.\"\"\"\n self.cancel_timer(handle)\n if delete:\n self.registry.remove(notification)\n\n notification.cancel = cancel\n self.registry.append(notification)\n\n return cancel\n\n def get_target_from_push_id(self, push_id: UUID) -> Union[None, Person]:\n \"\"\"Return a person from a provided permanent device ID.\"\"\"\n try:\n return next((\n person for person in self.global_vars[CONF_PEOPLE]\n if person.push_device_id == push_id))\n except StopIteration:\n return None\n\n def repeat(\n self,\n message: str,\n interval: int,\n *,\n title: str = None,\n when: Union[datetime, None] = None,\n target: Union[str, list, None] = None,\n data: Union[dict, None] = None,\n blackout_start_time: str = BLACKOUT_START,\n blackout_end_time: str = BLACKOUT_END) -> Callable:\n \"\"\"Send a repeating notification to one or more targets.\"\"\"\n return self.dispatch(\n Notification(\n NotificationTypes.repeating,\n message,\n title=title,\n blackout_end_time=blackout_end_time,\n blackout_start_time=blackout_start_time,\n data=data,\n interval=interval,\n target=target,\n when=when))\n\n def send(\n self,\n message: str,\n *,\n title: str = None,\n when: Union[datetime, None] = None,\n target: Union[str, list, None] = None,\n data: Union[dict, None] = None,\n blackout_start_time: str = BLACKOUT_START,\n blackout_end_time: str = BLACKOUT_END) -> Callable:\n \"\"\"Send a notification to one or more targets.\"\"\"\n return self.dispatch(\n Notification(\n NotificationTypes.single,\n message,\n title=title,\n blackout_end_time=blackout_end_time,\n blackout_start_time=blackout_start_time,\n data=data,\n target=target,\n 
when=when))\n","sub_path":"appdaemon/settings/apps/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":11613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"4149744","text":"import re\n\nimport requests\n\n\nIP_ADDR = re.compile(r'^([0-9]+\\.){3}[0-9]+$')\n\nURL = 'https://api.ipify.org'\n\n\ndef get_current_ipaddr() -> str:\n    \"\"\"Retrieves the current public IP address using ipify.\n\n    Returns:\n        str: the IP address\n\n    Raises:\n        APIResponseError: if an invalid response was detected\n\n    \"\"\"\n    response = requests.get(URL).text\n    if IP_ADDR.match(response):\n        return response\n    else:\n        raise APIResponseError(response)\n\n\nclass APIResponseError(ValueError):\n    \"\"\"The *ipify* API did not return a valid IP address.\"\"\"\n    def __init__(self, response) -> None:\n        super().__init__(\n            f'{URL} did not return a valid IP address. '\n            f'Response: {response}'\n        )\n","sub_path":"ipaddr.py","file_name":"ipaddr.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"592844618","text":"'''\r\n@author: PUM\r\n'''\r\nfrom cloud_providers.basecloudprovider import BaseCloudProvider\r\nimport boto.ec2\r\nimport time\r\nimport deployment\r\nimport datetime\r\nimport log\r\nimport re\r\nfrom boto.exception import EC2ResponseError\r\n\r\nlogger = log.setup_logger(__name__)\r\n\r\nre_url_app = re.compile(\"{{deploy_url_app}}\", re.IGNORECASE)\r\nre_parameter = re.compile(\"{{deploy_parameter}}\", re.IGNORECASE)\r\nre_shutdown = re.compile(\"{{deploy_shutdown}}\", re.IGNORECASE)\r\n\r\nclass Ec2CloudProvider(BaseCloudProvider):\r\n    def __init__(self, access_key=None, secret_key=None, region='eu-west-1'):\r\n        ''' If no AWS credentials are specified, boto will use the credentials defined in environment vars or config files. See http://boto.readthedocs.org/en/latest/boto_config_tut.html '''\r\n        self.access_key = access_key\r\n        self.secret_key = secret_key\r\n        self.region = region\r\n\r\n        # Boto treats an empty string as a passed credential. 
To work around a potential wrong database entry, make sure we use None in this case\r\n        if self.access_key == '': self.access_key = None\r\n        if self.secret_key == '': self.secret_key = None\r\n\r\n        self.conn = None\r\n\r\n\r\n    def _get_connection(self):\r\n        if self.conn is None:\r\n            self.conn = boto.ec2.connect_to_region(self.region,\r\n                aws_access_key_id = self.access_key,\r\n                aws_secret_access_key = self.secret_key\r\n                )\r\n\r\n        return self.conn\r\n\r\n    def take_snapshot_after_shutdown(self, inst, snapshot_name, func_finished=None):\r\n        while inst.state != 'running':\r\n            logger.debug(\"Waiting for instance '%s' to enter running state\" % inst.id)\r\n            time.sleep(10)\r\n            inst.update()\r\n\r\n        while inst.state != 'stopped':\r\n            logger.debug(\"Waiting for instance '%s' to enter stopped state\" % inst.id)\r\n            time.sleep(10)\r\n            inst.update()\r\n\r\n        logger.debug(\"Instance '%s' entered stopped state -> taking snapshot '%s'\" % (inst.id, snapshot_name))\r\n\r\n        snapshot_id, image_id = self.instance_to_snapshot(inst.id, snapshot_name)\r\n\r\n        logger.debug(\"Created snapshot '%s' of instance '%s' -> terminating the instance now\" % (snapshot_name, inst.id))\r\n        inst.terminate()\r\n\r\n        if func_finished is not None:\r\n            func_finished(snapshot_id, image_id)\r\n\r\n\r\n    def _get_init_script(self, deploy_url_app, deploy_parameter, deploy_shutdown=True):\r\n        init_script = open(\"cloud_providers/scripts/ec2init.sh\", \"r\").read()\r\n\r\n        # Replace deployment parameters, use regex to have a case insensitive replace\r\n        init_script = re_url_app.sub(deploy_url_app, init_script)\r\n        init_script = re_parameter.sub(deploy_parameter, init_script)\r\n\r\n        if deploy_shutdown:\r\n            init_script = re_shutdown.sub(str(1), init_script)\r\n        else:\r\n            init_script = re_shutdown.sub(str(0), init_script)\r\n\r\n        return init_script\r\n\r\n    def instance_to_snapshot(self, instance_id, snapshot_name, description=\"\"):\r\n        ''' Returns (snapshot_id, image_id) '''\r\n        def take_snapshot(instance_id, snapshot_name, description=\"\"):\r\n            conn = self._get_connection()\r\n\r\n            image_id = conn.create_image(instance_id, snapshot_name, description)\r\n            snapshot_id = None\r\n\r\n            # Try to get the snapshot id from the image. This takes a while, so try multiple times\r\n            for _ in range(5):\r\n                if snapshot_id is None:\r\n                    snapshot_id = self.get_snapshot_from_image(image_id)\r\n\r\n                    time.sleep(5)\r\n                else:\r\n                    break\r\n\r\n            return snapshot_id, image_id\r\n\r\n        # Make sure we actually create a snapshot and don't get a duplicate snapshot_name exception\r\n        try:\r\n            snapshot_id, image_id = take_snapshot(instance_id, snapshot_name, description)\r\n        except EC2ResponseError as exc:\r\n            if \"InvalidAMIName.Duplicate\" in str(exc):\r\n                # In case the snapshot_name is already in use, we automatically append the current datetime to the snapshot_name\r\n                logger.error(\"Snapshot creation failed, trying to append current datetime. 
Exception: '%s'\" % str(exc))\r\n snapshot_id, image_id = self.instance_to_snapshot(instance_id, snapshot_name + datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\"), description)\r\n else:\r\n logger.error(\"Snapshot creation failed with '%s'\" % str(exc))\r\n snapshot_id, image_id = None, None\r\n\r\n return snapshot_id, image_id\r\n\r\n def get_snapshot_from_image(self, image_id):\r\n conn = self._get_connection()\r\n\r\n try:\r\n image = conn.get_image(image_id)\r\n\r\n for _, block_obj in image.block_device_mapping.items():\r\n if block_obj.snapshot_id is not None and len(block_obj.snapshot_id) > 0:\r\n return block_obj.snapshot_id\r\n except:\r\n pass\r\n\r\n return None\r\n\r\n def initial_deployment(self, ami, key_name, security_group_ids, subnet_id, instance_type, deploy_shutdown, virtual_device_layer, test_layer, deploy_parameter=\"\", **kwargs):\r\n logger.debug(\"Starting deployment\")\r\n\r\n # Create archive with deploy.json file\r\n deployment.create_deploy_json_file(virtual_device_layer, test_layer)\r\n url_app = deployment.selfdeploy_to_s3()\r\n deployment.remove_deploy_json_file()\r\n\r\n init_script = self._get_init_script(url_app, deploy_parameter, deploy_shutdown)\r\n\r\n logger.debug(\"Starting deployment instance\")\r\n instances = self.launch_instance(ami=ami, key_name=key_name, security_group_ids=security_group_ids, subnet_id=subnet_id,instance_type=instance_type, user_data=init_script, **kwargs)\r\n\r\n return instances\r\n\r\n\r\n def launch_instance(self, ami, key_name, security_group_ids, subnet_id, instance_type, user_data=\"\", **kwargs):\r\n conn = self._get_connection()\r\n\r\n reservation = conn.run_instances(image_id=ami, key_name=key_name, security_group_ids=security_group_ids, subnet_id=subnet_id, instance_type=instance_type, user_data=user_data, **kwargs)\r\n return reservation.instances\r\n\r\n\r\n def start_instance(self, instance_id):\r\n conn = self._get_connection()\r\n\r\n logger.debug(\"Starting instance '%s'\" % instance_id)\r\n inst = conn.get_only_instances(instance_ids=[instance_id])\r\n inst[0].start()\r\n\r\n return True\r\n\r\n def stop_instance(self, instance_id):\r\n conn = self._get_connection()\r\n\r\n logger.debug(\"Stopping instance '%s'\" % instance_id)\r\n conn.stop_instances(instance_ids=[instance_id])\r\n\r\n return True\r\n\r\n def terminate_instance(self, instance_id):\r\n conn = self._get_connection()\r\n\r\n logger.debug(\"Terminating instance '%s'\" % instance_id)\r\n conn.terminate_instances(instance_ids=[instance_id])\r\n\r\n return True\r\n\r\n def instance_status(self, instance_id):\r\n conn = self._get_connection()\r\n\r\n logger.debug(\"Getting status of instance '%s'\" % instance_id)\r\n\r\n instances = conn.get_only_instances(instance_ids=[instance_id])\r\n\r\n try:\r\n return instances[0].state\r\n except:\r\n return None\r\n\r\n def remove_snapshot(self, snapshot_id):\r\n conn = self._get_connection()\r\n\r\n logger.debug(\"Removing snapshot '%s'\" % snapshot_id)\r\n\r\n try:\r\n snapshots = conn.get_all_snapshots(snapshot_ids=[snapshot_id])\r\n snapshot = snapshots[0]\r\n\r\n snapshot.delete()\r\n except (IndexError, EC2ResponseError):\r\n return False\r\n\r\n return True\r\n\r\n def remove_image(self, image_id):\r\n conn = self._get_connection()\r\n\r\n logger.debug(\"Removing image '%s'\" % image_id)\r\n\r\n try:\r\n images = conn.get_all_images(image_ids=[image_id])\r\n image = images[0]\r\n\r\n image.deregister()\r\n except (IndexError, EC2ResponseError):\r\n return False\r\n\r\n return 
True\r\n","sub_path":"cloud_providers/ec2provider.py","file_name":"ec2provider.py","file_ext":"py","file_size_in_byte":8196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"28427487","text":"import subprocess\nimport re\nimport os\nimport time\nimport sys\n\n\nc = \"cd /bin && ls | grep -i telnet\"\np = subprocess.Popen(c, shell=True, stdout=subprocess.PIPE)\noutput, err = p.communicate()\nn = str(output)\nproc = re.findall(\"telnet\", n)\nos.system(\"clear && date\")\n\nfor i in proc:\n if i == \"telnet\":\n print(\"telnet found, initializing...\")\n time.sleep(1)\n import subprocess_linux_telnet as slt\n\n # initializes the telnet connection.\n if __name__ == '__main__':\n slt.init()\n break\n\nelse:\n\n print(\"Seems like you currently have no telnet installed...\\n\")\n with input(\"Would you like to install it? \").lower() as ui:\n if ui == \"yes\" or \"y\":\n os.system(\"sudo apt-get install telnet && exit\")\n print(\"installation complete\")\n time.sleep(1)\n print(\"initializing telnet...\")\n time.sleep(2)\n os.system(\"clear && date\")\n import subprocess_linux_telnet as slt\n\n # initializes the telnet connection.\n if __name__ == '__main__':\n slt.init()\n else:\n sys.exit(0)\n","sub_path":"opt_black/itc.py","file_name":"itc.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"418407231","text":"import logging\nimport os\nimport random\nimport time\nimport threading\n\nfrom .timer import Timer\nfrom .download import Download\nfrom .xbee import XBee\nfrom .database import Database\n\nclass DataStationHandler(object):\n \"\"\"Communication handler for data stations (XBee station wakeup and SFTP download)\n\n This class manages downstream interfacing between payload and data\n station.\n\n SFTP Download:\n Each download is spawned as a worker thread to isolate the effect\n of failure in case of unexpected socket exceptions.\n\n XBee Wakeup:\n When the UAV arrives at a data station, the station is woken up with\n an XBee RF signal including its data station ID ('123', '200', etc.)\n\n \"\"\"\n\n def __init__(self, _connection_timeout_millis, _read_write_timeout_millis,\n _overall_timeout_millis, _rx_queue):\n\n self.connection_timeout_millis = _connection_timeout_millis\n self.read_write_timeout_millis = _read_write_timeout_millis\n self.overall_timeout_millis = _overall_timeout_millis\n self.rx_queue = _rx_queue\n self.xbee = XBee()\n self.db = Database()\n self._alive = True\n self.flight_id = None # Will be created before the flight's first download\n\n def connect(self):\n self.xbee.connect()\n\n def run(self, rx_lock, is_downloading):\n \"\"\"Loop forever and handle downloads as data stations are reached\"\"\"\n\n while self._alive:\n if not self.rx_queue.empty(): # You've got mail!\n self._wake_download_and_sleep(rx_lock, is_downloading)\n else:\n time.sleep(1) # Check RX queue again in 1 second\n\n logging.error(\"Data station handler terminated\")\n\n def stop(self):\n logging.info(\"Stopping data station handler...\")\n self._alive = False\n\n def _wake_download_and_sleep(self, rx_lock, is_downloading):\n\n # Update system status (used by heartbeat)\n is_downloading.set()\n\n # Get data station ID as message from rx_queue\n rx_lock.acquire()\n data_station_id = self.rx_queue.get().strip() # Removes invisible characters\n rx_lock.release()\n\n # Only add a flight when a data station is actually downloaded\n if self.flight_id 
is None:\n            self.flight_id = self.db.insert_new_flight()\n\n        self.db.insert_data_station(data_station_id)\n\n        self.db.add_station_to_flight(data_station_id, self.flight_id)\n\n        # Pair the station with the flight in the flights_stations table (initial percent 0).\n        self._redownload_request = False # [ get redownload status from database for this ID ]\n\n        logging.info('Data station arrival: %s', data_station_id)\n\n        # Wake up data station\n        logging.info('Waking up over XBee...')\n        self.xbee.send_command(data_station_id, 'POWER_ON')\n\n        xbee_wake_command_timer = Timer()\n        wakeup_successful = True\n        wakeup_time_s = 0 # initialized here so it is defined even if the ACK loop below never runs\n\n        wakeup_timeout_s = self.db.get_timeout('wakeup')*60\n        logging.debug(\"Wakeup timeout: %s s\", wakeup_timeout_s)\n\n        if not (os.getenv('TESTING') == 'True'):\n            while not self.xbee.acknowledge(data_station_id, 'POWER_ON'):\n                wakeup_time_s = xbee_wake_command_timer.time_elapsed()\n                logging.debug(\"POWER_ON data station %s\", data_station_id)\n                self.xbee.send_command(data_station_id, 'POWER_ON')\n                time.sleep(1) # Try again in 1s --> this gives a few attempts in the 5s listening window\n\n                # Will try waking up the data station over XBee until the wakeup timeout elapses before moving on\n                if xbee_wake_command_timer.time_elapsed() > wakeup_timeout_s:\n                    wakeup_successful = False\n                    logging.error(\"POWER_ON command ACK failure. Moving on...\")\n                    break\n\n        logging.debug(\"Total wakeup time: %s\", wakeup_time_s)\n\n        did_connect = False\n        did_find_device = False\n        total_files = 0\n        successful_downloads = 0\n        download_speed_mbps = 0\n        total_data_downloaded_mb = 0\n        connection_time_s = 0\n        download_time_s = 0\n\n        # Don't actually download\n        if (os.getenv('TESTING') == 'True'):\n            r = random.randint(10,20)\n\n            logging.debug('Simulating download for %i seconds', r)\n            time.sleep(r) # \"Download\" for random time between 10 and 20 seconds\n\n        # Only try download if wakeup was successful\n        elif (wakeup_successful): # This is the real world (ahhh!)\n            # '.local' ensures visibility on the network\n\n            logging.info('XBee ACK received, beginning download...')\n\n            redownload_request = self.db.get_redownload_request(data_station_id)\n            timeout_event = threading.Event()\n            download_over = threading.Event()\n\n            connection_timeout_s = self.db.get_timeout('connection')*60\n\n            download_worker = Download(data_station_id.strip()+'.local',\n                                       redownload_request,\n                                       self.flight_id,\n                                       connection_timeout_s,\n                                       timeout_event,\n                                       download_over)\n\n            try:\n                # This throws an error if the connection times out\n                download_worker.start()\n\n                # Attempt to join the thread after timeout.\n                # If still alive the download timed out.\n                download_timeout_s = self.db.get_timeout('download')*60\n                logging.debug(\"Download timeout: %s s\", download_timeout_s)\n\n                download_worker.join(download_timeout_s)\n\n                timeout_event.set()\n\n                # Waits (at most 10s) for download_worker to unset this Event\n                # signalling that the download has gracefully shut down\n                download_over.wait(10)\n                download_over.clear()\n\n                did_connect = download_worker.did_connect\n                did_find_device = download_worker.did_find_device\n                successful_downloads = download_worker.successful_downloads\n                total_files = download_worker.total_files\n                download_speed_mbps = download_worker.download_speed_mbps\n                total_data_downloaded_mb = download_worker.total_data_downloaded_mb\n                connection_time_s = download_worker.connection_time_s\n                download_time_s = download_worker.download_time_s\n\n                if download_worker.is_alive():\n                    logging.info(\"Download timeout: Download cancelled\")\n                else:\n                    logging.info(\"Download complete\")\n\n                logging.debug(\"Total 
download time: %s\", download_time_s)\n\n except Exception as e:\n logging.error(e)\n\n # Wake up data station\n logging.info('Shutting down data station %s...', data_station_id)\n self.xbee.send_command(data_station_id, 'POWER_OFF')\n\n xbee_sleep_command_timer = Timer()\n shutdown_successful = True\n\n # Edge case where no wakeup happened, we don't want shutdown to be shown as successful\n if (wakeup_successful == False): shutdown_successful = False\n\n shutdown_timeout_s = self.db.get_timeout('shutdown')*60\n logging.debug(\"Shutdown timeout: %s s\", shutdown_timeout_s)\n\n # If the data station actually turned on and we're not in test mode, shut it down\n if not (os.getenv('TESTING') == 'True') and (wakeup_successful == True):\n while not self.xbee.acknowledge(data_station_id, 'POWER_OFF'):\n logging.debug(\"POWER_OFF data station %s\", data_station_id)\n self.xbee.send_command(data_station_id, 'POWER_OFF')\n time.sleep(1) # Try again in 0.5s\n\n # Will try shutting down data station over XBee for 60 seconds before moving on\n if xbee_sleep_command_timer.time_elapsed() > shutdown_timeout_s:\n logging.error(\"POWER_OFF command ACK failure. Moving on...\")\n shutdown_successful = False\n break\n\n shutdown_time_s = xbee_sleep_command_timer.time_elapsed()\n logging.debug(\"Total shutdown time: %s\", shutdown_time_s)\n\n self.db.update_flight_station_stats(data_station_id,\n self.flight_id,\n successful_downloads,\n total_files,\n wakeup_successful,\n did_connect,\n did_find_device,\n shutdown_successful,\n total_data_downloaded_mb,\n download_speed_mbps,\n wakeup_time_s,\n connection_time_s,\n download_time_s,\n shutdown_time_s)\n # Mark task as complete, even if it fails\n self.rx_queue.task_done()\n\n # Update system status (for heartbeat)\n is_downloading.clear() # Analagous to is_downloading = False\n","sub_path":"avionics/services/data_station_handler/data_station_handler.py","file_name":"data_station_handler.py","file_ext":"py","file_size_in_byte":8956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"123479611","text":"#!/usr/bin/python3\n\nwith open(\"input\", \"r\") as f:\n directions = f.read().strip()\n\npos = [0,0]\ngifted = set()\ngifted.add((0,0))\n\nfor d in directions:\n if d == \"^\":\n pos = [pos[0], pos[1]+1]\n elif d == \">\":\n pos = [pos[0]+1, pos[1]]\n elif d == \"v\":\n pos = [pos[0], pos[1]-1]\n elif d == \"<\":\n pos = [pos[0]-1, pos[1]]\n\n gifted.add(tuple(pos))\n\nprint(len(gifted))\n","sub_path":"2015/day3/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"367280846","text":"# -*- coding: utf-8 -*-\nimport logging\nimport re\nimport requests\n\nfrom urlparse import urlparse\n\nlog = logging.getLogger(__name__)\n\nclass AntiZapret(object):\n PAC_URL = \"http://antizapret.prostovpn.org/proxy.pac\"\n \n def __init__(self):\n self.az_proxy = None\n self.loaded = False\n\n def __getstate__(self):\n self.ensure_loaded()\n return self.__dict__\n \n def ensure_loaded(self):\n if not self.loaded:\n self.load()\n\n def load(self):\n try:\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}\n res = requests.get(self.PAC_URL, headers=headers)\n res.raise_for_status()\n except requests.exceptions.RequestException as e:\n log.warn(\"Coldn't load PAC: %s\" % e)\n return\n data = res.content\n proxy = {}\n r = 
re.search(r'\"PROXY (.*?);', data)\n if r:\n proxy['http'] = r.group(1)\n r = re.search(r'\"HTTPS (.*?);', data)\n if r:\n proxy['https'] = r.group(1)\n self.az_proxy = proxy\n self.loaded = True\n\n def get_proxy_list(self):\n self.ensure_loaded()\n return self.az_proxy","sub_path":"resources/lib/support/antizapret.py","file_name":"antizapret.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"199343652","text":"import click\nimport click_log\nimport Consumer\nimport ConsumerThread\nimport csv\nimport logging\nimport LogPrinter\nimport Producer\nimport ProducerThread\n\nimport Queue\nimport random\nimport threading\nimport time\nimport sys\n\n\nfrom boto.s3.connection import S3Connection\nfrom write_to_s3 import write_file_to_s3\nfrom config_logging import configure_logging\n\nconfigure_logging(__name__)\n\n@click.command()\n@click.argument('csv_file', type=click.File('rU'))\n@click.argument('bucket_name', type=click.STRING)\n@click.option('-p', '--max-producers', default=3, help='The maximum number of producers to run')\n@click.option('-c', '--max-consumers', default=5, help='The maximum number of consumers to run')\n@click.option('-q', '--max-queue-size', default=32, help='The maximum number of items the producers can put in the queue before pausing to let the consumers catch up')\n@click.option('-l', '--max-log-queue-size', default=1000, help='The maximum number of items to log in the print queue')\n@click_log.simple_verbosity_option()\n@click_log.init(__name__)\ndef cli(csv_file, bucket_name, max_producers, max_consumers, max_queue_size, max_log_queue_size):\n \"\"\"This is an experimental script for reading a file and writing it to S3\n \"\"\"\n logging.info('Reading input file {filename} and writing to S3 bucket {bucket_name}'.format(filename=csv_file.name, bucket_name=bucket_name))\n\n work_queue = Queue.Queue(maxsize=max_queue_size)\n log_queue = Queue.Queue(maxsize=max_log_queue_size)\n\n producers=[]\n for x in range(0, max_producers):\n producer = Producer.Producer(csv_file)\n producers.append(ProducerThread.ProducerThread(name='producer-{number}'.format(number=x), queue=work_queue, producer=producer))\n producers[x].start()\n time.sleep(1)\n\n logger=LogPrinter.LogPrinter(log_name='Consumer-log')\n logger_thread=ConsumerThread.ConsumerThread(name='logger-thread', queue=log_queue, consumer=logger)\n logger_thread.start()\n time.sleep(1)\n\n consumers=[]\n for x in range(0, max_consumers):\n consumer = Consumer.Consumer(log_queue=log_queue)\n consumers.append(ConsumerThread.ConsumerThread(name='consumer-{number}'.format(number=x), queue=work_queue, consumer=consumer))\n consumers[x].start()\n time.sleep(1)\n\n while True:\n if len(producers) > 0:\n for producer in producers:\n if not producer.running:\n logging.info(\"Sending kill signal to {name}\".format(name=producer.name))\n producer.join()\n producers.remove(producer)\n time.sleep(5)\n else:\n break\n\n while True:\n if work_queue.qsize() > 0:\n time.sleep(5)\n else:\n for consumer in consumers:\n logging.info(\"Sending kill signal to {name}\".format(name=consumer.name))\n consumer.stop()\n consumer.join()\n logging.info(\"Sending kill signal to LogPrinter\")\n logger_thread.stop()\n logger_thread.join()\n exit(0)\n\nif __name__ == '__main__':\n cli(sys.argv[1:])\n # conn = S3Connection()\n #\n # # Check if the bucket exists. 
If not, exit 1\n    # if not conn.lookup(bucket_name):\n    #     logging.error(\"Bucket {bucket_name} does NOT exist\".format(bucket_name=bucket_name))\n    #     exit(1)\n    #\n    # bucket = conn.get_bucket(bucket_name)\n    #\n    # field_names = ['version', 'field', 'type', 'description', 'file']\n    #\n    # with csv_file as open_file:\n    #     csv_reader = csv.DictReader(open_file, dialect=csv.excel, fieldnames=field_names)\n    #     for row in csv_reader:\n    #         stripped_filename = 'data/{filename}'.format(filename=row['file'].strip())\n    #\n    #         try:\n    #             q.put((stripped_filename, bucket))\n    #             # write_file_to_s3(stripped_filename, bucket)\n    #         except IOError:\n    #             logging.error('Error writing file {filename}'.format(filename=stripped_filename))","sub_path":"examples/mytest/mytest.py","file_name":"mytest.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"211855600","text":"from utils.data_structure_utils import Queue, Stack\nfrom expression.Function import Sin, Asin, Cos\nfrom expression.Variable import Variable\nfrom expression.Value import Value\nfrom collections import defaultdict\n\n\ndef simplify(expr):\n    previous = {}\n\n    queue = Queue()\n    queue.push(expr)\n\n    while not queue.is_empty():\n        expr = queue.pop()\n\n        transformations = expr.get_transformations()\n        print(expr)\n        print(len(transformations))\n        if len(transformations) == 0:\n            return expr\n        for new_expr in transformations:\n            if new_expr not in previous:\n                previous[new_expr] = expr\n                queue.push(new_expr)\n\n    return previous\n\ndef simplify2(expr):\n    blacklist = set()\n    while True:\n        ts = expr.get_transformations()\n        n = next((e for e in ts if e not in blacklist), expr)\n        if n == expr:\n            return expr\n        expr = n\n        blacklist.add(expr)\n        print(expr)\n\n\nif __name__ == '__main__':\n    x = Variable('x')\n    expr = ((Value(2) * (x^2) + (Value(3) * Sin(x))) // x) * (x + 1)\n    simp = simplify2(expr)\n    print(simp)\n","sub_path":"expression/simplifier/GraphSimplifier.py","file_name":"GraphSimplifier.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"226944331","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.optimize import leastsq\nfrom scipy.integrate import odeint, solve_ivp\n\nfrom dataManagement import data\n\nfrom numpy import exp, linspace, pi, random, sign, sin\nfrom lmfit import Parameters, fit_report, minimize\n\n'''\nWe have implemented the SEIRD model, a variation of the classical SIR model.\ndS/dt = - (beta/N) * S * I\ndE/dt = (beta/N) * S * I - alpha * E\ndI/dt = alpha * E - 1/T * I \ndR/dt = (1 - epsilon)/T * I\ndD/dt = epsilon/T * I \n\nbeta = infection rate\nalpha = incubation rate\nT = average infectious period\ngamma = 1/T\nepsilon = fraction of all removed individuals who die\n'''\n\n#total population of Emilia Romagna\nN = 4400000\n\n\"\"\"\n    At this stage we decide how to proceed with the model: \n    - totalDays: the total number of days for which observed data are available\n    We then split the total days into two parts to separate the estimates: \n    - daysIteration: the days that will be taken into account for the successive iterations\n    - daysFirstIteration: the days set aside for the first iteration, needed for an initial estimate\n\"\"\"\n\ntotalDays = 200\ndaysIteration = 170\ndaysFirstIteration = totalDays - daysIteration\n\n\"\"\"\n    I define deltaT for the time discretization, i.e. the width of the intervals over which I recompute the 
minimization.\n    From this I derive the total number of intervals, given by the truncated division of the number of days devoted to this analysis \n    (daysIteration) by the interval width\n    \"\"\"\ndeltaT = 10\nmindelta = 0\nnumbersOfInterval = daysIteration // deltaT\n\n\"\"\"\nI initialize the vectors in which I will store all the parameters for the individual intervals \n\\\\TODO: REFACTORING HERE IS REQUIRED\n\"\"\"\n\nbetaEstimated = []\nalphaEstimated = []\nepsilonEstimated = []\ngammaEstimated = []\nroEstimated = []\n\n\ndef betaFunction(t,ro):\n    if(t < daysFirstIteration):\n        position = 0\n    else:\n        position = int(((t - daysFirstIteration)//deltaT))\n    tk = daysFirstIteration + deltaT*(position -1)\n    result = betaEstimated[position]*(1 - ro*(t - tk)/t)\n    return result\n\n\n# define the SEIRD model\ndef odeModel(z, t, beta, alpha, gamma, epsilon):\n    S, E, I, R, D = z\n\n    dSdt = -beta*S*I/N\n    dEdt = beta*S*I/N - alpha*E\n    dIdt = alpha*E - gamma*I\n    dRdt = gamma*I * (1 - epsilon)\n    dDdt = gamma * I * epsilon\n\n    return [dSdt, dEdt, dIdt, dRdt, dDdt]\n\n\ndef fodeModel(z, t, ro, alpha, gamma, epsilon):\n    S, E, I, R, D = z\n\n    dSdt = -betaFunction(t,ro)*S*I/N\n    dEdt = betaFunction(t,ro)*S*I/N - alpha*E\n    dIdt = alpha*E - gamma*I\n    dRdt = gamma*I * (1 - epsilon)\n    dDdt = gamma * I * epsilon\n\n    return [dSdt, dEdt, dIdt, dRdt, dDdt]\n\n\n# define the differential equation solver: I use the odeint function, which is based on the LSODA algorithm (variable step size)\ndef odeSolver(t, initial_conditions, params):\n    initE, initI, initR, initD = initial_conditions\n    initS = N - initE - initI - initR - initD\n    beta = params['beta']\n    alpha = params['alpha']\n    gamma = params['gamma']\n    epsilon = params['epsilon']\n\n    res = odeint(odeModel, [initS, initE, initI, initR, initD], t, args=(beta, alpha, gamma, epsilon))\n    return res\n\n\n\ndef fodeSolver(t, initial_conditions, params):\n    initE, initI, initR, initD = initial_conditions\n    initS = N - initE - initI - initR - initD\n    ro = params['ro']\n    alpha = params['alpha']\n    gamma = params['gamma']\n    epsilon = params['epsilon']\n\n    res = odeint(fodeModel, [initS, initE, initI, initR, initD], t, args=(ro, alpha, gamma, epsilon))\n    return res\n\n# Define the \"error\" function that must be minimized. 
This function contains the difference between the value computed by the model and the actual observed data\ndef error(params, initial_conditions, tspan, data, timek, timek_1):\n    sol = odeSolver(tspan, initial_conditions, params)\n    # the columns (infected, recovered, and deceased) are compared; Susceptible and Exposed are ignored\n    return (sol[:, 2:5] - data[timek:timek_1]).ravel()\n\ndef errorRO(ro, iteration, initial_conditions, tspan, data, timek, timek_1):\n    sol = fodeSolver(tspan, initial_conditions, [ro, alphaEstimated[iteration], gammaEstimated[iteration], epsilonEstimated[iteration]])\n    return (sol[:, 2:5] - data[timek:timek_1]).ravel()\n\nif __name__ == \"__main__\":\n# Take the initial values of infected, recovered and deceased directly from the civil protection database\n\n\n    initI = data[0, 0]\n    initE = initI*10\n    #initE = initI*10\n    initR = data[0, 1]\n    initD = data[0, 2]\n\n    # Set the initial parameters\n    T = 20\n    gamma = 1 / T\n    #gamma = 0.44\n    #alpha = 1 / 3.2\n    alpha = 0.52\n    #epsilon = 0.7\n    epsilon = 0.16\n    #beta = 0.077\n    beta = 0.1\n    R0 = beta*T\n\n    # The total days are actually 208 (in dpc-emilia); we take the first 200\n\n    #daysFirstIteration = 200\n\n    initial_conditions = [initE, initI, initR, initD]\n\n    # Create a vector from 0 to daysFirstIteration (time discretization)\n    tspan = np.arange(0, daysFirstIteration, 1)\n\n    parametersToOptimize = Parameters()\n    parametersToOptimize.add('beta', beta, min=0.1, max=0.3)\n    parametersToOptimize.add('alpha', alpha)\n    parametersToOptimize.add('gamma', gamma, min=0.04, max=0.05)\n    parametersToOptimize.add('epsilon', epsilon)\n\n    \"\"\" \n    Start the first parameter estimate on the first range of days (from 0 to daysFirstIteration)\n    \"\"\"\n    result = minimize(error, parametersToOptimize, args=(initial_conditions, tspan, data, 0, daysFirstIteration))\n    beta0 = result.params['beta'].value\n    alpha0 = result.params['alpha'].value\n    epsilon0 = result.params['epsilon'].value\n    gamma0 = result.params['gamma'].value\n\n    \"\"\"\n    Save the parameters into my lists\n    \"\"\"\n\n    ro0 = 0.9\n    #beta0, alpha0, gamma0, epsilon0 = result[\"\"]\n    betaEstimated.append(beta0)\n    alphaEstimated.append(alpha0)\n    epsilonEstimated.append(epsilon0)\n    gammaEstimated.append(gamma0)\n    roEstimated.append(ro0)\n\n    parametersToOptimize.add('beta', betaEstimated[0], min=0.1, max=0.3)\n    parametersToOptimize.add('alpha', alphaEstimated[0])\n    parametersToOptimize.add('gamma', gammaEstimated[0], min=0.04, max=0.05)\n    parametersToOptimize.add('epsilon', epsilonEstimated[0])\n    \n    model_init = odeSolver(tspan, initial_conditions, parametersToOptimize)\n\n    \n    indexInit = totalDays - daysIteration - 1\n\n    totalModelInfected = []\n    totalModelRecovered = []\n    totalModelDeath = []\n\n    totalModelInfected[0:daysFirstIteration] = model_init[:, 2]\n    totalModelRecovered[0:daysFirstIteration] = model_init[:, 3]\n    totalModelDeath[0:daysFirstIteration] = model_init[:, 4]\n\n\n    for i in range(0, numbersOfInterval):\n\n        timek = totalDays - daysIteration + deltaT*i\n        timek_analysis = timek - mindelta\n\n        timek_1 = totalDays - daysIteration + deltaT*(i+1)\n        timek_1_analysis = timek_1\n\n\n\n        tspank = np.arange(timek_analysis, timek_1_analysis, 1)\n        tspank_model = np.arange(timek, timek_1, 1)\n\n\n        esposti_k = data[timek_analysis, 0]*10\n\n        initial_conditions_k = [esposti_k, data[timek_analysis, 0], data[timek_analysis, 1], data[timek_analysis, 2]]\n\n        parametersToOptimize.add('beta', betaEstimated[i], min=0.1, max=0.3)\n        parametersToOptimize.add('alpha', alphaEstimated[i])\n        
parametersToOptimize.add('gamma', gammaEstimated[i], min=0.04, max=0.05)\n        parametersToOptimize.add('epsilon', epsilonEstimated[i])\n        #resultIteration = leastsq(error, np.asarray([beta, alpha, gamma, epsilon]), args=(initial_conditions_k, tspank, data, timek_analysis, timek_1_analysis))\n        resultIteration = minimize(error, parametersToOptimize, args=(initial_conditions_k, tspank, data, timek_analysis, timek_1_analysis))\n\n        betak = resultIteration.params['beta'].value\n        alphak = resultIteration.params['alpha'].value\n        epsilonk = resultIteration.params['epsilon'].value\n        gammak = resultIteration.params['gamma'].value\n\n\n        betaEstimated.append(betak)\n        alphaEstimated.append(alphak)\n        epsilonEstimated.append(epsilonk)\n        gammaEstimated.append(gammak)\n\n        parametersToOptimize.add('beta', betaEstimated[i+1], min=0.1, max=0.3)\n        parametersToOptimize.add('alpha', alphaEstimated[i+1])\n        parametersToOptimize.add('gamma', gammaEstimated[i+1], min=0.04, max=0.05)\n        parametersToOptimize.add('epsilon', epsilonEstimated[i+1])\n        # Compute the model over the window of the k-th iteration\n        modelk = odeSolver(tspank_model, initial_conditions_k, parametersToOptimize)\n\n        # Save the data for the time window of width deltaT\n        totalModelInfected[timek:timek_1] = modelk[:, 2]\n        totalModelRecovered[timek:timek_1] = modelk[:, 3]\n        totalModelDeath[timek:timek_1] = modelk[:, 4]\n\n    datapoints = daysFirstIteration + deltaT*numbersOfInterval\n    tspanfinal = np.arange(0, datapoints, 1)\n    tspanparemeter = np.arange(totalDays-daysIteration, 200,deltaT)\n\n    plt.plot()\n    #ro0 = 0.9\n\n\n    #Plot initial Valued Model\n    #plt.plot(tspan, I, label=\"Infected (Model)\")\n    #plt.plot(tspan, R, label=\"Recovered (Model)\")\n    #plt.plot(tspan, D, label=\"Death (Model)\")\n\n    #Plot Model with estimated parameters\n    #print(totalModel)\n    plt.plot(tspanfinal, totalModelInfected[:], label=\"Infected (Model 2)\")\n    plt.plot(tspanfinal, totalModelRecovered[:], label=\"Recovered (Model 2)\")\n    plt.plot(tspanfinal, totalModelDeath[:], label=\"Death(Model 2)\")\n\n    #plt.plot(tspan, E1, label=\"Exposed (Model 2 )\")\n    #plt.plot(tspan, I1, label=\"Infected (Model)\")\n    #plt.plot(tspan, R1, label=\"Recovered (Model)\")\n    #plt.plot(tspan, D1, label=\"Death (Model)\")\n\n    #Plot Observed Value\n\n    plt.plot(tspanfinal, data[0:datapoints, 0], label=\"Infected(Observed)\")\n    plt.plot(tspanfinal, data[0:datapoints, 1], label=\"Recovered (Observed)\")\n    plt.plot(tspanfinal, data[0:datapoints, 2], label=\"Death (Observed)\")\n\n\n    plt.legend()\n    plt.show()\n","sub_path":"optimization.py","file_name":"optimization.py","file_ext":"py","file_size_in_byte":10007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"408010523","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MaxValueValidator\nfrom django.contrib.contenttypes import generic\n\nfrom smart_selects.db_fields import ChainedForeignKey\n\nfrom apps.adverts.choices import *\nfrom apps.pages.models import Brand, Series, Model, Modification\nfrom apps.geo.models import Country, City\nfrom apps.images.models import Image\nfrom apps.images.holders import ImageHolder\n\nfrom djangosphinx.models import SphinxSearch\n\nclass Advert(models.Model, ImageHolder):\n    #search = SphinxSearch() # optional: defaults to db_table\n\n    INT_LIMIT = 999999999\n\n    # INTERNAL\n    user = models.ForeignKey(User)\n    date_added = models.DateTimeField(auto_now_add=True, 
verbose_name=u'Дата публикации', blank=True, null=True)\n\n    published = models.BooleanField(verbose_name=u'Опубликовано', default=False)\n    banned = models.BooleanField(verbose_name=u'Заблокировано', default=False)\n    has_images = models.BooleanField(verbose_name=u'Есть изображение', default=False)\n    # certification = models.BooleanField(default=False, verbose_name=u'Сертифицирован')\n\n    # MAIN: Vehicle\n    brand = models.ForeignKey(Brand, verbose_name=u'Марка' ).extra(group=('general',))\n    series = ChainedForeignKey(Series, verbose_name=u'Серия', chained_field=\"brand\", chained_model_field=\"brand\" ).extra(group=('general',))\n    model = ChainedForeignKey(Model, verbose_name=u'Модель', chained_field=\"series\", chained_model_field=\"series\").extra(group=('general',))\n    modification = ChainedForeignKey(Modification, verbose_name=u'Модификация', chained_field=\"model\", chained_model_field=\"model\" ).extra(group=('general',))\n\n    # MAIN: Numeric fields\n    price = models.IntegerField(verbose_name=u'Цена (руб.)', validators=[MaxValueValidator(INT_LIMIT)] ).extra(group=('general',))\n    run = models.IntegerField(verbose_name=u'Пробег (км)', validators=[MaxValueValidator(INT_LIMIT)] ).extra(group=('general', 'default_details'))\n    engine_volume = models.IntegerField(verbose_name=u'Объём двигателя (см куб.)', validators=[MaxValueValidator(INT_LIMIT)] ).extra(group=('general', 'default_details'))\n    engine_power = models.IntegerField(verbose_name=u'Мощность (л. с.)', validators=[MaxValueValidator(INT_LIMIT)] ).extra(group=('general', 'default_details'))\n    vin = models.IntegerField(verbose_name=u'VIN', validators=[MaxValueValidator(INT_LIMIT)], blank=True, null=True).extra(group=('general', 'default_details'))\n\n    # MAIN: Selects\n    year = models.IntegerField(verbose_name=u'Год выпуска', choices=YEAR, max_length=4).extra(group=('general', 'default_details', 'extra_select'))\n    color = models.IntegerField(verbose_name=u'Цвет', choices=COLOR, max_length=2).extra(group=('general', 'default_details', 'extra_select'))\n    body = models.IntegerField(verbose_name=u'Тип кузова', choices=BODY, max_length=2).extra(group=('general', 'default_details', 'extra_select'))\n    engine = models.IntegerField(verbose_name=u'Тип двигателя', choices=ENGINE, max_length=2).extra(group=('general', 'default_details', 'extra_select'))\n    drive = models.IntegerField(verbose_name=u'Привод', choices=DRIVE, max_length=2).extra(group=('general', 'default_details', 'extra_select'))\n    transmission = models.IntegerField(verbose_name=u'Трансмиссия', choices=TRANSMISSION, max_length=2).extra(group=('general', 'default_details', 'extra_select'))\n    wheel_position = models.IntegerField(verbose_name=u'Руль', choices=WHEEL_POSITION, max_length=2).extra(group=('general', 'default_details', 'extra_select'))\n    state = models.IntegerField(verbose_name=u'Состояние', choices=STATE, max_length=2).extra(group=('general', 'default_details', 'extra_select'))\n    custom_state = models.IntegerField(verbose_name=u'Таможня', choices=CUSTOM_STATE, max_length=2).extra(group=('general', 'default_details', 'extra_select'))\n    owner_count = models.IntegerField(verbose_name=u'Хозяев по ПТС', choices=OWNER_COUNT, max_length=2).extra(group=('general', 'default_details', 'extra_select'))\n    exchange = models.IntegerField(verbose_name=u'Обмен', choices=EXCHANGE, max_length=2).extra(group=('general', 'default_details', 'extra_select'))\n\n    # MAIN: Description\n    text = models.TextField(blank=True, verbose_name=u'Описание').extra(group=('general', 
'default_details'))\n\n    # CONTACTS: Seller\n    contact_name = models.CharField(verbose_name=u'Контактное лицо', max_length=255).extra(group='contacts')\n    phone = models.CharField(verbose_name=u'Телефон', max_length=255).extra(group='contacts')\n\n    # CONTACTS: Viewing location\n    country = models.ForeignKey(Country, verbose_name=u'Страна', null=True ).extra(group='contacts')\n    city = ChainedForeignKey(City, verbose_name=u'Город', null=True, chained_field=\"country\", chained_model_field=\"country\").extra(group='contacts')\n\n    # EQUIPMENT: Selects\n    conditioner = models.IntegerField(verbose_name=u'Кондиционер', choices=CONDITIONER, max_length=2, blank=True, null=True).extra(group='complect')\n    multimedia = models.IntegerField(verbose_name=u'Мультимедия', choices=MULTIMEDIA, max_length=2, blank=True, null=True).extra(group='complect')\n    pillows = models.IntegerField(verbose_name=u'Подушки безопасности', choices=PILLOWS, max_length=2, blank=True, null=True).extra(group='complect')\n    driver_seat_control = models.IntegerField(verbose_name=u'Рег-ка сиденья водителя', choices=SEAT_CONTROL, max_length=2, blank=True, null=True).extra(group='complect')\n    passenger_seat_control = models.IntegerField(verbose_name=u'Рег-ка сиденья пассажира', choices=SEAT_CONTROL, max_length=2, blank=True, null=True).extra(group='complect')\n    wheel_control = models.IntegerField(verbose_name=u'Рег-ка Руля', choices=WHEEL_CONTROL, max_length=2, blank=True, null=True).extra(group='complect')\n    interior_fabric = models.IntegerField(verbose_name=u'Материал салона', choices=INTERIOR_FABRIC, max_length=2, blank=True, null=True).extra(group='complect')\n    wheel_gain = models.IntegerField(verbose_name=u'Усилитель руля', choices=WHEEL_GAIN, max_length=2, blank=True, null=True).extra(group='complect')\n    interior_color = models.IntegerField(verbose_name=u'Цвет салона', choices=INTERIOR_COLOR, max_length=2, blank=True, null=True).extra(group='complect')\n    powered = models.IntegerField(verbose_name=u'Электростеклоподъемники', choices=POWERED, max_length=2, blank=True, null=True).extra(group='complect')\n\n    # EQUIPMENT: Flags\n    brakes_abs = models.BooleanField(verbose_name=u'Антиблокировоч. сист. (ABS)', default=False).extra(group='complect')\n    traction_control = models.BooleanField(verbose_name=u'Антипробуксовочная система ', default=False).extra(group='complect')\n    board_computer = models.BooleanField(verbose_name=u'Бортовой компьютер', default=False).extra(group='complect')\n    gbo = models.BooleanField(verbose_name=u'Газобаллон. оборуд. 
(ГБО)', default=False).extra(group='complect')\n rain_sensor = models.BooleanField(verbose_name=u'Датчик дождя', default=False).extra(group='complect')\n light_sensor = models.BooleanField(verbose_name=u'Датчик света', default=False).extra(group='complect')\n cruise_control = models.BooleanField(verbose_name=u'Круиз-контроль', default=False).extra(group='complect')\n xenon_headlights = models.BooleanField(verbose_name=u'Ксеноновые фары', default=False).extra(group='complect')\n alloy_wheels = models.BooleanField(verbose_name=u'Легкосплавные диски', default=False).extra(group='complect')\n hatch = models.BooleanField(verbose_name=u'Люк', default=False).extra(group='complect')\n heated_mirrors = models.BooleanField(verbose_name=u'Обогрев зеркал', default=False).extra(group='complect')\n seat_heating = models.BooleanField(verbose_name=u'Обогрев сидений', default=False).extra(group='complect')\n headlamp_washer = models.BooleanField(verbose_name=u'Омыватель фар', default=False).extra(group='complect')\n security_system = models.BooleanField(verbose_name=u'Охранная система', default=False).extra(group='complect')\n parktronic = models.BooleanField(verbose_name=u'Парктроник', default=False).extra(group='complect')\n heading_hold_system = models.BooleanField(verbose_name=u'Сист. курсовой стабилизации', default=False).extra(group='complect')\n tinted_windows = models.BooleanField(verbose_name=u'Тонированные стекла', default=False).extra(group='complect')\n central_locking = models.BooleanField(verbose_name=u'Центральный замок', default=False).extra(group='complect')\n electromirrors = models.BooleanField(verbose_name=u'Электрозеркала', default=False).extra(group='complect')\n\n images = generic.GenericRelation(Image)\n\n def get_fields(self, group_name):\n \"\"\" Используется в шаблонах для вывода полей в циклах.\n - Добавляет к field свойство value, хранящее значение поля.\n - В случае селекта, значение получается через get_FIELD_display()\"\"\"\n for field in self._meta.fields + self._meta.many_to_many:\n if group_name in field.group:\n if field.choices:\n field.value = eval('self.get_'+field.name+'_display()') # Ну неужели нельзя по-другому?\n else:\n field.value = getattr(self, field.name)\n yield field\n\n def has_complect_fields(self):\n \"\"\"Используется в шаблонах для проверки наличия хоть чего-то имеющегося в комплектации, чтобы не выводить html-обёртку\n т.к. 
False в комплектации - тоже значение.\"\"\"\n        for a in self.get_fields('complect'):\n            if a.value:\n                return True\n        return False\n\n    # TODO: Доделать наконец\n    # search = SphinxSearch(weights={\n    #     'price': 100,\n    #     'run': 30\n    # })\n\n    # TODO: Узнать как взять все обязательные поля, и только тогда дать возможность к публикации вместо того что используется в\n    # TODO: advert.views.publish_ad","sub_path":"apps/adverts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"310487304","text":"# Problem: sort 10 numbers.\n# Analysis: use selection sort - compare the first element against the remaining ones and swap in the smallest, then repeat from the second element, and so on.\na = []\nfor n in range(1, 11):\n    i = int(input(\"Enter a number\"))\n    a.append(i)\nprint(a)\nfor j in range(1, 10):\n    for k in range(0, j):\n        if a[j] < a[k]:\n            x = a[j]\n            a[j] = a[k]\n            a[k] = x\nprint(a)\n","sub_path":"习题037.py","file_name":"习题037.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"583109918","text":"import os\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nimport plotly\nimport pandas as pd\n\napp = dash.Dash()\n# This is the variation which updates on refresh\n\n\n# This function will get data from extraction\n# For now, let's have a 2D array; the index of the array is the question\n# Array[0] is for time/date of solved questions, Array[1] for complexity\n# Let's generate random data for now.\n\n# we will use just -> team_data = extract()\nfrom random import randint\nran_comp = ['logn', 'n', 'nlogn', 'n^2', 'n^3']\nteam_data = [[randint(1, 20) for i in range(15)], [ran_comp[randint(0, 4)] for i in range(15)]]\nteam_data2 = [[randint(1, 20) for i in range(15)], [ran_comp[randint(0, 4)] for i in range(15)]]\nteam_data_que_time = team_data[0]; #for line chart\nteam_data_que_time2 = team_data2[0]\ncomp_values = [team_data[1].count(complexity) for complexity in ran_comp] #for pie chart\ncomp_values2 = [team_data2[1].count(complexity) for complexity in ran_comp]\n# random team_data generation block end\n\n\n\n\n\n#https://community.plotly.com/t/two-graphs-side-by-side/5312\n#link to troubleshoot problems in display of multiple charts in 1 page\n\n\ndef load_page_layout():\n    # Again we will just use team_data = extract()\n    team_data = [[randint(1, 20) for i in range(15)], [ran_comp[randint(0, 4)] for i in range(15)]]\n    team_data2 = [[randint(1, 20) for i in range(15)], [ran_comp[randint(0, 4)] for i in range(15)]]\n    team_data_que_time = team_data[0]; #for line chart\n    team_data_que_time2 = team_data2[0]\n    comp_values = [team_data[1].count(complexity) for complexity in ran_comp] #for pie chart\n    comp_values2 = [team_data2[1].count(complexity) for complexity in ran_comp]\n    # random team_data generation block end\n\n    print(team_data[0])\n    return html.Div([\n        html.Div([\n            html.H1(\n                children='XYZ data',\n                style={\n                    'textAlign': 'center',\n                    'color': 'black',\n                }\n            ),\n            html.Div([\n                html.Label('display-value'),\n                dcc.Dropdown(\n                    id='dropdown',\n                    options=[\n                        {'label': 'Whole Team', 'value': 'TM'},\n                        {'label': 'Team Member 1', 'value': 'TM1'},\n                        {'label': 'Team Member 2', 'value': 'TM2'},\n                        {'label': 'Team Member 3', 'value': 'TM3'},\n                        {'label': 'Team Member 4', 'value': 'TM4'},\n                    ],\n                    value='TM'\n                ),\n                html.Div(id='display-value')\n            ],\n            style={\n                'textAlign': 'center',\n                'color': 'black',\n            }\n            ),\n            html.Div([\n                dcc.Graph(\n                    id='que_solved',\n                    figure={\n                 
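# (Added note) A Dash figure dict pairs 'data' (a list of trace dicts; here a single line trace) with 'layout' (axis titles, hover behavior); dcc.Graph consumes both keys.\n                        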
'data': [\n {'y' : team_data_que_time, 'type' : 'line'},\n ],\n 'layout': go.Layout(\n xaxis={'title' : 'Number of questions'},\n yaxis={'title' : 'Days'},\n hovermode='closest'\n )\n }\n )\n ], className = \"row\"),\n html.Div([\n dcc.Graph(\n id='que_solved_pie',\n figure={\n 'data': [\n {'labels' : ran_comp, 'values' : comp_values, 'type' : 'pie'},\n ],\n 'layout': {\n }\n }\n )\n ], className = \"row\"),\n ])\n ])\n'''\napp.layout = html.Div([\n dcc.Graph(\n id='que_solved_pie',\n figure={\n 'data': [\n {'labels' : ran_comp, 'values' : [5, 3, 8, 15, 4], 'type' : 'pie'},\n ],\n 'layout': {\n }\n }\n )\n])\n'''\n\napp.layout = load_page_layout\n\napp.css.append_css({\n 'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'\n})\n\n# Below code is for interactivity. \n\n'''\n@app.callback(dash.dependencies.Output('que_solved', 'figure'),\n [dash.dependencies.Input('dropdown', 'value')])\ndef update_line(value):\n fig = plotly.subplots.make_subplots()\n #print(\"----------\" + str(type(value)))\n if(value == 'TM'):\n fig={\n 'data': [\n {'y' : team_data_que_time, 'type' : 'line'},\n ],\n 'layout': go.Layout(\n xaxis={'title' : 'Number of questions'},\n yaxis={'title' : 'Days'},\n hovermode='closest'\n )\n }\n else:\n fig={\n 'data': [\n {'y' : team_data_que_time2, 'type' : 'line'},\n ],\n 'layout': go.Layout(\n xaxis={'title' : 'Number of questions'},\n yaxis={'title' : 'Days'},\n hovermode='closest'\n )\n }\n\n return fig\n\n@app.callback(dash.dependencies.Output('que_solved_pie', 'figure'),\n [dash.dependencies.Input('dropdown', 'value')])\ndef update_pie(value):\n fig = plotly.subplots.make_subplots()\n #print(\"----------\" + str(type(value)))\n if(value == 'TM'):\n fig={\n 'data': [\n {'labels' : ran_comp, 'values' : comp_values, 'type' : 'pie'},\n ],\n 'layout': {\n }\n }\n else:\n fig={\n 'data': [\n {'labels' : ran_comp, 'values' : comp_values2, 'type' : 'pie'},\n ],\n 'layout': {\n }\n }\n\n return fig\n'''\nif __name__ == '__main__':\n app.run_server()\n","sub_path":"layouts_helperA.py","file_name":"layouts_helperA.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"31401177","text":"class Animals:\n weight = 0\n eyes = 2\n\n def weight_animal(self, amount):\n self.weight += amount\n\nclass Mammals(Animals):\n hooves = 4\n\n def Toilet(self):\n self.weight -= 5\n\nclass Birds(Animals):\n wings = 2\n flying = False\n\n def fly(self, boolean):\n self.flying = boolean\n\nclass Cows(Mammals):\n\n def voice_cow(self):\n print(\"moooooow\")\n\nclass Goats(Mammals):\n\n def voice_goat(self):\n print(\"BBBeeeeeee\")\n\nclass Sheeps(Mammals):\n \n def voice_sheep(self):\n print(\"BBBeeeebbbbee\")\n\n\nclass Pigs(Mammals):\n\n def voice_pig(self):\n print(\"hrruuuuu\")\n\nclass Ducks(Birds):\n\n def voice_duck(self):\n print(\"kkkkkrrrrreeeeeee\")\n\nclass Chikens(Birds):\n\n def voice_chiken(self):\n print(\"cccooooddddccccooodaaaaa\")\n\nclass Geese(Birds):\n\n def voice_gees(self):\n print(\"ggooooogggoooooo\")\n\npig1 = Pigs()\nprint(\"вес свиньи:\",pig1.weight)\npig1.weight_animal(100)\nprint(\"вес свиньи:\",pig1.weight)\npig1.voice_pig()\npig1.Toilet()\nprint(\"вес свиньи:\",pig1.weight)\n\ngoat1 = Goats()\nprint(\"вес козла:\",goat1.weight)\nprint(\"Сколько копыт у козла\",Goats.hooves,\"А сколько глаз у козла?\",Goats.eyes)\ngoat1.weight_animal(20)\nprint(\"вес козла:\",goat1.weight)\ngoat1.voice_goat()\n\ngees1 = Geese()\ngees1.voice_gees()\nprint(\"Гусь в 
небе?\",gees1.flying)\ngees1.fly(True)\nprint(\"Гусь в небе?\",gees1.flying,\"Сколько глаз у гуся?\",gees1.eyes)\ngees1.weight_animal(10)\nprint(\"Сколько гусь весит?\",gees1.weight)\n\nchiken1 = Chikens()\nchiken1.voice_chiken()\nprint(\"Курица в небе?\",chiken1.flying)\nchiken1.fly(True)\nprint(\"Курица в небе?\",chiken1.flying)\nprint(\"Сколько у курицы крыльев?\",chiken1.wings)","sub_path":"Loader.py","file_name":"Loader.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"421634960","text":"\"\"\"\nImplementation of a Modbus Client Using Twisted\n--------------------------------------------------\n\nExample run::\n\n from twisted.internet import reactor, protocol\n from resource.pymodbus.client.async import ModbusClientProtocol\n\n def printResult(result):\n print \"Result: %d\" % result.bits[0]\n\n def process(client):\n result = client.write_coil(1, True)\n result.addCallback(printResult)\n reactor.callLater(1, reactor.stop)\n\n defer = protocol.ClientCreator(reactor, ModbusClientProtocol\n ).connectTCP(\"localhost\", 502)\n defer.addCallback(process)\n\nAnother example::\n\n from twisted.internet import reactor\n from resource.pymodbus.client.async import ModbusClientFactory\n\n def process():\n factory = reactor.connectTCP(\"localhost\", 502, ModbusClientFactory())\n reactor.stop()\n\n if __name__ == \"__main__\":\n reactor.callLater(1, process)\n reactor.run()\n\"\"\"\nfrom twisted.internet import defer, protocol\nfrom resource.pymodbus.factory import ClientDecoder\nfrom resource.pymodbus.exceptions import ConnectionException\nfrom resource.pymodbus.transaction import ModbusSocketFramer\nfrom resource.pymodbus.transaction import FifoTransactionManager\nfrom resource.pymodbus.transaction import DictTransactionManager\nfrom resource.pymodbus.client.common import ModbusClientMixin\nfrom twisted.python.failure import Failure\n\n#---------------------------------------------------------------------------#\n# Logging\n#---------------------------------------------------------------------------#\nimport logging\n_logger = logging.getLogger(__name__)\n\n\n#---------------------------------------------------------------------------#\n# Connected Client Protocols\n#---------------------------------------------------------------------------#\nclass ModbusClientProtocol(protocol.Protocol, ModbusClientMixin):\n '''\n This represents the base modbus client protocol. 
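Requests are framed and written to the transport, and responses are matched back to their awaiting Deferreds by the transaction manager. 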
All the application\n layer code is deferred to a higher level wrapper.\n '''\n\n def __init__(self, framer=None, **kwargs):\n ''' Initializes the framer module\n\n :param framer: The framer to use for the protocol\n '''\n self._connected = False\n self.framer = framer or ModbusSocketFramer(ClientDecoder())\n if isinstance(self.framer, ModbusSocketFramer):\n self.transaction = DictTransactionManager(self, **kwargs)\n else: self.transaction = FifoTransactionManager(self, **kwargs)\n\n def connectionMade(self):\n ''' Called upon a successful client connection.\n '''\n _logger.debug(\"Client connected to modbus server\")\n self._connected = True\n\n def connectionLost(self, reason):\n ''' Called upon a client disconnect\n\n :param reason: The reason for the disconnect\n '''\n _logger.debug(\"Client disconnected from modbus server: %s\" % reason)\n self._connected = False\n for tid in self.transaction:\n self.transaction.getTransaction(tid).errback(Failure(\n ConnectionException('Connection lost during request')))\n\n def dataReceived(self, data):\n ''' Get response, check for valid message, decode result\n\n :param data: The data returned from the server\n '''\n self.framer.processIncomingPacket(data, self._handleResponse)\n\n def execute(self, request):\n ''' Starts the producer to send the next request to\n consumer.write(Frame(request))\n '''\n request.transaction_id = self.transaction.getNextTID()\n packet = self.framer.buildPacket(request)\n self.transport.write(packet)\n return self._buildResponse(request.transaction_id)\n\n def _handleResponse(self, reply):\n ''' Handle the processed response and link to correct deferred\n\n :param reply: The reply to process\n '''\n if reply is not None:\n tid = reply.transaction_id\n handler = self.transaction.getTransaction(tid)\n if handler:\n handler.callback(reply)\n else: _logger.debug(\"Unrequested message: \" + str(reply))\n\n def _buildResponse(self, tid):\n ''' Helper method to return a deferred response\n for the current request.\n\n :param tid: The transaction identifier for this response\n :returns: A defer linked to the latest request\n '''\n if not self._connected:\n return defer.fail(Failure(\n ConnectionException('Client is not connected')))\n\n d = defer.Deferred()\n self.transaction.addTransaction(d, tid)\n return d\n\n #----------------------------------------------------------------------#\n # Extra Functions\n #----------------------------------------------------------------------#\n #if send_failed:\n # if self.retry > 0:\n # deferLater(clock, self.delay, send, message)\n # self.retry -= 1\n\n\n#---------------------------------------------------------------------------#\n# Not Connected Client Protocol\n#---------------------------------------------------------------------------#\nclass ModbusUdpClientProtocol(protocol.DatagramProtocol, ModbusClientMixin):\n '''\n This represents the base modbus client protocol. 
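Unlike the connected variant above, responses arrive as datagrams and are matched to requests purely by transaction id, with no connection state tracked. 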
All the application\n layer code is deferred to a higher level wrapper.\n '''\n\n def __init__(self, framer=None, **kwargs):\n ''' Initializes the framer module\n\n :param framer: The framer to use for the protocol\n '''\n self.framer = framer or ModbusSocketFramer(ClientDecoder())\n if isinstance(self.framer, ModbusSocketFramer):\n self.transaction = DictTransactionManager(self, **kwargs)\n else: self.transaction = FifoTransactionManager(self, **kwargs)\n\n def datagramReceived(self, data, params):\n ''' Get response, check for valid message, decode result\n\n :param data: The data returned from the server\n :param params: The host parameters sending the datagram\n '''\n _logger.debug(\"Datagram from: %s:%d\" % params)\n self.framer.processIncomingPacket(data, self._handleResponse)\n\n def execute(self, request):\n ''' Starts the producer to send the next request to\n consumer.write(Frame(request))\n '''\n request.transaction_id = self.transaction.getNextTID()\n packet = self.framer.buildPacket(request)\n self.transport.write(packet)\n return self._buildResponse(request.transaction_id)\n\n def _handleResponse(self, reply):\n ''' Handle the processed response and link to correct deferred\n\n :param reply: The reply to process\n '''\n if reply is not None:\n tid = reply.transaction_id\n handler = self.transaction.getTransaction(tid)\n if handler:\n handler.callback(reply)\n else: _logger.debug(\"Unrequested message: \" + str(reply))\n\n def _buildResponse(self, tid):\n ''' Helper method to return a deferred response\n for the current request.\n\n :param tid: The transaction identifier for this response\n :returns: A defer linked to the latest request\n '''\n d = defer.Deferred()\n self.transaction.addTransaction(d, tid)\n return d\n\n\n#---------------------------------------------------------------------------#\n# Client Factories\n#---------------------------------------------------------------------------#\nclass ModbusClientFactory(protocol.ReconnectingClientFactory):\n ''' Simple client protocol factory '''\n\n protocol = ModbusClientProtocol\n\n#---------------------------------------------------------------------------#\n# Exported symbols\n#---------------------------------------------------------------------------#\n__all__ = [\n \"ModbusClientProtocol\", \"ModbusUdpClientProtocol\",\n \"ModbusClientFactory\",\n]\n","sub_path":"resource/pymodbus/client/async.py","file_name":"async.py","file_ext":"py","file_size_in_byte":7870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"287521953","text":"import shutil\nfrom filecmp import cmpfiles, dircmp\nfrom pathlib import Path\nfrom typing import Dict, Optional\n\nimport pytest\nfrom typer.testing import CliRunner\n\nfrom openapi_python_client.cli import app\n\n\ndef _compare_directories(\n record: Path,\n test_subject: Path,\n expected_differences: Optional[Dict[str, str]] = None,\n):\n first_printable = record.relative_to(Path.cwd())\n second_printable = test_subject.relative_to(Path.cwd())\n dc = dircmp(record, test_subject)\n missing_files = dc.left_only + dc.right_only\n if missing_files:\n pytest.fail(f\"{first_printable} or {second_printable} was missing: {missing_files}\", pytrace=False)\n\n expected_differences = expected_differences or {}\n _, mismatch, errors = cmpfiles(record, test_subject, dc.common_files, shallow=False)\n mismatch = set(mismatch)\n\n for file_name in mismatch | set(expected_differences.keys()):\n if file_name not in expected_differences:\n continue\n if 
file_name not in mismatch:\n            pytest.fail(f\"Expected {file_name} to be different but it was not\", pytrace=False)\n        generated = (test_subject / file_name).read_text()\n        assert generated == expected_differences[file_name], f\"Unexpected output in {file_name}\"\n        del expected_differences[file_name]\n        mismatch.remove(file_name)\n\n    if mismatch:\n        pytest.fail(\n            f\"{first_printable} and {second_printable} had differing files: {mismatch}, and errors {errors}\",\n            pytrace=False,\n        )\n\n    for sub_path in dc.common_dirs:\n        _compare_directories(record / sub_path, test_subject / sub_path, expected_differences=expected_differences)\n\n\ndef run_e2e_test(extra_args=None, expected_differences=None):\n    runner = CliRunner()\n    openapi_path = Path(__file__).parent / \"openapi.json\"\n    config_path = Path(__file__).parent / \"config.yml\"\n    gr_path = Path(__file__).parent / \"golden-record\"\n    output_path = Path.cwd() / \"my-test-api-client\"\n    shutil.rmtree(output_path, ignore_errors=True)\n\n    args = [\"generate\", f\"--config={config_path}\", f\"--path={openapi_path}\"]\n    if extra_args:\n        args.extend(extra_args)\n    result = runner.invoke(app, args)\n\n    if result.exit_code != 0:\n        raise result.exception\n    _compare_directories(gr_path, output_path, expected_differences=expected_differences)\n\n    import mypy.api\n\n    out, err, status = mypy.api.run([str(output_path), \"--strict\"])\n    assert status == 0, f\"Type checking client failed: {out}\"\n\n    shutil.rmtree(output_path)\n\n\ndef test_end_to_end():\n    run_e2e_test()\n\n\ndef test_custom_templates():\n    run_e2e_test(\n        extra_args=[\"--custom-template-path=end_to_end_tests/test_custom_templates\"],\n        expected_differences={\"README.md\": \"my-test-api-client\"},\n    )\n","sub_path":"end_to_end_tests/test_end_to_end.py","file_name":"test_end_to_end.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"456046707","text":"from . import base\n\n\nclass Migration(base.BaseMigration):\n    \"\"\"\n    Drop the private_initial_key column from the actor table\n    \"\"\"\n    table_name = \"invite_link_temp\"\n\n    forwards_query = f\"\"\"\n    ALTER TABLE actor DROP COLUMN IF EXISTS private_initial_key; \n    \"\"\"\n\n    backwards_query = f\"\"\"\"\"\"\n","sub_path":"auth_perms/core/migrations/012_delete_private_initial_key_column.py","file_name":"012_delete_private_initial_key_column.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"639295504","text":"import logging\nlogger = logging.getLogger(\"autoPwn.fuzzers.AFL\")\n\nimport os\nimport subprocess\nimport shlex\nfrom copy import copy  # used by compile_file/compile_make below\nfrom prettytable import PrettyTable\nfrom . import *\nfrom ..Config import global_config as GlobalConfig\n\ntry:\n    import tracer, angr, simuvex, fuzzer\nexcept:\n    logger.error(\"Unable to find required angr/mechaphish libraries. 
Make sure mechaphish is installed.\")\n exit(1)\n\n# Just to be sure...\ntry:\n input = raw_input\nexcept:\n pass\n\nAFL_ROOT = \"/home/angr/.virtualenvs/angr/bin/afl-unix/\"\n\nclass AFL(Fuzzer):\n\n def __init__(self, bininfo):\n \n self.fuzzer = None\n self.target = GlobalConfig.target\n self.target_args = GlobalConfig.arguments\n self.work_dir = GlobalConfig.work_dir\n self.threads = GlobalConfig.threads\n\n # Use QEMU or not?\n self.qemu = not bininfo.afl\n self.dictionary = None\n\n if GlobalConfig.args.disable_odr_violations:\n os.environ['ASAN_OPTIONS'] = 'abort_on_error=1:symbolize=0:detect_odr_violation=0'\n\n #########\n # Calls #\n #########\n # Implement these in your class!\n\n def alive(self):\n \"\"\"bool: Is the fuzzer alive and running?\"\"\"\n return self.fuzzer.alive\n\n def stats(self):\n \"\"\"str: Return string representing stats of AFL fuzzing.\"\"\"\n # afl_version\n\n table = PrettyTable([\" \",\"bitmap\",\"cycles\",\"execs\",\"pfavs\",\"tfavs\",\"crash\",\"hang\"])\n table.border = False # Border only takes up space!\n\n fuzzer_stats = self.fuzzer.stats\n \n # Each fuzzer instance is a row\n for fuzzerName in sorted(fuzzer_stats):\n fuzzerInstance = fuzzer_stats[fuzzerName]\n \n table.add_row([\n fuzzerName,\n fuzzerInstance['bitmap_cvg'],\n fuzzerInstance['cycles_done'],\n fuzzerInstance['execs_done'],\n fuzzerInstance['pending_favs'],\n fuzzerInstance['paths_favored'],\n fuzzerInstance['unique_crashes'],\n fuzzerInstance['unique_hangs'],\n ])\n \n return str(table)\n\n def start(self):\n \"\"\"Start the fuzzer.\"\"\"\n self.fuzzer.start()\n\n def kill(self):\n \"\"\"Kill the fuzzer.\"\"\"\n if self.fuzzer.alive:\n self.fuzzer.kill()\n self.fuzzer = None # Do i need to kill it like this?\n\n def get_paths(self):\n return self.fuzzer.queue()\n\n def get_bitmap(self):\n \"\"\"Return AFL map of paths for currently known paths.\"\"\"\n return self.fuzzer.bitmap()\n\n def pollenate(self, paths):\n \"\"\"pollenate the fuzzer with new seeds.\"\"\"\n self.fuzzer.pollenate(paths)\n\n def set_dictionary(self, dictionary):\n \"\"\"Sets the dictionary for this fuzzer to work with.\"\"\"\n self.dictionary = dictionary\n\n # Need to restart if we are running\n if self.fuzzer.alive:\n self.fuzzer.kill()\n self.fuzzer = None\n self.fuzzer.start()\n\n # If we're not alive, just set the variable\n else:\n self.fuzzer.dictionary = dictionary\n\n def quit(self):\n self.kill()\n exit(0)\n\n @staticmethod\n def compile_file(source, ASAN, MSAN, UBSAN):\n full_path = os.path.abspath(source)\n base = os.path.basename(full_path)\n dir = os.path.dirname(full_path)\n env = copy(os.environ)\n env['AFL_HARDEN'] = '1' # TODO: Make this an option?\n\n out_name = \"afl_\" + '.'.join(base.split(\".\")[:-1])\n\n # Guess which to use\n if base.split(\".\")[-1].lower() in [\"cpp\", \"cc\", \"C\", \"cxx\", \"c++\"]:\n clang = os.path.join(AFL_ROOT, \"afl-clang++\")\n else:\n clang = os.path.join(AFL_ROOT, \"afl-clang\")\n\n # Assuming CLang for now.\n #compile_line = \"{clang} -fsanitize=address -fsanitize=memory -fno-omit-frame-pointer -O1 -g {source} -o {out_name}\".format(source=base, out_name=out_name, clang=clang)\n compile_line = [clang,'-fno-omit-frame-pointer','-O2','-g']\n\n # These are exclusive\n if ASAN:\n env['AFL_USE_ASAN'] = \"1\"\n #compile_line.append('-fsanitize=address')\n elif MSAN:\n env['AFL_USE_MSAN'] = \"1\"\n #compile_line.append('-fsanitize=memory')\n #compile_line.append('-fsanitize-memory-track-origins')\n \n # This apparently might cause issues with AFL\n #if UBSAN:\n # 
compile_line.append('-fsanitize=undefined')\n\n compile_line.append('-o')\n compile_line.append(out_name)\n compile_line.append(base)\n\n subprocess.check_output(compile_line, cwd=dir, env=env)\n\n # Return the name of the new file\n return os.path.join(dir, out_name)\n\n @staticmethod\n def compile_make(command, ASAN, MSAN, UBSAN):\n env = copy(os.environ)\n env['AFL_HARDEN'] = '1' # TODO: Make this an option?\n env['CC'] = os.path.join(AFL_ROOT, \"afl-clang\")\n env['CXX'] = os.path.join(AFL_ROOT, \"afl-clang++\")\n env['CFLAGS'] = \"-fno-omit-frame-pointer -O2 -g\"\n env['CXXFLAGS'] = \"-fno-omit-frame-pointer -O2 -g\"\n\n # These are exclusive\n if ASAN:\n env['AFL_USE_ASAN'] = \"1\"\n elif MSAN:\n env['AFL_USE_MSAN'] = \"1\"\n\n subprocess.call(command, env=env, shell=True)\n\n ##############\n # Properties #\n ##############\n\n @property\n def fuzzer(self):\n \"\"\"The fuzzer instance. Automatically created if it was set to None.\"\"\"\n\n if self.__fuzzer is None:\n self.__fuzzer = fuzzer.Fuzzer(self.target, self.work_dir, afl_count=self.threads, qemu=self.qemu, target_opts=self.target_args, memory=\"none\")\n self.__fuzzer.dictionary = self.dictionary\n\n return self.__fuzzer\n\n @fuzzer.setter\n def fuzzer(self, fuzzer):\n self.__fuzzer = fuzzer\n\n @property\n def status(self):\n \"\"\"int: Return the status of the fuzzer.\"\"\"\n raise Exception(\"Not implemented.\")\n\n @property\n def qemu(self):\n \"\"\"bool: To use QEMU mode for AFL fuzzing.\"\"\"\n return self.__qemu\n\n @qemu.setter\n def qemu(self, qemu):\n assert type(qemu) is bool, \"Invalid type for qemu of '{}'\".format(type(qemu))\n self.__qemu = qemu\n\n @property\n def dictionary(self):\n \"\"\"str: Full path to dictionary for AFL to use.\"\"\"\n return self.__dictionary\n\n @dictionary.setter\n def dictionary(self, dictionary):\n # Santiy check. Don't try to set a path that doesn't exist\n if type(dictionary) is str and not os.path.exists(dictionary):\n logger.error(\"Dictionary doesn't exist! 
Not setting.\")\n            self.__dictionary = None\n\n        else:\n            self.__dictionary = dictionary\n","sub_path":"autoPwn/fuzzers/AFL.py","file_name":"AFL.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"196971642","text":"class Solution:\n    def canPartition(self, nums):\n        \"\"\"\n        Dynamic programming: dp[i] records whether some subset of the numbers can sum to i (True or False)\n        :type nums: List[int]\n        :rtype: bool\n        \"\"\"\n        if sum(nums) % 2 == 1:\n            return False\n\n        half_sum = int(sum(nums) / 2)\n        n = len(nums)\n        dp = [False] * (half_sum + 1)\n        dp[0] = True\n\n        for num in nums:\n            for j in range(half_sum, num - 1, -1):\n                # dp[j] means this number is skipped; dp[j-num] means it is taken\n                dp[j] = dp[j] or dp[j - num]\n        return dp[half_sum]\n\nprint(Solution().canPartition(nums=[1,5,11,5]))","sub_path":"leetcode/0315-分割等和子集.py","file_name":"0315-分割等和子集.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}{"seq_id":"357885533","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n\nfrom .models import BlogPost, Post\nfrom .forms import BlogPostForm, PostForm\n\n# Create your views here.\ndef index(request):\n    \"\"\"The home page for Blog.\"\"\"\n    return render(request, 'blogs/index.html')\n\ndef blogposts(request):\n    \"\"\"Show all blogposts.\"\"\"\n    blogposts = BlogPost.objects.order_by('date_added')\n    context = {'blogposts': blogposts}\n    return render(request, 'blogs/blogposts.html', context)\n\ndef blogpost(request, blogpost_id):\n    \"\"\"Show a single blogpost and all its posts.\"\"\"\n    blogpost = BlogPost.objects.get(id=blogpost_id)\n    posts = blogpost.post_set.order_by('-date_added')\n    context = {'blogpost': blogpost, 'posts': posts}\n    return render(request, 'blogs/blogpost.html', context)\n\ndef new_blogpost(request):\n    \"\"\"Add a new blogpost.\"\"\"\n    if request.method != 'POST':\n        # No data submitted; create a blank form.\n        form = BlogPostForm()\n    else:\n        # POST data submitted; process data.\n        form = BlogPostForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect(reverse('blogs:blogposts'))\n\n    context = {'form': form}\n    return render(request, 'blogs/new_blogpost.html', context)\n\ndef new_post(request, blogpost_id):\n    \"\"\"Add a new post for a particular blogpost.\"\"\"\n    blogpost = BlogPost.objects.get(id=blogpost_id)\n\n    if request.method != 'POST':\n        # No data submitted; create a blank form.\n        form = PostForm()\n    else:\n        # POST data submitted; process data.\n        form = PostForm(data=request.POST)\n        if form.is_valid():\n            new_post = form.save(commit=False)\n            new_post.blogpost = blogpost\n            new_post.save()\n            return HttpResponseRedirect(reverse('blogs:blogpost',\n                                        args=[blogpost_id]))\n\n    context = {'blogpost': blogpost, 'form': form}\n    return render(request, 'blogs/new_post.html', context)\n\ndef edit_post(request, post_id):\n    \"\"\"Edit an existing post.\"\"\"\n    post = Post.objects.get(id=post_id)\n    blogpost = post.blogpost\n    \n    if request.method != 'POST':\n        # Initial request; pre-fill the form with the current post.\n        form = PostForm(instance=post)\n    else:\n        # POST data submitted; process data.\n        form = PostForm(instance=post, data=request.POST)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect(reverse('blogs:blogpost',\n                                        args=[blogpost.id]))\n    context = {'post': post, 'blogpost': blogpost, 'form': form}\n    return render(request, 'blogs/edit_post.html', 
context)\n","sub_path":"blogs.views.py","file_name":"blogs.views.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"623028128","text":"import scipy.special as special\n\nfrom UQpy.Distributions import Uniform\nfrom UQpy.Surrogates.PCE.Polynomials import Polynomials\n\n\nclass Legendre(Polynomials):\n \"\"\"\n Class of univariate polynomials appropriate for data generated from a\n uniform distribution.\n\n **Inputs:**\n\n * **degree** ('int'):\n Maximum degree of the polynomials.\n\n * **dist_object** ('class'):\n Distribution object of the generated samples.\n\n **Methods:**\n \"\"\"\n\n def __init__(self, degree, dist_object):\n super().__init__(dist_object, degree)\n self.degree = degree\n self.pdf = self.dist_object.pdf\n\n def get_polys(self, x):\n \"\"\"\n Calculates the normalized Legendre polynomials evaluated at sample points.\n\n **Inputs:**\n\n * **x** (`ndarray`):\n `ndarray` containing the samples.\n\n * **y** (`ndarray`):\n `ndarray` containing the samples.\n\n **Outputs:**\n\n (`list`):\n Returns a list of 'ndarrays' with the design matrix and the\n normalized polynomials.\n\n \"\"\"\n a, b = -1, 1\n m, scale = Polynomials.get_mean(self), Polynomials.scale(self)\n x_ = Polynomials.standardize_uniform(x, m, scale)\n\n uni = Uniform(a, b - a)\n pdf_st = uni.pdf\n\n p = []\n for i in range(self.degree):\n p.append(special.legendre(i, monic=False))\n\n return Polynomials.normalized(self.degree, x_, a, b, pdf_st, p)","sub_path":"src/UQpy/Surrogates/PCE/polynomials/Legendre.py","file_name":"Legendre.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"156648670","text":"#!/usr/bin/env python3\n# Emulation of encoding X bytes of semirandom data to XGMII format and then to 64b/66b format\n# Model: Queue <-> MAC <-XGMII-bus-> PCS <-PHY-bus-> PMA <-> MDI\n# XGMII encoding is 8b/9b format - 1 bit of ctrl for every 8 bits of data\n# 64b/66b encoding - 2 bits of ctrl for every 64 bits of data\n#\n## IMPORT\nfrom collections import deque\nfrom random import randint\nfrom textwrap import wrap\nfrom sys import argv\n\nfrom bitstring import BitArray\n#\n## IP Payload size\nif len(argv) > 1:\n PAYLOAD_BYTES = int(argv[1]) # bytes of IP data\nelse:\n PAYLOAD_BYTES = 64 # Change here\n#\n## CONST\nETH_L1 = {\n 'PREAMBLE': BitArray(hex='55555555555555'),\n 'SFD': BitArray(hex='d5')\n}\nETH_L2 = {\n 'DSTMAC': BitArray(hex='deadbeef4dad'),\n 'SRCMAC': BitArray(hex='badb00b4feed'),\n 'TYPE': BitArray(hex='0800'),\n 'FCS': BitArray(hex='11223344')\n}\nPAYLOAD = BitArray(bin=''.join([str(randint(0,1)) for _ in range(PAYLOAD_BYTES*8)]))\nDATASET = sum([ETH_L1['PREAMBLE'], ETH_L1['SFD'], ETH_L2['DSTMAC'], ETH_L2['SRCMAC'], ETH_L2['TYPE'], PAYLOAD, ETH_L2['FCS']])\n#\nXGMII_CTRL = {\n 'CTRL': BitArray(bin='1'),\n 'DATA': BitArray(bin='0')\n}\nXGMII_SEQ = {\n 'IDLE': BitArray(hex='ff'), # CCCC_CCCC\n 'START': BitArray(hex='c0'), # SDDD_DDDD\n 'DATA': BitArray(hex='00') # DDDD_DDDD\n}\nXGMII_CODE = {\n 'I': BitArray(hex='07'), # IDLE\n 'S': BitArray(hex='fb'), # START\n 'T': BitArray(hex='fd'), # TERMINATE\n 'E': BitArray(hex='fe') # ERROR\n}\nENC64_66_CTRL = {\n 'DATA': BitArray(bin='01'),\n 'CTRL': BitArray(bin='10')\n}\nENC64_66_CODE = {\n 'C': BitArray(hex='1e0e1c3870e1c387'), # CCCC_CCCC\n 'S': BitArray(hex='78'),# SDDD_DDDD\n 0: BitArray(hex='87'), # pos0: TCCC_CCCC\n 1: BitArray(hex='99'), # pos1: 
DTCC_CCCC\n 2: BitArray(hex='aa'), # pos2: DDTD_CCCC\n 3: BitArray(hex='b4'), # pos3: DDDT_CCCC\n 4: BitArray(hex='cc'), # pos4: DDDD_TCCC\n 5: BitArray(hex='d2'), # pos5: DDDD_DTCC\n 6: BitArray(hex='e1'), # pos6: DDDD_DDTC\n 7: BitArray(hex='ff'), # pos7: DDDD_DDDT\n}\nFILL = [i for i in wrap(BitArray(hex='1e0e1c3870e1c387').bin, 8)]\n## VAR\n#\nq = deque(maxlen=1500)\nxgmii_bus = []\nphy_bus = []\n#\n## CLASS\nclass QueueClass:\n def __init__(self):\n self.dataset = None\n @property\n def content(self):\n return 'Bytes in Queue: {}'.format(len(q))\n @staticmethod\n def put_data(dataset):\n for i in wrap(dataset.bin, 8):\n q.append(BitArray(bin=i))\n#\nclass MacClass:\n '''\n XGMII is 10 Gigabit Media Independant Interface\n MAC layer sends data formed in 32 bit chunks to PCS\n [XGMII32][clk312][Single data rate][10GBASE-R]\n '''\n def __init__(self):\n self._end_of_stream = False\n self._chunk1, self._chunk2, self._chunks = [], [], []\n @property\n def content(self):\n return 'XGMII bus content: {}'.format(str(xgmii_bus))\n def clear_chunks(self):\n self._chunk1.clear()\n self._chunk2.clear()\n self._chunks.clear()\n def check_queue(self):\n xgmii_bus.clear()\n self.clear_chunks()\n # if new data\n # then add S(0xfb) and x7 D\n if len(q) == len(DATASET.bin)/8:\n xgmii_bus.append(XGMII_SEQ['START']) # 0xc0 for CDDD_DDDD\n self._chunk1.append(XGMII_CODE['S'])\n for _ in range(3):\n self._chunk1.append(q.popleft())\n for _ in range(4):\n self._chunk2.append(q.popleft())\n xgmii_bus.extend([sum(self._chunk1), sum(self._chunk2)])\n return xgmii_bus\n # if there are 8 or more bytes of data\n # then add x8 D\n elif len(q) >= 8:\n # sync code is 0x00 as there are no control information\n xgmii_bus.append(XGMII_SEQ['DATA'])\n # a uniq case when FD at the end of 2nd chunk32\n if len(q) == 8:\n self._end_of_stream = True\n # simply form x2 chunk32\n for _ in range(4):\n self._chunk1.append(q.popleft())\n for _ in range(4):\n self._chunk2.append(q.popleft())\n xgmii_bus.extend([sum(self._chunk1), sum(self._chunk2)])\n return xgmii_bus\n # if less than 8 bytes but not empty\n # then take all, add T and fill with I until chunk32\n elif len(q) < 8 and q:\n # take everything left\n while q:\n self._chunks.append(BitArray(q.popleft()))\n # start forming XGMII sync code with 1 for every 8 bytes of data\n xgmii_bus.append(XGMII_CTRL['DATA']*len(self._chunks))\n # fill sync code with 0 until 8 bits\n while len(xgmii_bus[0]) < 8:\n xgmii_bus[0] += XGMII_CTRL['CTRL']\n # add T after data bytes\n self._chunks.append(XGMII_CODE['T'])\n # and fill all free space with I (0x07)\n while len(self._chunks) < 12:\n self._chunks.append(XGMII_CODE['I'])\n # split chunk64 to two chunk32\n for i in range(4):\n self._chunk1.append(self._chunks[i])\n for i in range(4,8):\n self._chunk2.append(self._chunks[i])\n xgmii_bus.extend([sum(self._chunk1), sum(self._chunk2)])\n return xgmii_bus\n # if there is no data and not because the end of stream been reached\n # then full of Idle\n elif not q and not self._end_of_stream:\n # add two 32 bits chunks of I\n xgmii_bus.append(XGMII_CTRL['CTRL']*8) # 0xff for CCCC_CCCC\n for _ in range(4):\n self._chunk1.append(XGMII_CODE['I'])\n for _ in range(4):\n self._chunk2.append(XGMII_CODE['I'])\n xgmii_bus.extend([sum(self._chunk1), sum(self._chunk2)])\n return xgmii_bus\n # if there is no data and because the end of stream been reached\n # then Terminate and x7 Idle\n # uniq case when T at the start of a new pair of chunk32\n elif not q and self._end_of_stream:\n self._end_of_stream = 
False\n # sync code is 0xff as there are only control info\n xgmii_bus.append(XGMII_CTRL['CTRL']*8) # 0xff for TCCC_CCCC\n self._chunk1.append(XGMII_CODE['T'])\n for _ in range(3):\n self._chunk1.append(XGMII_CODE['I'])\n for _ in range(4):\n self._chunk2.append(XGMII_CODE['I'])\n xgmii_bus.extend([sum(self._chunk1), sum(self._chunk2)])\n return xgmii_bus\n#\nclass PcsClass:\n '''\n Physical Coding Sublayer\n A receiver of XGMII encoded data from MAC, encodes to 64b/66b and sends to PMA\n '''\n def __init__(self):\n self.tx_data = None\n # self.to_proceed = []\n self.to_proceed = deque()\n self.align = deque()\n @property\n def content(self):\n return 'PHY bus content: {}'.format(str(phy_bus))\n def check_bus(self):\n self.tx_data = None\n phy_bus.clear()\n self.tx_data = xgmii_bus[1] + xgmii_bus[2]\n # if chunk1 is 07070707\n # then 10 CCCC_CCCC\n if xgmii_bus[1] == XGMII_CODE['I']*4:\n phy_bus.extend([ENC64_66_CTRL['CTRL'], ENC64_66_CODE['C']])\n # if START (0xc0)\n elif xgmii_bus[0] == XGMII_SEQ['START']:\n phy_bus.append(ENC64_66_CTRL['CTRL'])\n phy_bus.append(ENC64_66_CODE['S'] + BitArray(self.tx_data[8:]))\n # if DATA (0x00)\n elif xgmii_bus[0] == XGMII_SEQ['DATA']:\n phy_bus.append(ENC64_66_CTRL['DATA'])\n phy_bus.append(self.tx_data)\n # if pertial control\n # then have to fit bytes wisely and replace 07s\n else:\n # there might be valid 0xfd, so we have to find T from the right side\n # going thru sequence of 0x07\n phy_bus.append(ENC64_66_CTRL['CTRL'])\n # prepare bank with 1 byte chunks to make iterations easier\n for i in wrap(self.tx_data.bin, 8):\n self.align.append(BitArray(bin=i))\n counter = 7\n # cut 07s one by one until 0xFB\n while True:\n tmp = self.align.pop()\n if tmp != XGMII_CODE['I']:\n break\n counter -= 1\n # now we know the index of T, so its easy to set a correct Block Type field\n # from 64b/66b vector types\n self.to_proceed.append(ENC64_66_CODE[counter])\n # all blocks with data move to a list with awaiting code\n while self.align:\n self.to_proceed.append(self.align.popleft())\n # and populate with parts of '1e0e1c3870e1c387' according to index\n while len(self.to_proceed) < 8:\n self.to_proceed.append(BitArray(bin=FILL[len(self.to_proceed)]))\n phy_bus.append(sum(self.to_proceed))\n#\n# Testing\ndef main():\n # init queue, MAC, bus and PCS\n queue, mac, pcs = QueueClass(), MacClass(), PcsClass()\n # queue, mac, xgmii, pcs, phy = QueueClass(), MacClass(), XgmiiBusClass(), PcsClass(), PhyBusClass()\n # ensure that queue been initialized without any data\n print(queue.content)\n # Bytes in Queue: 0\n # then MAC checks a queue\n mac.check_queue()\n # but as it is empty, IDLE is generated\n print(mac.content)\n # XGMII bus content: [BitArray('0xff'), BitArray('0x07070707'), BitArray('0x07070707')]\n # IDLE code and two chunks full of 0x07\n # even if IDLE a PCS encodes it\n pcs.check_bus()\n # check what have been serialized to PCS-PCA bus\n print(pcs.content)\n # PHY bus content: [BitArray('0x10'), [BitArray('0x1e00000000000000')]]\n # Block type 1e and 7 blocks of zeroes are encoded to 64/66b format\n #\n # lets add some data to a queue (change PAYLOAD_SIZE at CONST section)\n queue.put_data(DATASET)\n #\n # process while there are a data in a queue\n for _ in range(int(len(DATASET) / 52)):\n # check current queue length\n print(queue.content)\n # mac checks a queue and process the data [code 0xN, chunk32, chunk32]\n mac.check_queue()\n # XGMII bus content\n print(mac.content)\n # PCS reads the data and encode to 64/66b format\n # pairing a \"control XGMII 
Code\" to a type in \"64b/66b vector types table\"\n pcs.check_bus()\n # PHY bus content\n print(pcs.content)\n#\nif __name__ == '__main__':\n main()","sub_path":"encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":10401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"342310951","text":"import torch\nimport numpy as np\n\nnp.set_printoptions(suppress=False)\nfrom torch.autograd import grad as gradient\nfrom pandas import DataFrame\nfrom matplotlib import pyplot as plt\n\nfrom TorchAdversarial import TorchNNCore\nfrom utils import load_split\nfrom metric import Metric\n\n\nclass PreProcFlip(object):\n\tdef __init__(\n\t\tself, data, attr, method=\"FGSM\", k=10, seed=None, max_iter=1000, dR=0.1, dF=0.1\n\t):\n\t\tprint((data, attr, method, dF, dR))\n\n\t\tself._data = data\n\t\tself._attr = attr\n\t\tself._seed = seed\n\t\tself._max_iter = max_iter\n\t\tself._epsilon = 0.1\n\t\tself._dR = dR\n\t\tself._dF = dF\n\t\tself._method = method\n\t\tif self._seed is not None:\n\t\t\tnp.random.seed(self._seed)\n\t\t\ttorch.manual_seed(self._seed)\n\n\t\tself._train_np, self._test_np = load_split(data, attr)\n\n\t\tif type(k) is int:\n\t\t\tself._k = k\n\t\telif type(k) is float:\n\t\t\tself._k = np.ceil(self._train_np[\"X\"].shape[0] * k).astype(int)\n\t\telse:\n\t\t\traise ValueError(\"k must be float or int\")\n\n\t\tprint(\n\t\t\t\"Flipping %.4f%% (%d) records every epoch\"\n\t\t\t% (self._k * 100.0 / self._train_np[\"X\"].shape[0], self._k)\n\t\t)\n\n\t\tself._train = {\n\t\t\t\"X\": torch.tensor(self._train_np[\"X\"], dtype=torch.float),\n\t\t\t\"y_orig\": torch.tensor(\n\t\t\t\tself._train_np[\"y\"].reshape(-1, 1), dtype=torch.float\n\t\t\t),\n\t\t\t\"y\": torch.tensor(self._train_np[\"y\"].reshape(-1, 1), dtype=torch.float),\n\t\t\t\"s\": self._train_np[\"s\"],\n\t\t\t\"c\": torch.tensor(self._train_np[\"c\"].reshape(-1, 1), dtype=torch.float),\n\t\t}\n\n\t\tself._test = {\n\t\t\t\"X\": torch.tensor(self._test_np[\"X\"], dtype=torch.float),\n\t\t\t\"y\": torch.tensor(self._test_np[\"y\"].reshape(-1, 1), dtype=torch.float),\n\t\t}\n\n\tdef _BCELoss(self, y_pred, y, reduction=True):\n\t\tif reduction:\n\t\t\treturn -torch.mean(\n\t\t\t\ty * torch.log(0.99 * y_pred)\n\t\t\t\t+ (1.0 - y) * torch.log(1.0 - 0.99 * y_pred)\n\t\t\t)\n\t\telse:\n\t\t\treturn -(\n\t\t\t\ty * torch.log(0.99 * y_pred)\n\t\t\t\t+ (1.0 - y) * torch.log(1.0 - 0.99 * y_pred)\n\t\t\t)\n\n\tdef _DISPLoss(self, c, y_pred):\n\t\treturn torch.square(torch.sum(c * y_pred))\n\n\tdef _AccDisp(self, y, y_pred, s):\n\t\tmetric = Metric(true=y.reshape(-1).tolist(), pred=y_pred.reshape(-1).tolist())\n\t\treturn metric.accuracy(), metric.positive_disparity(s=s)\n\n\tdef _Scaler(self, a_):\n\t\ta = np.array(a_)\n\t\ta_min = a.min()\n\t\ta_max = a.max()\n\t\tif a_min == a_max:\n\t\t\treturn a\n\t\telse:\n\t\t\treturn (a - a_min) / (a_max - a_min) * 2 - 1\n\n\tdef _Hessian(self, model, X_train, y_train):\n\t\ty_pred = model(X_train)\n\t\tloss = self._BCELoss(y_pred, y_train)\n\t\tweights = model.layers[-2].weight\n\t\tbias = model.layers[-2].bias\n\t\tgrad_L_w_1 = gradient(\n\t\t\tloss, (weights, bias), retain_graph=True, create_graph=True\n\t\t)\n\t\tHessian = []\n\t\tfor i in range(0, grad_L_w_1[0].shape[1]):\n\t\t\tgrad_L_w_2 = gradient(\n\t\t\t\tgrad_L_w_1[0][0][i], (weights, bias), retain_graph=True\n\t\t\t)\n\t\t\tHessian.append(grad_L_w_2[0][0].tolist() + grad_L_w_2[1].tolist())\n\t\tgrad_L_w_2 = gradient(grad_L_w_1[1][0], (weights, bias), 
retain_graph=True)\n\t\tHessian.append(grad_L_w_2[0][0].tolist() + grad_L_w_2[1].tolist())\n\t\treturn np.array(Hessian)\n\n\tdef AdvExp(self, X, y=None, method=\"FGSM\", eps=0.1):\n\t\tif type(X) is torch.Tensor:\n\t\t\tX_ = X.clone().detach().requires_grad_(True)\n\t\telse:\n\t\t\tX_ = torch.tensor(X, dtype=torch.float, requires_grad=True)\n\t\tif method == \"FGSM\":\n\t\t\ty_pred = self._model(X_)\n\t\t\tif y is not None:\n\t\t\t\ty_ = torch.tensor(y.reshape(-1, 1), dtype=torch.float)\n\t\t\telse:\n\t\t\t\ty_ = torch.round(y_pred).detach()\n\t\t\tloss = self._BCELoss(y_pred, y_)\n\t\t\tnoise = eps * torch.sign(torch.autograd.grad(loss, X_)[0])\n\t\t\treturn (X_ + noise).detach()\n\t\telif method == \"PGD\":\n\t\t\tif y is not None:\n\t\t\t\ty_ = torch.tensor(y.reshape(-1, 1), dtype=torch.float)\n\t\t\telse:\n\t\t\t\ty_ = None\n\t\t\tfor i in range(0, 10):\n\t\t\t\ty_pred = self._model(X_)\n\t\t\t\tif y_ is None:\n\t\t\t\t\ty_ = torch.round(y_pred).detach()\n\t\t\t\tloss = self._BCELoss(y_pred, y_)\n\t\t\t\tnoise = (eps * 0.1) * torch.sign(torch.autograd.grad(loss, X_)[0])\n\t\t\t\tX_ = (X_ + noise).detach().requires_grad_(True)\n\t\t\treturn X_.detach()\n\n\tdef fit_transform(self, test_output=True, save=False):\n\n\t\tjudgement_set = \"train\"\n\n\t\tres = {\n\t\t\t\"setting\": \"%s_%s\" % (self._data, self._attr),\n\t\t\t\"train\": {\n\t\t\t\t\"orig\": [],\n\t\t\t\t\"attk\": [],\n\t\t\t\t\"disp\": [],\n\t\t\t},\n\t\t\t\"test\": {\n\t\t\t\t\"orig\": [],\n\t\t\t\t\"attk\": [],\n\t\t\t\t\"disp\": [],\n\t\t\t},\n\t\t\t\"iter\": [],\n\t\t\t\"valid\": True,\n\t\t}\n\n\t\tmodel = TorchNNCore(\n\t\t\tinps=self._train[\"X\"].shape[1],\n\t\t\thiddens=[128],\n\t\t\tseed=self._seed,\n\t\t\thidden_activation=torch.nn.LeakyReLU,\n\t\t)\n\t\tself._model = model\n\n\t\toptim = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=1e-4)\n\t\tloss_func = torch.nn.BCELoss()\n\n\t\tself._train[\"X_adv\"] = None\n\n\t\tchosen_R = []\n\t\tchosen_F = []\n\t\tmax_R_value = 0.0\n\n\t\tfor it in range(0, self._max_iter):\n\n\t\t\tif self._train[\"X_adv\"] is not None:\n\t\t\t\tX_train = torch.vstack(\n\t\t\t\t\t[self._train[\"X\"], self._train[\"X_adv\"]]\n\t\t\t\t).detach()\n\t\t\t\ty_train = torch.vstack(\n\t\t\t\t\t[self._train[\"y\"], self._train[\"y\"][chosen_R]]\n\t\t\t\t).detach()\n\t\t\telse:\n\t\t\t\tX_train = self._train[\"X\"].detach()\n\t\t\t\ty_train = self._train[\"y\"].detach()\n\n\t\t\tself._X_train = X_train\n\t\t\tself._y_train = y_train\n\n\t\t\t# BEGIN: Train modified model\n\t\t\ttolerence = 10\n\t\t\tlast_loss = None\n\t\t\tfor epoch in range(0, 1000):\n\t\t\t\toptim.zero_grad()\n\t\t\t\ty_pred = model(X_train)\n\t\t\t\tloss = self._BCELoss(y_pred, y_train)\n\t\t\t\tthis_loss = loss.tolist()\n\t\t\t\tif last_loss is not None:\n\t\t\t\t\tif this_loss > last_loss or abs(last_loss - this_loss) < 1e-5:\n\t\t\t\t\t\ttolerence -= 1\n\t\t\t\t\tif tolerence == 0:\n\t\t\t\t\t\tbreak\n\t\t\t\tlast_loss = this_loss\n\t\t\t\tloss.backward()\n\t\t\t\toptim.step()\n\t\t\t# END\n\n\t\t\t# Calculate inverse of Hessian Matrix\n\t\t\tHessian_inv = np.linalg.inv(self._Hessian(model, X_train, y_train))\n\t\t\t# END\n\n\t\t\t# BEGIN: Common calculations\n\t\t\tweights = model.layers[-2].weight\n\t\t\tbias = model.layers[-2].bias\n\t\t\ty_pred = model(self._train[\"X\"])\n\t\t\torig_loss_utility = self._BCELoss(y_pred, self._train[\"y_orig\"])\n\t\t\torig_grad_utility = gradient(\n\t\t\t\torig_loss_utility, (weights, bias), retain_graph=True\n\t\t\t)\n\t\t\torig_grad_utility = 
np.array(\n\t\t\t\torig_grad_utility[0][0].tolist() + orig_grad_utility[1].tolist()\n\t\t\t)\n\t\t\t# END: Common calculations\n\n\t\t\t# BEGIN: Pre-processing of fairness\n\t\t\tinfl = -np.ones(self._train[\"s\"].shape[0])\n\t\t\tif (\n\t\t\t\tlen(res[judgement_set][\"disp\"]) > 0\n\t\t\t\tand res[judgement_set][\"disp\"][-1] > self._dF\n\t\t\t\tand len(chosen_F) < self._train[\"X\"].shape[0]\n\t\t\t):\n\t\t\t\ty_pred = model(X_train)\n\n\t\t\t\tloss_orig = self._BCELoss(y_pred, y_train, reduction=False)\n\t\t\t\tloss_flip = self._BCELoss(y_pred, 1 - y_train, reduction=False)\n\n\t\t\t\tI_theta = []\n\n\t\t\t\tfor i in range(0, self._train[\"y\"].shape[0]):\n\n\t\t\t\t\tgrad_loss_orig = gradient(\n\t\t\t\t\t\tloss_orig[i], (weights, bias), retain_graph=True\n\t\t\t\t\t)\n\t\t\t\t\tgrad_loss_orig = np.array(\n\t\t\t\t\t\tgrad_loss_orig[0][0].tolist() + grad_loss_orig[1].tolist()\n\t\t\t\t\t)\n\n\t\t\t\t\tgrad_loss_flip = gradient(\n\t\t\t\t\t\tloss_flip[i], (weights, bias), retain_graph=True\n\t\t\t\t\t)\n\t\t\t\t\tgrad_loss_flip = np.array(\n\t\t\t\t\t\tgrad_loss_flip[0][0].tolist() + grad_loss_flip[1].tolist()\n\t\t\t\t\t)\n\n\t\t\t\t\tgrad_diff = grad_loss_flip - grad_loss_orig\n\n\t\t\t\t\tI_theta.append(-np.dot(Hessian_inv, grad_diff).reshape(-1))\n\n\t\t\t\ty_pred = model(self._train[\"X\"])\n\t\t\t\torig_loss_fairness = self._DISPLoss(self._train[\"c\"], y_pred)\n\t\t\t\torig_grad_fairness = gradient(\n\t\t\t\t\torig_loss_fairness, (weights, bias), retain_graph=True\n\t\t\t\t)\n\t\t\t\torig_grad_fairness = np.array(\n\t\t\t\t\torig_grad_fairness[0][0].tolist() + orig_grad_fairness[1].tolist()\n\t\t\t\t)\n\n\t\t\t\tfor i in range(0, self._train[\"y\"].shape[0]):\n\t\t\t\t\tinfl[i] = np.dot(orig_grad_fairness, I_theta[i]) * np.exp(\n\t\t\t\t\t\t-abs(np.dot(orig_grad_utility, I_theta[i]))\n\t\t\t\t\t)\n\n\t\t\t\tindices = np.argsort(infl)\n\t\t\t\tchosen_F_set = set(chosen_F)\n\t\t\t\tactual_indices = []\n\t\t\t\tfor item in indices:\n\t\t\t\t\tif infl[item] >= 0:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif item not in chosen_F_set:\n\t\t\t\t\t\tactual_indices.append(item)\n\t\t\t\t\t\tif len(actual_indices) == self._k:\n\t\t\t\t\t\t\tbreak\n\t\t\t\tchosen_F.extend(actual_indices)\n\t\t\t\tfor item in actual_indices:\n\t\t\t\t\tself._train[\"y\"][item][0] = 1.0 - self._train[\"y\"][item][0]\n\n\t\t\t# END: Pre-processing of fairness\n\n\t\t\t# BEGIN: Pre-processing of robustness\n\t\t\tinfl = -np.ones(self._train[\"s\"].shape[0])\n\t\t\tif (\n\t\t\t\tlen(res[judgement_set][\"attk\"]) > 0\n\t\t\t\tand res[judgement_set][\"attk\"][-1] < self._dR\n\t\t\t\tand len(chosen_R) < self._train[\"X\"].shape[0]\n\t\t\t):\n\n\t\t\t\tX_adv = self.AdvExp(\n\t\t\t\t\tX=self._train[\"X\"], y=self._train[\"y\"], method=self._method\n\t\t\t\t)\n\t\t\t\ty_adv_pred = model(X_adv)\n\t\t\t\tloss_robustness = self._BCELoss(\n\t\t\t\t\ty_adv_pred, self._train[\"y\"], reduction=False\n\t\t\t\t)\n\n\t\t\t\tI_theta = []\n\n\t\t\t\tfor i in range(0, self._train[\"X\"].shape[0]):\n\t\t\t\t\tgrad_loss = gradient(\n\t\t\t\t\t\tloss_robustness[i], (weights, bias), retain_graph=True\n\t\t\t\t\t)\n\t\t\t\t\tgrad_loss = np.array(\n\t\t\t\t\t\tgrad_loss[0][0].tolist() + grad_loss[1].tolist()\n\t\t\t\t\t)\n\t\t\t\t\tI_theta.append(-np.dot(Hessian_inv, grad_loss).reshape(-1))\n\n\t\t\t\ty_pred_adv = model(X_adv)\n\t\t\t\torig_loss_robustness = self._BCELoss(y_pred_adv, self._train[\"y_orig\"])\n\t\t\t\torig_grad_robustness = gradient(\n\t\t\t\t\torig_loss_robustness, (weights, bias), 
retain_graph=True\n\t\t\t\t)\n\t\t\t\torig_grad_robustness = np.array(\n\t\t\t\t\torig_grad_robustness[0][0].tolist()\n\t\t\t\t\t+ orig_grad_robustness[1].tolist()\n\t\t\t\t)\n\n\t\t\t\tfor i in range(0, self._train[\"y\"].shape[0]):\n\t\t\t\t\tinfl[i] = np.dot(orig_grad_robustness, I_theta[i]) * np.exp(\n\t\t\t\t\t\t-abs(np.dot(orig_grad_utility, I_theta[i]))\n\t\t\t\t\t)\n\n\t\t\t\tindices = np.argsort(infl)\n\t\t\t\tchosen_R_set = set(chosen_R)\n\t\t\t\tactual_indices = []\n\t\t\t\tfor item in indices:\n\t\t\t\t\tif infl[item] >= 0:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif item not in chosen_R_set:\n\t\t\t\t\t\tactual_indices.append(item)\n\t\t\t\t\t\tif len(actual_indices) == self._k:\n\t\t\t\t\t\t\tbreak\n\t\t\t\tchosen_R.extend(actual_indices)\n\t\t\t\tif self._train[\"X_adv\"] is None:\n\t\t\t\t\tself._train[\"X_adv\"] = X_adv[actual_indices, :].detach()\n\t\t\t\telse:\n\t\t\t\t\tself._train[\"X_adv\"] = torch.vstack(\n\t\t\t\t\t\t[\n\t\t\t\t\t\t\tself._train[\"X_adv\"],\n\t\t\t\t\t\t\tX_adv[actual_indices, :].clone().detach(),\n\t\t\t\t\t\t]\n\t\t\t\t\t)\n\t\t\t# END: Pre-processing of robustness\n\n\t\t\t# BEGIN: Metrics of training\n\t\t\ty_pred = model(self._train[\"X\"])\n\t\t\tmetric = Metric(\n\t\t\t\ttrue=self._train_np[\"y\"], pred=y_pred.detach().numpy().reshape(-1)\n\t\t\t)\n\t\t\tacc_train_org = metric.accuracy()\n\t\t\tdisp_train_org = metric.positive_disparity(s=self._train_np[\"s\"])\n\t\t\ty_pred_adv = model(\n\t\t\t\tself.AdvExp(X=self._train[\"X\"], y=self._train[\"y\"], method=self._method)\n\t\t\t)\n\t\t\tacc_train_atk = Metric(\n\t\t\t\ttrue=self._train_np[\"y\"], pred=y_pred_adv.detach().numpy().reshape(-1)\n\t\t\t).accuracy()\n\t\t\tprint(\n\t\t\t\t\"Iter: %d, Train: (%.4f, %.4f, %.4f)\"\n\t\t\t\t% (it + 1, acc_train_org, acc_train_atk, disp_train_org),\n\t\t\t\tend=\", \",\n\t\t\t)\n\t\t\tres[\"train\"][\"orig\"].append(acc_train_org)\n\t\t\tres[\"train\"][\"attk\"].append(acc_train_atk)\n\t\t\tres[\"train\"][\"disp\"].append(disp_train_org)\n\t\t\t# END: Metrics of training\n\n\t\t\t# BEGIN: Testing\n\t\t\tif test_output:\n\t\t\t\t# X_test.grad=None\n\t\t\t\ty_test_pred = model(self._test[\"X\"])\n\t\t\t\tmetric = Metric(\n\t\t\t\t\ttrue=self._test_np[\"y\"],\n\t\t\t\t\tpred=y_test_pred.detach().numpy().reshape(-1),\n\t\t\t\t)\n\t\t\t\tacc_test_org = metric.accuracy()\n\t\t\t\tdisp_test_org = metric.positive_disparity(s=self._test_np[\"s\"])\n\n\t\t\t\tX_test_adv = self.AdvExp(X=self._test[\"X\"], y=None, method=self._method)\n\t\t\t\ty_test_pred_atk = model(X_test_adv)\n\t\t\t\tacc_test_atk = Metric(\n\t\t\t\t\ttrue=self._test_np[\"y\"],\n\t\t\t\t\tpred=y_test_pred_atk.detach().numpy().reshape(-1),\n\t\t\t\t).accuracy()\n\n\t\t\t\tprint(\n\t\t\t\t\t\"Test: (%.4f, %.4f, %.4f), Coverage: (R:%.2f, F:%.2f)\"\n\t\t\t\t\t% (\n\t\t\t\t\t\tacc_test_org,\n\t\t\t\t\t\tacc_test_atk,\n\t\t\t\t\t\tdisp_test_org,\n\t\t\t\t\t\tlen(chosen_R) / self._train[\"X\"].shape[0],\n\t\t\t\t\t\tlen(chosen_F) / self._train[\"X\"].shape[0],\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\tres[\"test\"][\"orig\"].append(acc_test_org)\n\t\t\t\tres[\"test\"][\"attk\"].append(acc_test_atk)\n\t\t\t\tres[\"test\"][\"disp\"].append(disp_test_org)\n\t\t\t\tif res[\"test\"][\"attk\"][-1] > max_R_value:\n\t\t\t\t\tmax_R_value = res[\"test\"][\"attk\"][-1]\n\t\t\t# END: Testing\n\n\t\t\tif (\n\t\t\t\tres[judgement_set][\"attk\"][-1] >= self._dR\n\t\t\t\tand res[judgement_set][\"disp\"][-1] <= self._dF\n\t\t\t):\n\t\t\t\tbreak\n\n\t\tif save:\n\n\t\t\tif self._train[\"X_adv\"] is not None:\n\t\t\t\tX_train = 
torch.vstack(\n\t\t\t\t\t[self._train[\"X\"], self._train[\"X_adv\"]]\n\t\t\t\t).detach()\n\t\t\t\ty_train = torch.vstack(\n\t\t\t\t\t[self._train[\"y\"], self._train[\"y\"][chosen_R]]\n\t\t\t\t).detach()\n\t\t\telse:\n\t\t\t\tX_train = self._train[\"X\"].detach()\n\t\t\t\ty_train = self._train[\"y\"].detach()\n\n\t\t\tdownstream_train = {\n\t\t\t\t\"X\": X_train.detach(),\n\t\t\t\t\"y\": y_train.detach(),\n\t\t\t\t\"s\": self._train[\"s\"],\n\t\t\t\t\"idx\": chosen_R,\n\t\t\t}\n\n\t\t\tres[\"downstream_train\"] = downstream_train\n\t\t\tres[\"downstream_test\"] = self._test\n\n\t\treturn res\n\n\ndef draw(res):\n\n\tfig, ax = plt.subplots(1, 2)\n\tfig.set_size_inches(12.8, 4.8)\n\n\tdata = np.hstack(\n\t\t[\n\t\t\tnp.array(res[\"train\"][\"orig\"]).reshape(-1, 1),\n\t\t\tnp.array(res[\"train\"][\"attk\"]).reshape(-1, 1),\n\t\t]\n\t)\n\tdf_train = DataFrame(\n\t\tdata, index=res[\"iter\"], columns=[\"Accuracy_Orig.\", \"Accuracy_Attk.\"]\n\t)\n\tmain_ax = df_train.plot(ax=ax[0])\n\tmain_ax.set_xlabel(\"Training epochs\")\n\tmain_ax.set_ylabel(\"Accuracy\")\n\tmain_ax.set_title(res[\"setting\"] + \"_train\")\n\n\tdata = np.array(res[\"train\"][\"disp\"]).reshape(-1, 1)\n\tdf_train = DataFrame(data, index=res[\"iter\"], columns=[\"Disparity\"])\n\tm2nd_ax = df_train.plot(secondary_y=True, ax=main_ax)\n\tm2nd_ax.set_ylabel(\"Statistical Parity\")\n\n\tdata = np.hstack(\n\t\t[\n\t\t\tnp.array(res[\"test\"][\"orig\"]).reshape(-1, 1),\n\t\t\tnp.array(res[\"test\"][\"attk\"]).reshape(-1, 1),\n\t\t]\n\t)\n\tdf_test = DataFrame(\n\t\tdata, index=res[\"iter\"], columns=[\"Accuracy_Orig.\", \"Accuracy_Attk.\"]\n\t)\n\tmain_ax = df_test.plot(ax=ax[1])\n\tmain_ax.set_xlabel(\"Training epochs\")\n\tmain_ax.set_ylabel(\"Accuracy\")\n\tmain_ax.set_title(res[\"setting\"] + \"_test\")\n\n\tdata = np.array(res[\"test\"][\"disp\"]).reshape(-1, 1)\n\tdf_test = DataFrame(data, index=res[\"iter\"], columns=[\"Disparity\"])\n\tm2nd_ax = df_test.plot(secondary_y=True, ax=main_ax)\n\tm2nd_ax.set_ylabel(\"Statistical Parity\")\n\n\tfig.tight_layout()\n\n\nif __name__ == \"__main__\":\n\n\timport sys\n\n\tif len(sys.argv) >= 6:\n\t\tdata = sys.argv[1]\n\t\tattr = sys.argv[2]\n\t\tmethod = sys.argv[3]\n\t\tdF = round(float(sys.argv[4]), 2)\n\t\tdR = round(float(sys.argv[5]), 2)\n\t\tk = 0.003\n\t\tsaveflag = False\n\t\tif len(sys.argv) > 6 and sys.argv[6].strip() == \"save\":\n\t\t\tsaveflag = True\n\telse:\n\t\tdata = \"adult\"\n\t\tattr = \"race\"\n\t\tmethod = \"FGSM\"\n\t\tdF = 0.6\n\t\tdR = 0.30\n\t\tk = 0.003\n\t\tsaveflag = True\n\n\timport time\n\n\tseed = int(time.time())\n\n\tprint(\"Seed is %d\" % seed)\n\n\tmodel = PreProcFlip(\n\t\tdata, attr, method=method, k=k, max_iter=1000, seed=seed, dR=dR, dF=dF\n\t)\n\tres = model.fit_transform(save=saveflag)\n\n\tres[\"data\"] = data\n\tres[\"attr\"] = attr\n\tres[\"method\"] = method\n\tres[\"dF\"] = dF\n\tres[\"dR\"] = dR\n\tres[\"k\"] = k\n\tres[\"seed\"] = seed\n\n\tif saveflag:\n\n\t\timport pickle\n\n\t\twith open(\n\t\t\t\"./result/predata/\" + f\"{data}_{attr}_{method}_{seed}.pre\", \"wb\"\n\t\t) as handle:\n\t\t\tpickle.dump(res, handle)\n\n\telse:\n\n\t\timport json\n\n\t\tf = open(f\"./result/preproc/FnR.txt\", \"a\")\n\t\tf.write(json.dumps(res) + \"\\n\")\n\t\tf.close()\n","sub_path":"PreProcessInflu.py","file_name":"PreProcessInflu.py","file_ext":"py","file_size_in_byte":14660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}{"seq_id":"317718415","text":"# Purpose: fetch factors such as PB, PE and PS, store them in a matrix over time, and plot quantile charts for an intuitive view of the current valuation level of the market or a sector\n# 
平台:Mindgo\n\nimport pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\n# starttime = datetime.datetime.now() # 当前时间,用于计算代码运行所用时间\n\n###############################################################################\n# part1. 基本信息赋值\nbegin = datetime.date(2007, 1, 1) # 设定起始时间\nend = datetime.date(2018, 9, 7) # 设定终止时间\n\nTdays = get_trade_days(begin, end) # 获取交易日序列,具体请参考Mindgo平台文档\nQt = ['0.8', '0.9', '0.95'] # 一个存储分位百分比的列表\n\n# 每天PB分位数所对应的矩阵初始化\nQuantiles = pd.DataFrame(index=Qt, columns=Tdays)\n\n\n###############################################################################\n# part2. 计算分位数\nfor d in Tdays: # 遍历Tdays时间列表中的日期\n Today = d\n # 获取股票池中某一天所有股票的PB值(若要更换因子,在此处将PB换为其他因子),\n # 并把日期设为索引\n # universe = list(get_all_securities('stock', Today).index) # 全A股\n universe = get_index_stocks('000300.SH', Today) # 沪深300\n # Mindgo平台获取截面数据的函数,具体请参考Mindgo平台文档\n q = query(valuation.symbol, valuation.pb).filter(\n valuation.symbol.in_(universe)).order_by(valuation.symbol)\n df = get_fundamentals(q, date=Today).drop(\"valuation_symbol\", 1)\n\n # 去掉缺省值,计算得到某一天的分位数,保存至factor_quantiles\n factor_quantiles = df.dropna().quantile([0.8, 0.9, 0.95])\n if Today in Quantiles.columns:\n # 将某一天的分位数列表转存至统一的列表中\n Quantiles[Today] = factor_quantiles.values\n\n\n###############################################################################\n# part3. 绘图\nx = Quantiles.columns # 日期序列\ny1 = Quantiles.values[0] # 80分位\ny2 = Quantiles.values[1] # 90分位\ny3 = Quantiles.values[2] # 95分位\n\nplt.figure(figsize=(24, 6)) # 设定图的大小\n\n# 設置x軸主刻度顯示格式(日期)\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))\n# 設置x軸主刻度間距\nplt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=12))\nplt.plot(x, y1) # 80分位线\nplt.plot(x, y2) # 90分位线\nplt.plot(x, y3) # 95分位线\n\n# 设定图的名字,并设定字体大小\nplt.title(\"PB Distribution Over Time\", fontsize=16)\n# 给出图标的图例名称\nplt.legend(['0.8_Q', '0.9_Q', '0.95_Q'])\n\nplt.show() # 出图\n\n# 打印当前分位数所对应的PB值\nprint(Quantiles[end.strftime(\"%Y-%m-%d\")])\n\n# # 打印所用时间\n# endtime = datetime.datetime.now()\n# print((endtime - starttime).seconds)\n","sub_path":"Mindgo_research/PBquantile.py","file_name":"PBquantile.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"127906959","text":"import numpy as np\nimport model\nimport pandas as pd\nfrom sklearn.base import clone\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import confusion_matrix, precision_recall_curve, \\\n roc_auc_score, roc_curve, f1_score, classification_report, recall_score\nfrom sklearn.model_selection import cross_val_score, ShuffleSplit, RandomizedSearchCV, \\\n GridSearchCV\n\n\ndef cross_validation(cls, X, y, scoring='f1', n_jobs=-1, n_splits=3):\n \"\"\"\n\n :param cls: the classifier, inhered from BaseModel\n :param X: input features\n :param y: input class\n :param scoring: {'f1', 'roc_auc', 'both', 'all'}\n :param n_jobs: The\n :return: scores\n \"\"\"\n cv = ShuffleSplit(n_splits=n_splits, test_size=0.3, random_state=0)\n\n # This faster implementation is not allowed...\n # Because it can only allow 1 number output\n # return cross_val_score(cls, X, y, cv=cv, scoring=_scoring_func, n_jobs=n_jobs)\n\n # Thus I choose a stupid one\n if scoring == 'both':\n output = {}\n for scoring in ('f1', 'roc_auc'):\n output[scoring] = cross_val_score(cls, X, y, cv=cv, scoring=scoring, n_jobs=n_jobs)\n return output\n if scoring == 'all':\n output = {}\n for 
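# The loop above depends on MindGo platform APIs (get_trade_days, query,
# get_fundamentals). The core computation is just a cross-sectional quantile
# per trading day; a platform-free sketch with plain pandas, assuming `pb`
# is a DataFrame of PB values (index: dates, columns: stocks):
import numpy as np
import pandas as pd

dates = pd.bdate_range("2018-01-01", periods=5)
pb = pd.DataFrame(np.random.rand(5, 100) * 10, index=dates)

# rows: the 0.8/0.9/0.95 quantile levels, columns: dates (same shape as Quantiles)
quantiles = pb.quantile([0.8, 0.9, 0.95], axis=1)
print(quantiles.iloc[:, -1])   # latest day's quantile levels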
scoring in ('f1', 'roc_auc', 'precision', 'recall'):\n output[scoring] = cross_val_score(cls, X, y, cv=cv, scoring=scoring, n_jobs=n_jobs)\n return output\n if scoring != 'both' and scoring != 'all':\n return cross_val_score(cls, X, y, cv=cv, scoring=scoring, n_jobs=n_jobs)\n\n\ndef estimate(cls, X_train, X_test, y_train, y_test, use_confusion_matrix=False):\n if not isinstance(cls, model.BaseModel):\n model.BaseModel.register(type(cls))\n\n cls.fit(X_train, y_train)\n\n y_score_test = cls.predict_proba(X_test)\n y_pred_test = cls.predict(X_test)\n\n y_score_train = cls.predict_proba(X_train)\n y_pred_train = cls.predict(X_train)\n\n assert y_score_test.dtype.kind == 'f',\\\n \"Predict Proba value should be float. Change the predict definition in {}\".format(cls.__class__)\n assert np.all(np.equal(y_pred_test, y_pred_test.astype(int))), \\\n \"Predict value should be int. Change the predict definition in {}\".format(cls.__class__)\n\n if use_confusion_matrix:\n plot_confusion_matrix(y_true=y_test, y_pred=y_pred_test)\n\n y_score_test = score_transform(y_score_test)\n y_score_train = score_transform(y_score_train)\n return {\n 'train': {\n 'roc_auc': roc_auc_score(y_true=y_train, y_score=y_score_train),\n 'f1': f1_score(y_true=y_train, y_pred=y_pred_train)\n },\n 'test': {\n 'roc_auc': roc_auc_score(y_true=y_test, y_score=y_score_test),\n 'f1': f1_score(y_true=y_test, y_pred=y_pred_test)\n }\n }\n\n\ndef score_transform(y_score):\n if len(y_score.shape) == 1:\n return y_score\n if y_score.shape[1] == 1:\n return y_score[:, 0]\n assert y_score.shape[1] == 2 and len(y_score.shape) == 2, \"Not support y_score type with shape {}\".format(y_score)\n return y_score[:, 1]\n\n\ndef plot_confusion_matrix(y_pred, y_true):\n cnf_matrix = confusion_matrix(y_true=y_true, y_pred=y_pred)\n print(\"the recall for this model is :\", cnf_matrix[1, 1] / (cnf_matrix[1, 1] + cnf_matrix[1, 0]))\n sns.heatmap(cnf_matrix, cmap=\"coolwarm_r\", annot=True, linewidths=0.5)\n plt.title(\"Confusion_matrix\")\n plt.xlabel(\"Predicted_class\")\n plt.ylabel(\"Real class\")\n plt.show()\n print(\"\\n----------Classification Report------------------------------------\")\n print(classification_report(y_true, y_pred))\n\n\ndef _scoring_func(estimator, X, y):\n pred = estimator.predict(X)\n score = estimator.predict_proba(X)\n return [\n f1_score(y_true=y, y_pred=pred),\n recall_score(y_true=y, y_pred=pred),\n roc_auc_score(y_true=y, y_score=score)\n ]\n\n\ndef best_param_search(estimator, params, X, y, verbose=True, n_jobs=-1):\n \"\"\"\n The automatic search method\n\n :param estimator: the learner\n :param params:\n A list of param list. the search will start tuning from\n the first 1.\n\n For example:\n [\n {'C': [0.01, 0.1, 1], 'kernel':['rbf', 'poly']},\n {'gamma': [0.01, 0.025, 0.05, 0.1, 0.2, 0.4, 0.8]}\n ]\n This method will grid search `C` and `kernel` params first,\n by cross validation, using the default `gamma` value.\n And then use the best `C` and `kernel` params to grid search\n the best setting of `gamma`, so on and so forth.\n :param X: features\n :param y: labels\n :param verbose: {True, False} whether print the info while tuning\n :return:\n best_params: dict. 
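# A small usage sketch for cross_validation() above on a toy dataset; any
# estimator with fit/predict works, since ShuffleSplit and the sklearn
# scorers drive everything. The dataset and model here are illustrative.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X_toy, y_toy = make_classification(n_samples=300, random_state=0)
scores = cross_validation(LogisticRegression(max_iter=1000), X_toy, y_toy,
                          scoring='both', n_splits=3)
# scores == {'f1': array of 3 fold scores, 'roc_auc': array of 3 fold scores}
print({k: v.mean() for k, v in scores.items()})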
{'C': 0.1, 'kernel': 'rbf', 'gamma': 0.1}\n df_scores: pd.DataFrame(index=params, columns=k_fold_score)\n best_estimator_\n \"\"\"\n all_available_params = estimator.get_params()\n for ps in params:\n for param in ps.keys():\n if param not in all_available_params:\n raise ValueError(\"{} is not a param in {} class\".format(param, estimator.__class__))\n\n best_params = {}\n df_scores = pd.DataFrame(columns=['test_score', 'train_score', 'fit_time', 'score_time'])\n _estimator = estimator\n clf = None\n for ps in params:\n estimator = clone(_estimator)\n for name, value in best_params.items():\n if name not in ps:\n ps[name] = [value]\n\n cv = ShuffleSplit(n_splits=3, test_size=0.3, random_state=0)\n clf = GridSearchCV(estimator, ps, scoring='f1', cv=cv, n_jobs=n_jobs, return_train_score=True)\n clf.fit(X, y)\n for name, value in clf.best_params_.items():\n best_params[name] = value\n\n for i, dikt in enumerate(clf.cv_results_['params']):\n index_name = ';'.join(['{}:{}'.format(a, b) for a, b in dikt.items()])\n df_scores.loc[index_name] = [\n clf.cv_results_['mean_test_score'][i],\n clf.cv_results_['mean_train_score'][i],\n clf.cv_results_['mean_fit_time'][i],\n clf.cv_results_['mean_score_time'][i],\n ]\n return best_params, df_scores, getattr(clf, 'best_estimator_', None)\n\n\n__all__ = ['cross_validation']\n","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":6249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"401595203","text":"from aiohttp import ClientSession, ClientTimeout\nfrom urllib.parse import ParseResult, urlunparse, urlencode\nfrom .log import logger\n\n\nasync def pub_to_nsq(address, topic, msg, timeout=60):\n url = urlunparse(ParseResult(scheme='http', netloc=address, path='/pub', params='',\n query=urlencode({'topic': topic}), fragment=''))\n async with ClientSession(timeout=ClientTimeout(total=timeout)) as session:\n async with session.request(\"POST\", url, data=msg) as resp:\n if resp.status != 200:\n logger.error(\"[pub to nsq error] topic: {}\".format(topic))\n\n\nasync def mpub_to_nsq(address, topic, msgs, timeout=60):\n if any(map(lambda x: '\\n' in x, msgs)):\n raise ValueError(r\"msgs contain \\n\")\n url = urlunparse(ParseResult(scheme='http', netloc=address, path='/mpub', params='',\n query=urlencode({'topic': topic}), fragment=''))\n async with ClientSession(timeout=ClientTimeout(total=timeout)) as session:\n async with session.request(\"POST\", url, data=\"\\n\".join(msgs)) as resp:\n if resp.status != 200:\n logger.error(\"[pub to nsq error] topic: {}\".format(topic))\n","sub_path":"util/pub.py","file_name":"pub.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"110862669","text":"def main():\n # problem1()\n problem2()\n\n\n# Create a function that has a loop that quits with q\n# Allow the User to enter names until q is entered\n# Add each name entered to a List\n# When the User enters q print the list of names\n# ADDITIONAL REQUIREMENTS:\n#\n# Your code should be able to process the quit command (q) the User enters regardless of case\n\n# This function is given a loop to continue with the list until \"q\" is entered to quit\n\n# def problem1():\n#\n# givenLoop = input(\"Enter a name:\")\n# givenLoop2 = \"\"\n# while(givenLoop != givenLoop2 ):\n# # if(givenLoop != 'q'):\n# givenLoop2 = input(givenLoop != \"q\")\n\n # print(givenLoop)\n\n # 
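# DataStructure.py above looks unfinished: problem1 is commented out,
# problem2's loop never re-prompts, and the __main__ guard is indented
# inside the while loop so main() is never reached. A guess at what the
# stated exercise (collect names until a case-insensitive 'q') intended:
def problem1():
    names = []
    entry = input("Enter a name: ")
    while entry.lower() != 'q':
        names.append(entry)
        entry = input("Enter a name: ")
    print(names)

def problem2():
    people = [{"name": "Kelvin", "age": 30},
              {"name": "Bob", "age": 50},
              {"name": "Alex", "age": 21}]
    query = input("Enter the name or age in dictionary: ")
    while query.lower() != 'q':
        print([p for p in people
               if p["name"] == query or str(p["age"]) == query])
        query = input("Enter the name or age in dictionary: ")

if __name__ == '__main__':
    problem1()
    problem2()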
givenLoop(str(givenLoop2.insert))\n\n\n\n\n\n\n\n\n#\ndef problem2():\n\n myDictionaryList = [0 ,1 ,2 ]\nmyDictionaryList = [\n {\n \"name\": \"Kelvin\",\n \"age\": 30\n },\n {\n \"name\": \"Bob\",\n \"age\": 50\n },\n {\n \"name\": \"Alex\",\n \"age\": 21\n }\n ]\n\n\ncharacter = input(\"Enter the name or age in dictionary:\")\n# myDictionaryList\nwhile character != 'q':\n print(character)\n\n # if(character == 'q'):\n # return myDictionaryList\n # print(myDictionaryList)\n\n\n if __name__ == '__main__':\n main()","sub_path":"DataStructure.py","file_name":"DataStructure.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"644027579","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpRequest\nfrom django.db import IntegrityError \nfrom email.mime.text import MIMEText\nfrom smtplib import SMTP, SMTPAuthenticationError, SMTPException\nfrom myapp import views\nfrom myapp import models \nfrom myapp import form \nfrom cart.cart import Cart\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n\n\ng_status='' \n \ndef send_mail(uname,uemail):\n strSmtp = \"smtp.gmail.com:587\" #主機\t \n strAccount = \"chris2012test@gmail.com\" #帳號\n strPassword = \"aaaaaaaaa\" #密碼\n content = '
歡迎成為克里斯美食中心會員,請利用下列網址進行認證後即可使用服務:
    我要認證!' #郵件內容\n msg = MIMEText(content, \"html\", \"utf-8\")\n msg[\"Subject\"] = \"克里斯美食中心-認證信\" #郵件標題\n mailto = uemail #收件者\n\t#mailto = [\"收件者電子郵件\"] #收件者\n\t#mailto = [\"收件者電子郵件一\", \"收件者電子郵件二\"]\n\n server = SMTP(strSmtp) #建立SMTP連線\n server.ehlo() #跟主機溝通\n server.starttls() #TTLS安全認證\n try:\n server.login(strAccount, strPassword) #登入\n server.sendmail(strAccount, mailto, msg.as_string()) #寄信\n hint = \"郵件已發送!\"\n except SMTPAuthenticationError:\n hint = \"無法登入!\"\n except:\n hint = \"郵件發送產生錯誤!\"\n server.quit() #關閉連線\n\n\ndef index(request):\n global g_status \n status = g_status \n\n message = ''\n message2 = '您好 請先登入'\n foods = models.ProductModel.objects.filter(p_category__name__exact ='食物')\n stationeries = models.ProductModel.objects.filter(p_category__name__exact ='文具')\n computers = models.ProductModel.objects.filter(p_category__name__exact ='3C用品')\n necessities = models.ProductModel.objects.filter(p_category__name__exact ='生活用品')\n\n # 當欲查詢 ForeignKey 屬性時預設會查詢對應之主鍵,如要查別項需用 __name__exact說明\n\n if request.session.get('login_user'):\n status = 'login'\n else:\n status = ''\n\n if request.method == 'POST':\n login_account = request.POST['login_account']\n try:\n user = models.UserModel.objects.get(user_account = login_account)\n if request.POST['login_password'] == user.user_password:\n # 確認密碼\n request.session['login_user'] = login_account \n # 為使用者儲存自訂(key)名稱為 'login_user' 之 Session\n request.session.set_expiry(7200) \n # 設定 session 持續 7200 sec\n\n message = '歡迎光臨! ' + str(user.user_account) + '~ Hooray!'\n g_status = 'login'\n status = g_status\n return render(request,'index.html',locals())\n\n else:\n message2 = '密碼錯誤 請重新輸入!'\n except ArithmeticError:\n message = '發生問題請先註冊'\n\n cart = Cart(request)\n # 引入購物車物件\n\n return render(request,'index.html',locals())\n\n\ndef logout(request):\n request.session.clear()\n auth.logout(request)\n\n global g_status\n g_status = ''\n status = ''\n return redirect('/index/')\n\n\ndef signin(request):\n message = ''\n\n if request.method == 'POST': # 如果接收到表單以POST方式傳送之資料\n try:\n user_form = form.UserModel(request.POST) # 以 request.POST 取得資料並建立表單\n if user_form.is_valid(): # 如果驗證通過 \n\n signin_account = user_form.cleaned_data['signin_account'] # 以 form_name.cleaned_data[' '] 收集資料\n signin_email = user_form.cleaned_data['signin_email']\n check_password = user_form.cleaned_data['check_password']\n user_gender = user_form.cleaned_data['user_gender']\n \n # 建立一筆新資料\n new_record = models.UserModel.objects.create(user_account = signin_account, user_password = check_password, user_email = signin_email, user_gender = user_gender )\n\n # 將新資料存入資料庫\n new_record.save()\n \n # to do by JS message = '註冊中...完成後將自動返回首頁!'\n\n send_mail(signin_account,signin_email) # 寄認證信\n return redirect ('/index/') # 驗證成功後返回首頁\n\n else:\n message = '資料驗證失敗,請重新輸入!'\n except IntegrityError:\n message = '帳號與他人重複 請重新輸入!'\n \n # 此處不可加 else,因網頁必須無條件返回 http 物件\n return render(request,'index.html',locals())\n\ndef account_ckeck(request):\n status = ''\n if request.method == 'GET' and request.is_ajax():\n current_account = request.GET.get('current_account')\n\n status = 'ok' \n if not current_account:\n status = 'illegal'\n return HttpResponse(status) \n\n import re\n # 強迫帳號格式為英文數字混和,並介於5~12個字元\n if re.match(r'^(?=^.{5,12}$)(([a-zA-Z]+\\d+|\\d+[a-zA-Z]+)[a-zA-Z0-9]*)$', current_account ):\n pass\n else:\n status = 'illegal'\n return HttpResponse(status)\n\n # 驗證帳號是否重複\n users = models.UserModel.objects.all()\n # objects.all() 格式為大物件包含小物件\n for user in users:\n if 
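# The `content` literal in send_mail() above lost its HTML markup in
# extraction; only the visible text survives and the activation link target
# is not recoverable. A hedged reconstruction that takes the URL as a
# parameter instead of hard-coding it (build_activation_mail and verify_url
# are hypothetical names, not part of the original):
def build_activation_mail(verify_url):
    return (
        "<html><body>"
        "<p>歡迎成為克里斯美食中心會員,請利用下列網址進行認證後即可使用服務:</p>"
        '<p><a href="%s">我要認證!</a></p>'
        "</body></html>" % verify_url
    )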
user.user_account == current_account: \n # 以.存取 QuerySet 中物件屬性\n status = 'duplicate'\n return HttpResponse(status)\n else:\n pass\n return HttpResponse(status)\n\ndef email_check(request):\n if request.method == 'GET' and request.is_ajax():\n import re\n current_email = request.GET.get('current_email')\n status = 'ok'\n if re.match(r'^[\\w-]+@[\\w\\.-]+\\.[a-zA-Z]+$', current_email ):\n return HttpResponse(status)\n else:\n status = 'not_ok'\n return HttpResponse(status)\n return HttpResponse(status)\n\n#@login_required\ndef add_to_cart(request, product_id, quantity):\n\n quantity_count = 0\n if request.method == 'GET' and request.is_ajax():\n product = models.ProductModel.objects.get(id = product_id)\n cart = Cart(request)\n cart.add(product, product.p_price, quantity)\n\n quantity_count = cart.count()\n # 使用 cart 物件之 count() 方法, ps. 要直接執行需有"括號"!!  \n return HttpResponse(quantity_count)\n return HttpResponse(quantity_count)\n\n#@login_required\ndef remove_from_cart(request, product_id):\n product = models.ProductModel.objects.get(id = product_id)\n cart = Cart(request)\n cart.remove(product)\n return redirect('/cart/')\n\n#@login_required\ndef shop_cart(request):\n\n status = g_status\n all_cateragies = models.Category.objects.all()\n cart = Cart(request)\n\n return render(request,'cart.html',locals())\n\n\n\n\n\n ","sub_path":"ShopCart/myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"650380119","text":"import collections\nimport pendulum\nimport re\nfrom flask import current_app\nfrom app.api.services import evidence_service\nfrom app.api.helpers import is_valid_email, state_to_long_name\n\n\nclass SupplierValidator(object):\n\n def __init__(self, supplier):\n self.supplier = supplier\n\n def validate_all(self):\n result = (\n self.validate_basics() +\n self.validate_documents() +\n self.validate_representative() +\n self.validate_recruiter() +\n self.validate_candidates()\n )\n warnings = [n for n in result if n.get('severity', 'error') == 'warning']\n errors = [n for n in result if n.get('severity', 'error') == 'error']\n validation_result = collections.namedtuple('Notification', ['warnings', 'errors'])\n return validation_result(warnings=warnings, errors=errors)\n\n def validate_basics(self):\n errors = []\n if not self.supplier.name:\n errors.append({\n 'message': 'You must include your business name in your profile.',\n 'severity': 'error',\n 'step': 'business-details',\n 'id': 'S001'\n })\n\n return errors\n\n def validate_pricing(self):\n errors = []\n pricing = self.supplier.data.get('pricing', {})\n recruiter = self.supplier.data.get('recruiter')\n supplier_domains = self.supplier.domains\n frontend_url = current_app.config['FRONTEND_ADDRESS']\n\n if recruiter == 'no' or recruiter == 'both':\n for supplier_domain in supplier_domains:\n if (\n supplier_domain.domain.name in self.supplier.assessed_domains and\n supplier_domain.domain.name not in pricing\n ):\n errors.append({\n 'message': 'You must provide your maximum daily rate (including GST) for {domain} using '\n '{{Skills Framework for the Information Age (SFIA) Level 5}} '\n 'as a guide.'\n .format(\n domain=supplier_domain.domain.name\n ),\n 'links': {\n 'Skills Framework for the Information Age (SFIA) Level 5': 'https://www.sfia-online.org/en/'\n 'framework/sfia-7/busskills/'\n 'level-5'\n },\n 'severity': 'error',\n 'step': 'pricing',\n 'id': 'S002-{}'.format(supplier_domain.domain.id)\n 
})\n elif supplier_domain.price_status == 'rejected':\n errors.append({\n 'message': 'Your daily rate for {domain} exceeds the maximum '\n 'threshold that applies to this category. '\n 'Update your case study to meet {{additional criteria}} '\n 'then submit for assessment.'\n .format(\n domain=supplier_domain.domain.name\n ),\n 'links': {\n 'additional criteria': 'https://marketplace1.zendesk.com/hc/en-gb/'\n 'articles/333757011655-Assessment-criteria'\n },\n 'severity': 'info', # TODO: this message needs some work. switch back when ready\n 'step': 'pricing',\n 'id': 'S003-{}'.format(supplier_domain.domain.id)\n })\n\n return errors\n\n def __validate_case_study(self, case_study):\n errors = []\n frontend_url = current_app.config['FRONTEND_ADDRESS']\n if case_study.status == 'rejected':\n errors.append({\n 'message': 'You must update {{{title}}} to demonstrate the {{minimum number of criteria}} for {domain}.'\n .format(\n title=case_study.data.get('title', ''),\n domain=case_study.data.get('service')\n ),\n 'links': {\n 'minimum number of criteria': 'https://marketplace1.zendesk.com/hc/'\n 'en-gb/articles/333757011655-Assessment-criteria',\n case_study.data.get('title'): '{}/case-study/{}'.format(frontend_url, case_study.id)\n },\n 'severity': 'warning',\n 'step': 'case-study',\n 'id': 'S005-{}'.format(case_study.id)\n })\n\n return errors\n\n def validate_documents(self):\n documents = self.supplier.data.get('documents')\n if not documents:\n return [{\n 'message': 'Your seller profile is missing required insurance and financial documents.'\n 'If you have multiple files for a document, please scan and merge as one upload.',\n 'severity': 'error',\n 'step': 'documents',\n 'id': 'S006'\n }]\n\n now = pendulum.now('Australia/Canberra')\n return (self.__validate_document(documents, 'liability', now) +\n self.__validate_document(documents, 'indemnity', now) +\n self.__validate_document(documents, 'workers', now) +\n self.__validate_document(documents, 'financial', now, False))\n\n def __validate_document(self, documents, name, now, has_expiry=True):\n errors = []\n document = documents.get(name)\n document_required = (\n (\n name == 'workers' and\n (\n document and\n document.get('noWorkersCompensation', False) is False\n )\n ) or (\n document and\n 'noWorkersCompensation' not in document\n )\n )\n\n name_translation = {\n 'indemnity': 'Professional Indemnity Insurance',\n 'liability': 'Public Liability Insurance',\n 'workers': 'Workers Compensation Insurance',\n 'financial': 'Financial statement'\n }\n document_name = name_translation.get(name)\n\n if not document:\n errors.append({\n 'message': 'Your seller profile is missing your {document_name} document.'\n .format(\n document_name=document_name\n ),\n 'severity': 'error',\n 'step': 'documents',\n 'id': 'S007-{}'.format(name)\n })\n return errors\n\n filename = document.get('filename', '')\n if not filename and document_required:\n errors.append({\n 'message': 'You must set a filename for your {document_name} document.'\n .format(\n document_name=document_name\n ),\n 'severity': 'error',\n 'step': 'documents',\n 'id': 'S008-{}'.format(name)\n })\n\n if has_expiry:\n expiry = document.get('expiry')\n if not expiry and document_required:\n errors.append({\n 'message': 'You must set an expiry date for your {document_name} document.'\n .format(\n document_name=document_name\n ),\n 'severity': 'error',\n 'step': 'documents',\n 'id': 'S009-{}'.format(name)\n })\n elif document_required:\n try:\n expiry_date = pendulum.parse(expiry)\n if 
now.date() > expiry_date.date():\n e = pendulum.instance(expiry_date)\n delta = now.diff(e).in_days()\n message = 'Your {document_name} document has expired. Please upload an updated version.'\n severity = 'warning'\n if delta > 28:\n message = (\n 'Your {document_name} document has expired. Please upload an updated version. '\n 'Failure to provide this documentation may result in the '\n 'suspension of your seller profile.'\n )\n severity = 'error'\n errors.append({\n 'message': message\n .format(\n document_name=document_name\n ),\n 'severity': severity,\n 'step': 'documents',\n 'id': 'S010-{}'.format(name)\n })\n elif now.add(days=28).date() > expiry_date.date():\n errors.append({\n 'message': 'Your {document_name} document will expire on {expiry_date}. '\n 'Please upload an updated version before the expiry date.'\n .format(\n document_name=document_name,\n expiry_date=expiry_date.date()\n ),\n 'severity': 'warning',\n 'step': 'documents',\n 'id': 'S011-{}'.format(name)\n })\n except ValueError:\n errors.append({\n 'message': 'Please fix the format of the expiry date for your '\n '{document_name} document, eg 21/09/2019'\n .format(\n document_name=document_name\n ),\n 'severity': 'error',\n 'step': 'documents',\n 'id': 'S012-{}'.format(name)\n })\n\n return errors\n\n def validate_recruiter(self):\n errors = []\n recruiter = self.supplier.data.get('recruiter')\n\n if recruiter and (recruiter == 'yes' or recruiter == 'both'):\n labour_hire = self.supplier.data.get('labourHire', {})\n now = pendulum.now('Australia/Canberra').date()\n for state, state_value in labour_hire.items():\n if not state_value or state == 'sa':\n continue\n licence_number = state_value.get('licenceNumber')\n expiry = state_value.get('expiry')\n if not licence_number or not expiry:\n errors.append({\n 'message': (\n 'Licence number and expiry must be both filled for {}'.format(state_to_long_name(state))\n ),\n 'severity': 'warning',\n 'step': 'recruiter'\n })\n\n if expiry:\n try:\n expiry_date = pendulum.from_format(\n expiry,\n 'YYYY-MM-DD',\n tz='Australia/Sydney'\n )\n\n if now > expiry_date.date():\n errors.append({\n 'message': 'Your {} labour hire licence has expired.'.format(state_to_long_name(state)),\n 'severity': 'warning',\n 'step': 'recruiter',\n 'id': 'S014'\n })\n except ValueError:\n errors.append({\n 'message': '\"{}\" is an invalid date format'.format(expiry),\n 'severity': 'error',\n 'step': 'recruiter',\n 'id': 'S014'\n })\n return errors\n\n def validate_representative(self, step=None):\n errors = []\n if not step:\n step = 'your-info'\n\n representative = self.supplier.data.get('representative', '').replace(' ', '')\n if not representative:\n errors.append({\n 'message': 'Authorised representative name is required',\n 'severity': 'error',\n 'step': step,\n 'id': 'S013-representative'\n })\n\n phone = self.supplier.data.get('phone', '').replace(' ', '')\n match = re.search(r'[ 0-9()+]+', phone)\n if not phone:\n errors.append({\n 'message': 'Authorised representative phone is required',\n 'severity': 'error',\n 'step': step,\n 'id': 'S013-phone'\n })\n elif (\n len(phone) < 10 or\n not match or\n (match and phone[match.span()[0]:match.span()[1]] != phone)\n ):\n errors.append({\n 'message': 'Authorised representative phone is not valid',\n 'severity': 'error',\n 'step': step,\n 'id': 'S013-phone'\n })\n\n email = self.supplier.data.get('email', '').replace(' ', '')\n if not email:\n errors.append({\n 'message': 'Authorised representative email is required',\n 'severity': 'error',\n 'step': 
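# The expiry checks in __validate_document above fall into three windows:
# expired more than 28 days ago (error), expired (warning), and expiring
# within the next 28 days (warning). A compact pendulum sketch of the same
# classification; the function name and sample date are illustrative:
import pendulum

def expiry_state(expiry, now=None):
    now = now or pendulum.now('Australia/Canberra')
    exp = pendulum.parse(expiry)
    if now.date() > exp.date():
        days_past = now.diff(exp).in_days()
        return 'error' if days_past > 28 else 'warning'
    if now.add(days=28).date() > exp.date():
        return 'warning'          # still valid but expiring soon
    return 'ok'

# expiry_state('2000-01-01') -> 'error' (expired long ago)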
step,\n 'id': 'S013-email'\n })\n elif not is_valid_email(email):\n errors.append({\n 'message': 'Authorised representative email is not valid',\n 'severity': 'error',\n 'step': step,\n 'id': 'S013-email'\n })\n\n return errors\n\n def validate_candidates(self):\n errors = []\n candidates = self.supplier.data.get('candidates', {})\n recruiter = self.supplier.data.get('recruiter')\n\n required_fields = {\n 'database_size': 'Candidate database size',\n 'active_candidates': 'Number of candidates looking',\n 'margin': 'Margin',\n 'markup': 'Mark-up',\n 'placed_candidates': 'Number of candidates successfully placed'\n }\n\n if recruiter == 'yes' or recruiter == 'both':\n if candidates == {} or candidates is None:\n errors.append({\n 'message': 'Candidate information is required',\n 'severity': 'error',\n 'step': 'candidates',\n 'id': 'S015'\n })\n else:\n for field, label in required_fields.items():\n value = candidates.get(field)\n if not value:\n errors.append({\n 'field': field,\n 'message': '{} is required'.format(label),\n 'severity': 'error',\n 'step': 'candidates',\n 'id': 'S015'\n })\n else:\n errors = errors + self.__validate_candidate_value(field, value, label)\n\n return errors\n\n def __validate_candidate_value(self, field, value, label):\n errors = []\n\n fields_with_whole_numbers = {\n 'database_size': True,\n 'active_candidates': True,\n 'placed_candidates': True\n }\n\n fields_with_decimals = {\n 'margin': True,\n 'markup': True\n }\n\n if fields_with_whole_numbers.get(field):\n try:\n int(value)\n except ValueError:\n errors.append({\n 'field': field,\n 'message': '{} must be a whole number'.format(label),\n 'severity': 'error',\n 'step': 'candidates',\n 'id': 'S015'\n })\n\n if fields_with_decimals.get(field):\n whole_number = None\n decimal = None\n\n try:\n whole_number = int(value)\n except ValueError:\n pass\n\n try:\n decimal = float(value)\n except ValueError:\n pass\n\n if whole_number is None and decimal is None:\n errors.append({\n 'field': field,\n 'message': '{} must be a whole number or a decimal'.format(label),\n 'severity': 'error',\n 'step': 'candidates',\n 'id': 'S015'\n })\n\n return errors\n","sub_path":"app/api/business/validators/supplier_validator.py","file_name":"supplier_validator.py","file_ext":"py","file_size_in_byte":16955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"11329866","text":"#/* -.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n#\n# File Name : distance.py\n#\n# Purpose :\n#\n# Creation Date : 15-01-2019\n#\n# Last Modified : Tue Jan 15 18:06:38 2019\n#\n# Created By : Hongjian Fang: hfang@mit.edu \n#\n#_._._._._._._._._._._._._._._._._._._._._.*/\ndef distance_mesh(lat1,lon1,lat2,lon2):\n import numpy as np\n R = 6371\n\n dLat = lat2-lat1\n dLon = lon2-lon1\n a = np.sin(dLat/2) * np.sin(dLat/2) + np.sin(dLon/2) * np.sin(dLon/2) * np.cos(lat1) * np.cos(lat2)\n c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))\n d = R * c\n return d\n","sub_path":"distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"438749380","text":"# -*- coding: utf-8 -*-\n\nimport chardet, time\nfrom util import toDict, check_params\n\n\nclass InputWithName(object):\n\n\tdef __init__(self, driver, name):\n\t\t\"\"\"\n\t\tCreate a new driver that will issue commands using the wire protocol.\n\n\t\t:Args:\n\t\t - command_executor - Either a command.CommandExecutor object or a string that specifies the 
URL of a remote server to send commands to.\n\t\t - desired_capabilities - Dictionary holding predefined values for starting a browser\n\t\t - browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.\n\t\t\"\"\"\n\t\tself.driver = driver\n\t\tself.name = name\n\n\t@check_params\n\tdef type(self, value):\n\t\telement = self.driver.find_element_by_name(self.name)\n\t\telement.clear()\n\t\telement.send_keys(value)\n\n\nclass TextWithClass(object):\n\n\tdef __init__(self, driver, class_):\n\t\tself.driver = driver\n\t\tself.class_ = class_\n\n\tdef Text(self):\n\t\treturn self.driver.find_element_by_class_name(self.class_).text\n\n\nclass BtnWithClass(object):\n\n\tdef __init__(self, driver, classname):\n\t\tself.driver = driver\n\t\tself.classname = classname\n\n\tdef click(self):\n\t\tself.driver.find_element_by_class_name(self.classname).click()\n\n\nclass BtnWithId(object):\n\n\tdef __init__(self, driver, id_):\n\t\tself.driver = driver\n\t\tself.id_ = id_\n\n\tdef click(self):\n\t\tself.driver.find_element_by_id(self.id_).click()\n\n\nclass BtnWithText(object):\n\n\t@check_params\n\tdef __init__(self, driver, text):\n\t\tself.driver = driver\n\t\tself.text = text\n\n\tdef click(self):\n\t\tfor ele in self.driver.find_elements_by_tag_name('button'):\n\t\t\tif ele.text == self.text:\n\t\t\t\tele.click()\n\t\t\t\treturn True\n\t\treturn False\n\n\nclass AWithText(object):\n\n\t@check_params\n\tdef __init__(self, driver, text):\n\t\tself.driver = driver\n\t\tself.text = text\n\n\tdef click(self):\n\t\tfor ele in self.driver.find_elements_by_tag_name('a'):\n\t\t\tif ele.text == self.text:\n\t\t\t\tele.click()\n\t\t\t\treturn True\n\t\treturn False\n\n\nclass ElementWithClass(object):\n\n\t@check_params\n\tdef __init__(self, driver, class_, text):\n\t\tself.driver = driver\n\t\tself.class_ = class_\n\t\tself.text = text\n\n\tdef click(self):\n\t\tfor ele in self.driver.find_elements_by_class_name(self.class_):\n\t\t\tif ele.text == self.text:\n\t\t\t\tele.click()\n\t\t\t\treturn True\n\t\treturn False\n\n\nclass PageControl(object):\n\n\tdef __init__(self, driver):\n\t\tself.driver = driver\n\n\tdef switch(self, modulename):\n\t\tjs = '''var alist = $('a')\n\t\t\t\t\tfor (var i=0, a; a=alist[i++];){\n\t\t\t\t\t\tif(a.innerText == \"%s\")\n\t\t\t\t\t\t\ta.click()\n\t\t\t\t\t}\n\t\t\t''' % modulename\n\t\tself.driver.execute_script(js)\n\n\tdef loading(self):\n\t\tself.driver.wait_for_loading()\n\n\tdef switch_to_form(self, id_= None, text= None, formId= None):\n\t\ttime.sleep(1)\n\t\tif id_ is not None:\n\t\t\tself.driver.find_element_by_id(id_).click()\n\t\telif text is not None:\n\t\t\tAWithText(self.driver, text).click()\n\n\t\tif formId is not None:\n\t\t\treturn self.driver.find_element_by_id(formId)\n\n\nclass DropMenuWithText(object):\n\n\tdef __init__(self, driver, name_= None):\n\t\tself.driver = driver\n\t\tself.name_ = name_\n\n\t@check_params\n\tdef choose(self, text):\n\t\tif self.name_ is not None:\n\t\t\tself.driver.find_element_by_name(self.name_).click()\n\t\ttime.sleep(1)\n\t\taList = self.driver.find_elements_by_tag_name('a')\n\t\tfor a in aList:\n\t\t\tif a.text == text:\n\t\t\t\ta.click()\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef submit(self, btnOk):\n\t\tself.driver.find_element_by_id(btnOk).click()\n\nclass DropMenuWithTitle(object):\n\n\tdef __init__(self, driver, name):\n\t\tself.driver = driver\n\t\tself.name = name\n\n\t@check_params\n\tdef choose_by_title(self, 
title):\n\t\tself.driver.find_element_by_name(self.name).click()\n\t\ttime.sleep(1)\n\t\taList = self.driver.find_elements_by_tag_name('a')\n\t\tfor a in aList:\n\t\t\tif a.get_attribute('title') == title:\n\t\t\t\ta.click()\n\t\t\t\treturn True\n\t\treturn False\n\nclass Table(object):\n\n\tdef __init__(self, driver, tableId):\n\t\tself.driver = driver\n\t\tself.tableId = tableId\n\n\tdef choose_from_popList(self, row):\n\t\tdiv = self.driver.find_element_by_id(self.tableId)\n\t\ttable = div.find_element_by_tag_name('table')\n\t\ttbody = table.find_element_by_tag_name('tbody')\n\t\ttrs = tbody.find_elements_by_tag_name('tr')\n\t\tfor rowdx, tr in enumerate(trs):\n\t\t\tif rowdx == row:\n\t\t\t\ttds = tr.find_elements_by_tag_name('td')\n\t\t\t\ttds[0].click()\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef get_from_traditionList(self, row):\n\t\tretList = []\n\n\t\tdiv = self.driver.find_element_by_id(self.tableId)\n\t\ttbody = div.find_element_by_tag_name('tbody')\n\t\ttrs = tbody.find_elements_by_tag_name('tr')\n\t\tfor rowdx, tr in enumerate(trs):\n\t\t\tif rowdx == row:\n\t\t\t\ttds = tr.find_elements_by_tag_name('td')\n\t\t\t\tfor index, td in enumerate(tds):\n\t\t\t\t\tretList.append(td.get_attribute('title'))\n\t\t\t\treturn retList\n\t\treturn False\n\n\tdef choose_from_traditionList(self, row):\n\t\tdiv = self.driver.find_element_by_id(self.tableId)\n\t\ttable = div.find_element_by_class_name('table-editable')\n\t\ttbody = table.find_element_by_tag_name('tbody')\n\t\ttrs = tbody.find_elements_by_tag_name('tr')\n\t\tfor rowdx, tr in enumerate(trs):\n\t\t\tif rowdx == row:\n\t\t\t\ttds = tr.find_elements_by_tag_name('td')\n\t\t\t\ttds[0].click()\n\t\t\t\treturn True\n\t\treturn False\n\n\nclass PopMenuWithName(object):\n\n\tdef __init__(self, driver, menuInfo, name_= None, id_= None):\n\t\tself.driver = driver\n\t\tself.name_ = name_\n\t\tself.id_ = id_\n\t\tself.menuInfo = toDict(menuInfo)\n\n\t@check_params\n\tdef choose(self, text):\n\t\tdriver = self.menuInfo.driver\n\n\t\tif self.name_ is not None:\n\t\t\tself.driver.find_element_by_name(self.name_).click()\n\t\tif self.id_ is not None:\n\t\t\tself.driver.find_element_by_id(self.id_).click()\n\n\t\tdriver.wait_for_loading()\n\n\t\tsearch_form = driver.find_element_by_id(self.menuInfo.formId)\n\t\tsrcName = search_form.find_element_by_name(self.menuInfo.srcName)\n\t\tsrcName.clear()\n\t\tsrcName.send_keys(text)\n\t\tsearch_form.find_element_by_class_name('xtion-search-icon').click()\n\t\t\n\t\tdriver.wait_for_loading()\n\n\t\tlist_table = Table(driver, self.menuInfo.tblId)\n\t\tlist_table.choose_from_popList(0)\n\n\t\tif self.menuInfo.has_key('btnOk'):\n\t\t\tdriver.find_element_by_id(self.menuInfo.btnOk).click()\n\n\nclass Radio(object):\n\n\tdef __init__(self, driver, name):\n\t\tself.driver = driver\n\t\tself.name = name\n\n\tdef choose(self, text):\n\t\tpElement = self.driver.find_element_by_xpath('//input[@name= \"%s\"][1]/parent::label' % self.name)\n\t\tfor span in pElement.find_elements_by_tag_name('span'):\n\t\t\tif span.text == ' ' + text:\n\t\t\t\tspan.click()\n\t\t\t\treturn True\n\t\treturn False\n\n\nclass DatePiker(object):\n\n\tmonthMap = {\n\t\tu'十二月': '12',\n\t\tu'十一月': '11',\n\t\tu'十月': '10',\n\t\tu'九月': '9',\n\t\tu'八月': '8',\n\t\tu'七月': '7',\n\t\tu'六月': '6',\n\t\tu'五月': '5',\n\t\tu'四月': '4',\n\t\tu'三月': '3',\n\t\tu'二月': '2',\n\t\tu'一月': '1',\n\t}\n\n\n\tdef __init__(self, driver, id_):\n\t\tself.driver = driver\n\t\tself.id_ = id_\n\t\tself.dateEle = None\n\n\tdef getmonth(self):\n\t\t'''\n\t\tauthor: 
chenpengyu\n\t\ttime: 2015-11-26\n\t\tusage: 从日期控件中获取月份\n\t\targ: [params][row] eg: getmonth(element)\n\t\treturns: get month from element\n\t\t \n\t\t'''\n\t\tmonthYear = TextWithClass(self.dateEle, 'switch').Text()\n\t\tcurrentDay = monthYear.split(' ', 1)[::-1] \n\t\tfor key, value in self.monthMap.items():\n\t\t\tif key == currentDay[1]:\n\t\t\t\tcurrentDay[1] = value\n\t\treturn int(currentDay[1])\n\n\tdef getyear(self):\n\t\t'''\n\t\tauthor: chenpengyu\n\t\ttime: 2015-11-26\n\t\tusage: 从日期控件中获取年份\n\t\targ: [params][row] eg: getyear(element)\n\t\treturns: get year from element\n\t\t \n\t\t''' \n\n\t\tmonthYear = TextWithClass(self.dateEle, 'switch').Text()\n\t\treturn int(monthYear.split(' ', 1)[::-1][0])\n\n\n\tdef checkyear(self, destYear):\n\t\t'''\n\t\tauthor: chenpengyu\n\t\ttime: 2015-11-26\n\t\tusage: 检查年份\n\t\targ: [params][row] eg: checkyear(element, '2015')\n\t\treturns: none\n\t\t \n\t\t'''\n\t\tprevBtn = BtnWithClass(self.dateEle, 'prev')\n\t\tnextBtn = BtnWithClass(self.dateEle, 'next')\n\n\t\twhile True:\n\t\t\tsrcYear = self.getyear()\n\t\t\tif srcYear == destYear:\n\t\t\t\tbreak\n\t\t\telif srcYear > destYear:\n\t\t\t\t#click prev\n\t\t\t\tprevBtn.click()\n\t\t\telif srcYear < destYear:\n\t\t\t\t#click next\n\t\t\t\tnextBtn.click()\n\n\n\tdef checkmonth(self, destmonth):\n\t\t'''\n\t\tauthor: chenpengyu\n\t\ttime: 2015-11-26\n\t\tusage: 检查月份\n\t\targ: [params][row] eg: checkyear(element, '10')\n\t\treturns: none\n\t\t \n\t\t''' \n\t\tprevBtn = BtnWithClass(self.dateEle, 'prev')\n\t\tnextBtn = BtnWithClass(self.dateEle, 'next')\n\n\t\twhile True:\n\t\t\tsrcMonth = self.getmonth()\n\t\t\tif srcMonth == destmonth:\n\t\t\t\tbreak\n\t\t\telif srcMonth > destmonth:\n\t\t\t\t#click prev\n\t\t\t\tprevBtn.click()\n\t\t\telif srcMonth < destmonth:\n\t\t\t\t#click next\n\t\t\t\tnextBtn.click()\n\n\tdef checkday(self, destday):\n\t\t'''\n\t\tauthor: chenpengyu\n\t\ttime: 2015-11-26\n\t\tusage: 检查日期\n\t\targ: [params][row] eg: checkyear(element, '15')\n\t\treturns: none\n\t\t \n\t\t'''\n\t\tElementWithClass(self.dateEle, 'day', destday).click()\n\n\n\tdef chooseBlockDateControl(self, name):\n\t\t'''\n\t\tauthor: chenpengyu\n\t\ttime: 2015-12-03\n\t\tusage: 选中打开的日期控件\n\t\targ: chooseBlockDateControl(driver, driver, 'name')\n\t\treturns: 日期的element\n\t\t \n\t\t''' \n\t\tdates = self.driver.find_elements_by_class_name(name)\n\t\tstatus = None\n\t\tfor dateControl in dates:\n\t\t\tstyle = dateControl.get_attribute('style')\n\t\t\ttry:\n\t\t\t\tstatus = style.split('display: ', 1)[1].split(';',1)[0]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tif status == 'block':\n\t\t\t\treturn dateControl.find_element_by_class_name('datetimepicker-days')\n\n\n\tdef choose(self, destDate):\n\t\t'''\n\t\tauthor: chenpengyu\n\t\ttime: 2015-11-26\n\t\tusage: 从日期控件中选择规定的日期\n\t\targ: [params][row] eg: checkyear(element, 'promStartTime', '2015.12.05')\n\t\treturns: none\n\t\t \n\t\t''' \n\t\tdateInfo = destDate.split('-', 2)\n\n\t\t#日期中需要带签到0,为了合理的与列表中的数据判断,但选择时需去前导0\n\t\tif dateInfo[1][0] == '0':\n\t\t\tdateInfo[1] = dateInfo[1][1]\n\t\tif dateInfo[2][0] == '0':\n\t\t\tdateInfo[2] = dateInfo[2][1]\n\n\t\t#展开控件\n\t\tBtnWithId(self.driver, self.id_).click()\n\n\t\t#判断控件是否展开,且定位\n\t\tself.dateEle = 
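# The wrappers above (InputWithName, BtnWithText, DropMenuWithText, ...)
# share one pattern: locate by a single attribute, act, and report success
# as True/False. A sketch of how a login step might compose them; the page
# URL, field names, and button text are illustrative:
from selenium import webdriver

driver = webdriver.Firefox()
driver.get('http://example.com/login')           # hypothetical page
InputWithName(driver, 'username').type('tester')
InputWithName(driver, 'password').type('secret')
if not BtnWithText(driver, 'Login').click():     # click() reports success
    raise RuntimeError('login button not found')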
self.chooseBlockDateControl('datetimepicker')\n\n\t\tself.checkyear(int(dateInfo[0]))\n\t\tself.checkmonth(int(dateInfo[1]))\n\t\tself.checkday(dateInfo[2])","sub_path":"atdriver/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":9968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"277103197","text":"\ndef read_file() -> list:\n\tinput_file = open(\"input.txt\", \"r\")\n\tlines = input_file.readlines()\n\treturn lines\n\n# Function where we count how many times each character in the string \n# appears and we populate the corresponding dictionary\ndef count_chars_in_lines() -> list:\n\tlines = read_file()\n\tcount_chars = {}\n\tcount = 0\n\tfor line in lines:\n\t\tcount += 1\n\t\tprint(\"\\nLine \", count, \" : \", line, end = \"\")\n\t\tfor ch in line:\n\t\t\tif ch not in count_chars:\n\t\t\t\tcount_chars[ch] = 1\n\t\t\telse:\n\t\t\t\tcount_chars[ch] += 1\n\t\tprint(count_chars)\n\t\tcount_chars = {}\n\treturn count_chars\n\ndef find_occurences():\n\tcount_chars = count_chars_in_lines()\n\tresult_dict = {\"2_occur\":0, \"3_occur\": 0}\n\tif 2 in char_found.values():\n\t\tresult_dict[\"2_occur\"] += 1\n\tif 3 in char_found.values():\n\t\tresult_dict[\"3_occur\"] += 1\n\treturn (result_dict[\"2_occur\"] * result_dict[\"3_occur\"])\n\nif __name__ == \"__main__\":\n\tchecksum = find_occurences()\n\tprint(checksum)","sub_path":"2018/Day02/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"107832286","text":"from flask import g\nfrom datetime import datetime\nimport pymongo\nfrom user_service import UserService\n\n\nclass FollowService:\n def __init__(self):\n pass\n\n @staticmethod\n def follow(user_id, requester_id):\n\n if user_id == requester_id:\n raise Exception(\"You are already following yourself\")\n\n count = g.mongo.follows.count({\"follower_id\": requester_id, \"following_id\": user_id})\n\n if count != 0:\n raise Exception(\"You are already following this user\")\n\n inserted_id = g.mongo.follows.insert_one({\n \"following_id\": user_id,\n \"follower_id\": requester_id,\n \"unread\": 0,\n \"created_on\": datetime.now(),\n \"modified_on\": datetime.now()\n }).inserted_id\n\n return {\n \"id\": str(inserted_id)\n }\n\n @staticmethod\n def un_follow(user_id, requester_id):\n g.mongo.follows.delete_one({\"follower_id\": requester_id, \"following_id\": user_id})\n return {\n \"follower_id\": requester_id,\n \"following_id\": user_id\n }\n\n @staticmethod\n def get_following(requester_id, page, size):\n follows = g.mongo.follows.find({\"follower_id\": requester_id})\\\n .sort(\"unread\", pymongo.DESCENDING)\\\n .limit(size)\\\n .skip(size * page)\n\n follows = list(follows)\n\n user_ids = []\n\n for follow in follows:\n user_ids.append(follow[\"following_id\"])\n\n user_details_map = UserService.get_user_details_map_bulk(user_ids)\n\n result = []\n\n for follow in follows:\n if not follow[\"following_id\"] in user_details_map:\n continue\n\n result.append({\n \"id\": str(follow[\"_id\"]),\n \"follower_id\": follow[\"follower_id\"],\n \"following\": user_details_map[follow[\"following_id\"]],\n \"unread\": follow[\"unread\"],\n \"created_on\": follow[\"created_on\"]\n })\n\n return result\n\n @staticmethod\n def increment_unread_counts(following_id):\n g.mongo.follows.update({\"following_id\": following_id},\n {\"$inc\": {\"unread\": 1}})\n\n @staticmethod\n def 
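# part1.py above has two bugs: count_chars_in_lines() resets its dict on
# every line and ultimately returns an empty one, and find_occurences()
# references an undefined `char_found` outside any per-line loop. The
# checksum (IDs containing some letter exactly twice, times IDs containing
# some letter exactly three times) can be computed like this:
from collections import Counter

def checksum(lines):
    twos = threes = 0
    for line in lines:
        counts = Counter(line.strip()).values()
        twos += 2 in counts
        threes += 3 in counts
    return twos * threes

# checksum(["bababc", "abbcde", "abcccd"]) == 2 * 2 == 4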
reset_unread_count(following_id, follower_id):\n g.mongo.follows.update_one({\"following_id\": following_id, \"follower_id\": follower_id},\n {\"unread\": 0})\n return {\n \"following_id\": following_id,\n \"follower_id\": follower_id\n }\n","sub_path":"libgateway/follow_service.py","file_name":"follow_service.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"461753635","text":"import re\n\ndef fun(s):\n pattern = r'^([a-zA-Z0-9\\_\\-]+)@([a-zA-Z0-9]+)\\.([a-zA-Z0-9]{1,3})$'\n compiled_pattern = re.compile(pattern)\n matches = re.findall(compiled_pattern, s)\n if matches:\n return True\n return False\n\ndef filter_mail(emails):\n return list(filter(fun, emails))\n\nif __name__ == '__main__':\n n = int(input())\n emails = []\n for _ in range(n):\n emails.append(input())\n\nfiltered_emails = filter_mail(emails)\nfiltered_emails.sort()\nprint(filtered_emails)","sub_path":"validating_email_addresses_with_a_filter.py","file_name":"validating_email_addresses_with_a_filter.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"84672483","text":"#!/Users/poudel/anaconda/bin/python\n# -*- coding: utf-8 -*-#\n#\n# Author : Bhishan Poudel; Physics Graduate Student, Ohio University\n# Date : Sep 9, 2017 Sat\n# Last update :\n# Est time :\n\n# Imports\nimport numpy as np\nimport matplotlib.pyplot as plt\n\npvalues = np.logspace(-1, 0, 6)\nx = np.linspace(0, 10)\n# y = np.sin(x) * np.exp(-pval * x) # for different pvals\n\n\n\ndef plot(x):\n norm = lambda pval: (pval - pvalues[0]) / float(pvalues[-1] - pvalues[0]) + 0.1\n for pval in pvalues:\n y = np.sin(x) * np.exp(-pval * x)\n color = plt.cm.YlOrBr(norm(pval))\n plt.plot(x, y, 's', color=color)\n leg = plt.legend(['%0.1f' % v for v in pvalues], ncol=2)\n leg.set_title('decay rate')\n plt.show()\n\ndef main():\n \"\"\"Run main function.\"\"\"\n plot(x)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Tutorial_Python/plotting/sequential_colors/seq_colors2.py","file_name":"seq_colors2.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"161173822","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 7 19:24:26 2018\n\n@author: Theresa Lang\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport uth.utils as utils\n\ndef get_plot_quantities():\n \"\"\" Returns dictionary with all information needed for plotting certain\n quantities. 
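# reset_unread_count() above passes a plain document ({"unread": 0}) to
# update_one, which PyMongo rejects ("update only works with $ operators").
# The other update in this class already uses $inc correctly; the reset
# most likely wants $set, i.e. the corrected call inside that method:
g.mongo.follows.update_one(
    {"following_id": following_id, "follower_id": follower_id},
    {"$set": {"unread": 0}},   # operator form required by update_one
)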
\n \"\"\"\n return {\n 'T': {'function': lambda p: p.atmosphere['T'], \n 'label': 'temperature [K]', \n 'factor': 1,\n 'color': 'C3'},\n 'H2O': {'function': lambda p: p.atmosphere['abs_species-H2O'], \n 'label': 'H$_{2}$O [VMR]', \n 'factor': 1,\n 'color': 'C0'}, \n 'O3': {'function': lambda p: p.atmosphere['abs_species-O3'],\n 'label': 'O$_{3}$ [VMR]',\n 'factor': 1,\n 'color': 'C1'},\n 'rh': {'function': lambda p: p.rh, \n 'label': 'relative humidity [%]', \n 'factor': 1e2,\n 'color': 'C6'}, \n 'liquid_water': {'function': lambda p: p.atmosphere['scat_species-LWC-mass_density'],\n 'label': 'liquid water [kg/m$^3$]',\n 'factor': 1,\n 'color': 'C0'},\n 'ice_water': {'function': lambda p: p.atmosphere['scat_species-IWC-mass_density'],\n 'label': 'ice water [kg/m$^3$]',\n 'factor': 1,\n 'color': 'C4'},\n 'microwave_jacobian': {'function': lambda p: p.jacobian['microwave'][-1], \n 'label': 'microwave \\n jacobian [K/1]', \n 'factor': 1, \n 'color': 'C4'},\n 'microwave_jacobian_limb': {'function': lambda p: p.jacobian['microwave'][0], \n 'label': 'microwave \\n jacobian [K/1]', \n 'factor': 1, \n 'color': 'C4'},\n 'infrared_jacobian': {'function': lambda p: p.jacobian['infrared'][-1], \n 'label': 'infrared \\n jacobian [K/1]', \n 'factor': 1,\n 'color': 'C4'},\n 'net_htr': {'function': lambda p: p.radiation['net_htr'],\n 'label': 'net radiative cooling [K/day]', \n 'factor': -1,\n 'color': 'C2'},\n 'stability': {'function': lambda p: np.append(p.get_stability(), np.nan), \n 'label': 'static stability [K/hPa]',\n 'factor': 1e2,\n 'color': 'C8'},\n 'subsidence': {'function': lambda p: np.append(p.get_diabatic_subsidence(), np.nan), \n 'label': 'diabatic subsidence \\n [hPa/day]', \n 'factor': 1e-2,\n 'color': 'C9'},\n 'pressure': {'function': lambda p: p.atmosphere['p'], \n 'label': 'pressure [hPa]', \n 'factor': 1e-2},\n 'altitude': {'function': lambda p: p.atmosphere['z'], \n 'label': 'altitude [km]', \n 'factor': 1e-3}\n }\n\ndef profiles(Profile, vertical_coordinate, quantities, ax, colors=False):\n \"\"\" plots quantities of a Profile against pressure or altitude.\n \n parameters:\n Profile (Profile): profile object\n vertical_coordinate (string): 'pressure' or 'altitude'\n quantities (list of strings): list of quantities to plot. The strings \n must match the keys of the dictionary returned by \n \"get_plot_quantities\". 
\n \"\"\" \n to_attr = get_plot_quantities()\n \n for q, i in zip(quantities, range(len(quantities))):\n if colors:\n color = to_attr[q]['color']\n if np.logical_or(np.logical_or(q == 'H2O', q == 'O3'), q == 'stability'):\n ax[i].semilogx(to_attr[q]['function'](Profile) * to_attr[q]['factor'], \n to_attr[vertical_coordinate]['function'](Profile) * to_attr[vertical_coordinate]['factor'],\n color=color)\n else: \n ax[i].plot(to_attr[q]['function'](Profile) * to_attr[q]['factor'], \n to_attr[vertical_coordinate]['function'](Profile) * to_attr[vertical_coordinate]['factor'],\n color=color)\n else:\n if np.logical_or(q == 'H2O', q == 'O3'):\n ax[i].semilogx(to_attr[q]['function'](Profile) * to_attr[q]['factor'], \n to_attr[vertical_coordinate]['function'](Profile) * to_attr[vertical_coordinate]['factor'])\n else: \n ax[i].plot(to_attr[q]['function'](Profile) * to_attr[q]['factor'], \n to_attr[vertical_coordinate]['function'](Profile) * to_attr[vertical_coordinate]['factor'])\n \n if q == 'subsidence':\n ax[i].set_xlim(-1, 100)\n \n ax[i].set_xlabel(to_attr[q]['label'])\n if vertical_coordinate == 'pressure': \n ax[i].set_ylim(1050, 1)\n \n ax[0].set_ylabel(to_attr[vertical_coordinate]['label'])\n \n \ndef plot_atmosphere(Profile, vertical_coordinate, profile_index, save=True):\n \"\"\" Plots profiles of temperature, water vapor and ozone.\n \n Parameters:\n Profile (Profile): Profile object containing atmospheric profiles\n vertical_coordinate (string): 'pressure' or 'altitude'\n profile_index (int): Number of profile\n save (logical): True if plot should be saved\n \"\"\"\n\n quantities = ['T', 'H2O', 'O3']\n fig, ax = plt.subplots(1, len(quantities), sharey=True, figsize=(15, 10))\n profiles(Profile, vertical_coordinate, quantities, ax, colors=True)\n \n if save:\n plt.savefig('../plots/{:04}_atmosphere.pdf'.format(profile_index))\n plt.close()\n else:\n plt.show()\n \ndef plot_radiation(Profile, vertical_coordinate, profile_index, save=True):\n \"\"\" Plots profiles of RH, stability, radiative cooling, diabatic velocity\n and the jacobian fo ONE atmospheric profile.\n \n Parameters: \n Profile (Profile): Profile object containing atmospheric profiles\n vertical_coordinate (string): 'pressure' or 'altitude'\n profile_index (int): Number of profile\n save (logical): True if plot should be saved\n \"\"\"\n \n quantities = ['rh', 'stability', 'net_htr', 'subsidence', \n 'microwave_jacobian', 'infrared_jacobian']\n fig, ax = plt.subplots(1, len(quantities), sharey=True, figsize=(15, 10))\n profiles(Profile, vertical_coordinate, quantities, ax, colors=True)\n \n if save:\n plt.savefig('../plots/{:04}_radiation_jacobian.pdf'.format(profile_index))\n plt.close()\n else:\n plt.show()\n \ndef scatter(x, y, save=True):\n \"\"\" Creates a scatterplot of two variables.\n \n Parameters:\n x (vector):\n y (vector):\n save (logical): True if plot should be saved\n \"\"\"\n fig, ax = plt.subplots() \n ax.scatter(x, y, s=5)\n plt.show()\n \ndef tb_uth(tb, uth, color='C1', a=np.nan, b=np.nan, cbtext='Mean temperature [K]', save=True):\n \"\"\"\n Plots ln(UTH) against brigthness temperature. 
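# profiles() above only needs an object exposing .atmosphere and .rh, so
# the plotting path can be smoke-tested with a stub; the class name, sizes,
# and values below are illustrative:
import numpy as np
import matplotlib.pyplot as plt

class StubProfile:
    def __init__(self, n=50):
        self.atmosphere = {
            'p': np.logspace(5, 3, n),            # Pa, surface to ~10 hPa
            'T': np.linspace(290.0, 210.0, n),    # K
            'z': np.linspace(0.0, 18e3, n),       # m
        }
        self.rh = np.linspace(0.8, 0.1, n)        # fraction, scaled to % inside

fig, ax = plt.subplots(1, 2, sharey=True)
profiles(StubProfile(), 'pressure', ['T', 'rh'], ax, colors=True)
plt.show()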
\n \n Parameters:\n tb: brightness temperatures\n uth: uths\n color: color (string or variable)\n r: correlation coefficient\n cbtext: text for colorbar\n \"\"\"\n plt.rcParams.update({'font.size': 25})\n fig, ax = plt.subplots(figsize=(15, 10))\n im = ax.scatter(tb, np.log(uth), c=color, s=30)\n ax.set_xlabel('brightness temperature [K]')\n ax.set_ylabel('ln(UTH)')\n ax.plot(tb, a + b*tb, color='C3')\n #ax.text(250, 1, 'r = {}'.format(np.round(r, 3)))\n #ax.set_ylim(-0.02, 0.02)\n ax.set_ylim(-5, 3)\n \n if np.logical_not(isinstance(color, str)):\n cb = fig.colorbar(im)\n cb.set_clim(0.9, 1.1)\n cb.set_label(cbtext)\n\n plt.show()\n \ndef plot_residuals(tb, uth, a, b, color, cbtext):\n \n plt.rcParams.update({'font.size': 25})\n residuals = np.log(uth) - (a + tb*b)\n \n fig, ax = plt.subplots(figsize=(15, 10))\n im = ax.scatter(tb, residuals, c=color, s=5)\n ax.set_xlabel('brightness temperature [K]')\n ax.set_ylabel('deviation from linear regression')\n ax.set_ylim(-3, 2)\n \n if np.logical_not(isinstance(color, str)):\n cb = fig.colorbar(im)\n# cb.set_clim(0.8, 1.2)\n cb.set_label(cbtext)\n \n plt.show()\n \ndef plot_old_definition(Profile, spectral_range, profile_index, save=True):\n \"\"\" Plot illustration of the old definition of the UTH.\n Parameters:\n Profile: profile object\n Spectral range (string): 'infrared' or 'microwave'\n profile index (numerical): profile number\n \"\"\"\n \n jacobian = Profile.jacobian[spectral_range][0]\n weights = np.abs(jacobian / np.sum(jacobian)) * 10\n rh = Profile.rh\n pressure = Profile.atmosphere['p']\n\n plt.rcParams.update({'font.size': 15})\n fig, ax = plt.subplots(1, 2, figsize=(10, 10))\n ax[0].plot(rh * 1e2, pressure * 1e-2, color='C0')\n ax[1].plot(jacobian, pressure * 1e-2, color='C1')\n \n ax[0].set_ylim(1050, 10)\n ax[0].set_xlim(0, 1e2)\n ax[1].set_ylim(1050, 10)\n ax[1].set_xlim(-1.5, 0.5)\n \n ax[0].set_ylabel('Pressure [hPa]')\n ax[0].set_xlabel('RH [%]')\n #ax[1].set_xlabel('{} \\n jacobian [K / 1]'.format(spectral_range))\n ax[1].set_xlabel('AMSU-B channel 18 \\n humidity jacobian [K / 1]'.format(spectral_range))\n \n for lev in range(len(pressure)-1):\n ax[0].fill_between([0, 1e2], pressure[lev] * 1e-2, \\\n pressure[lev+1] * 1e-2, color='C1', alpha = weights[lev], lw=0)\n \n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n \n if save:\n plt.savefig('../plots/{:04}_uth_old.pdf'.format(profile_index))\n plt.close()\n else:\n plt.show()\n \ndef plot_new_definition(Profile, profile_index, save=True):\n \"\"\" Plot illustration of the new definition of the UTH.\n Parameters:\n Profile: profile object\n profile index (numerical): profile number\n \"\"\"\n rh = Profile.rh\n pressure = Profile.atmosphere['p']\n subsidence = Profile.get_diabatic_subsidence()\n p_gradient_subsidence = Profile.get_gradient_subsidence()\n \n plt.rcParams.update({'font.size': 15})\n fig, ax = plt.subplots(1, 2, figsize=(10, 10))\n ax[0].plot(rh * 1e2, pressure * 1e-2, color='C0')\n ax[1].plot(subsidence * 1e-2, pressure[:-1] * 1e-2, color='C3')\n \n ax[1].plot(np.array([-10, 150]), np.ones(2) * p_gradient_subsidence * 1e-2, color='C3', alpha=0.4, linestyle='--')\n ax[0].fill_between([-10, 150], p_gradient_subsidence * 1e-2 + 300, \\\n p_gradient_subsidence * 1e-2, alpha=0.4, color='C3', lw=0)\n \n ax[0].set_ylim(1050, 10)\n ax[0].set_xlim(0, 1e2)\n ax[1].set_ylim(1050, 10)\n ax[1].set_xlim(-10, 150)\n \n ax[0].set_ylabel('Pressure [hPa]')\n ax[0].set_xlabel('RH [%]')\n ax[1].set_xlabel('diabatic subsidence [hPa/day]')\n \n plt.tight_layout(rect=[0, 0.03, 1, 
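# tb_uth() and plot_residuals() above take the regression coefficients
# a, b of ln(UTH) = a + b * Tb as arguments but never compute them. One
# way to obtain them from samples (np.polyfit returns the highest degree
# first, hence the unpacking order); the values here are illustrative:
import numpy as np

tb = np.array([240.0, 250.0, 260.0, 270.0])
uth = np.array([0.9, 0.45, 0.2, 0.1])
b, a = np.polyfit(tb, np.log(uth), deg=1)    # slope b, intercept a
residuals = np.log(uth) - (a + b * tb)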
0.95])\n \n if save:\n plt.savefig('../plots/{:04}_uth_new.pdf'.format(profile_index))\n plt.close()\n else:\n plt.show()\n \ndef plot_new_definition_wvcolumn(Profile, iwv_threshold_1, iwv_threshold_2, profile_index, save=True):\n \"\"\" Plot illustration of the new definition of the UTH using the .\n Parameters:\n Profile: profile object\n profile index (numerical): profile number\n \"\"\"\n rh = Profile.rh\n pressure = Profile.atmosphere['p']\n temp = Profile.atmosphere['T']\n altitude = Profile.atmosphere['z']\n H2O = Profile.atmosphere['abs_species-H2O']\n iwv_above = utils.atmosphere.calc_iwv_profile(pressure, temp, altitude, H2O)\n \n p_uth_start = Profile.get_iwv_level(iwv_threshold_1)\n p_uth_end = Profile.get_iwv_level(iwv_threshold_2)\n \n plt.rcParams.update({'font.size': 15})\n fig, ax = plt.subplots(1, 2, figsize=(10, 10))\n ax[0].plot(rh * 1e2, pressure * 1e-2, color='C0')\n ax[1].plot(iwv_above, pressure * 1e-2, color='C2')\n \n ax[1].plot(np.array([0, 60]), np.ones(2) * p_uth_start * 1e-2, color='C2', alpha=0.4, linestyle='--')\n ax[1].plot(np.array([0, 60]), np.ones(2) * p_uth_end * 1e-2, color='C2', alpha=0.4, linestyle='--')\n ax[0].fill_between([-10, 150], p_uth_start * 1e-2, \\\n p_uth_end * 1e-2, alpha=0.4, color='C2', lw=0)\n \n ax[0].set_ylim(1050, 10)\n ax[0].set_xlim(0, 1e2)\n ax[1].set_ylim(1050, 10)\n #ax[1].set_xlim(0, 60)\n ax[0].set_ylabel('Pressure [hPa]')\n ax[0].set_xlabel('RH [%]')\n ax[1].set_xlabel('IWV above [kg/m$^2$]')\n \n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n \n if save:\n plt.savefig('../plots/{:04}_uth_new.pdf'.format(profile_index))\n plt.close()\n else:\n plt.show()\n \n \n \n \n \n \n \n\n ","sub_path":"radiative_transfer/uth/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":13049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"649545585","text":"\"\"\"\nChaining Multiple Conditions\n\n\nThe university gives students discounts on tuition fees depending on their performance:\n90-100 => 50%\n80-89 => 30%\n70-79 => 10%\n0-69 => 0%\nWrite a program that will take the scores from the first and second semesters, then calculate the average score, and output the discount, depending on the score.\n\nSample Input\n67\n83\n\nSample Output\n10\n\nExplanation\nAverage of 67 and 83 is 75, which is in range of 70 to 79 and gets a 10% discount. Do not include the % symbol in the output.\n\"\"\"\n\nscore1 = int(input())\nscore2 = int(input())\nif (score1+score2)//2 >= 90:\n print(50)\nelif (score1+score2)//2 >=80 and (score1+score2)//2 <=89:\n print(30)\nelif (score1+score2)//2 >= 70 and (score1+score2)//2 <= 79:\n print(10)\nelif (score1+score2)//2 <= 69:\n print(0)","sub_path":"3. 
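# The discount ladder above recomputes (score1 + score2) // 2 in every
# branch and uses floor division (67 and 84 average to 75, not 75.5).
# If integer averaging is intended, the chain collapses to:
score1 = int(input())
score2 = int(input())
avg = (score1 + score2) // 2
if avg >= 90:
    print(50)
elif avg >= 80:
    print(30)
elif avg >= 70:
    print(10)
else:
    print(0)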
Control Structures/TuitionDiscounts.py","file_name":"TuitionDiscounts.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"29169373","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nrsa_conf.py\n\n\"\"\"\nimport shlex as sh\n\n\ndef config():\n f = open('rsa.conf','r')\n for line in f:\n items = sh.split(line)\n print(items[0]+\":\"+items[1])\n \n \n \n \n \n","sub_path":"rsa_conf.py","file_name":"rsa_conf.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"578557452","text":"# Dependencies\nimport requests as req\nimport json\nimport zipcodes\nimport pandas as pd\nimport numpy as np\nimport http.client\nfrom datetime import datetime\nimport time as time\nimport os\nfrom RE_functions import get_real_estate_data\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nfrom sqlalchemy import Column, Integer, String, Float, Text\n\n\n###------------------------------------------###\n### START GET MARKET HEALTH\ndef get_market_health_and_extremes(zip, Market_Health, Home_sales, Rentals, session):\n market_dict = {}\n results = session.query( Market_Health.market_health_index).filter(Market_Health.zip_code == zip).all()\n if len(results) > 0:\n for mhi in results:\n market_health_index = mhi.market_health_index\n market_dict['market_health_index'] = market_health_index\n else:\n #no market health data for input zip code; store 0 as a N/A value\n market_dict[\"market_health_index\"] = 0\n print(\"Market Health: %s\" % market_dict['market_health_index'])\n\n results = session.query(Home_sales.s2017_12).all()\n all_homes = pd.DataFrame(results, columns=['2017_12'])\n\n results = session.query(Rentals.r2017_12).all()\n all_rentals = pd.DataFrame(results, columns=['2017_12'])\n\n #get median home values and rental prices\n median_home_value = all_homes['2017_12'].median()\n median_rental_price = all_rentals['2017_12'].median()\n\n market_dict[\"median_home_value\"] = median_home_value\n market_dict[\"median_rental_price\"] = median_rental_price\n return market_dict\n### END GET GET MARKET HEALTH\n###------------------------------------------###\n\n###------------------------------------------###\n### START GET CENSUS DATA\n# takes in a zip which is converted to lat/long for census block query to return a County\n# census popuations are pulled by country from 2010 through 2016\n# each year holds a result along with a column for the proceeding year's difference\ndef census_data(zip,zip_latlon, census, session):\n\n sel = [zip_latlon.zip_code, zip_latlon.lat, zip_latlon.lon]\n results = session.query(*sel).\\\n filter(zip_latlon.zip_code ==zip)\n zip_data = {}\n for result in results:\n zip_data['ZIP_CODE'] = result[0]\n zip_data['LAT'] = result[1]\n zip_data['LON'] = result[2]\n lat = zip_data['LAT']\n lng = zip_data['LON']\n\n cen_block_url = ('http://data.fcc.gov/api/block/find?format=json&latitude=%s&longitude=%s&showall=true' % (lat, lng))\n lat_lon_county = req.get(cen_block_url).json()\n state_name = lat_lon_county['State']['name']\n if state_name != 'District of Columbia':\n county_name = lat_lon_county['County']['name']+ ' County'\n else:\n county_name = lat_lon_county['County']['name']\n\n\n print(state_name)\n sel = [census.state, 
census.county, census.pop_2010,census.pop_2011,census.pop_2012,\n census.pop_2013, census.pop_2014, census.pop_2015,census.pop_2016]\n county_census_pop = session.query(*sel).\\\n filter(census.county == county_name)\n\n # Match County and State name to retrieve population information from 2010 through 2016\n pop_data = {}\n def diff (col1, col2):\n d = col2 - col1\n e = round(((d/col1) * 100), 2)\n return e\n for row in county_census_pop:\n pop_data['STATE'] = row[0]\n pop_data['COUNTY'] = row[1]\n pop_data['POPULATION_2010'] = row[2]\n pop_data['POPULATION_2011'] = row[3]\n pop_data['POPULATION_2012'] = row[4]\n pop_data['POPULATION_2013'] = row[5]\n pop_data['POPULATION_2014'] = row[6]\n pop_data['POPULATION_2015'] = row[7]\n pop_data['POPULATION_2016'] = row[8]\n pop_data['diff_2010_2011'] = diff(pop_data['POPULATION_2010'], pop_data['POPULATION_2011'])\n pop_data['diff_2011_2012'] = diff(pop_data['POPULATION_2011'], pop_data['POPULATION_2012'])\n pop_data['diff_2012_2013'] = diff(pop_data['POPULATION_2012'], pop_data['POPULATION_2013'])\n pop_data['diff_2013_2014'] = diff(pop_data['POPULATION_2013'], pop_data['POPULATION_2014'])\n pop_data['diff_2014_2015'] = diff(pop_data['POPULATION_2014'], pop_data['POPULATION_2015'])\n pop_data['diff_2015_2016'] = diff(pop_data['POPULATION_2015'], pop_data['POPULATION_2016'])\n pop_data['diff_2010_2016'] = diff(pop_data['POPULATION_2010'], pop_data['POPULATION_2016'])\n return pop_data\n\n### END GET CENSUS DATA\n###------------------------------------------###\n\n###------------------------------------------###\n### START GET WALKABILITY DATA\ndef get_walk(zip, zip_latlon,session):\n sel = [zip_latlon.zip_code, zip_latlon.lat, zip_latlon.lon]\n results = session.query(*sel).\\\n filter(zip_latlon.zip_code ==zip)\n zip_data = {}\n for result in results:\n zip_data['ZIP_CODE'] = result[0]\n zip_data['LAT'] = result[1]\n zip_data['LON'] = result[2]\n\n lat = zip_data['LAT']\n lng = zip_data['LON']\n\n ##### When need to execute this, put back the api key #####\n walk_api_key = \"\"\n\n walk_url = \"http://api.walkscore.com/score?format=json&\"\n # Build query URL\n query_url = walk_url + \"&lat=\" + str(lat) + \"&lon=\" + str(lng) + \"&transit=1&bike=1\" + \"&wsapikey=\" + walk_api_key\n walk_response = req.get(query_url).json()\n\n # Get the neighborhood data from the response\n walk_score = walk_response['walkscore']\n walk_description =walk_response['description']\n try:\n bike_score = walk_response['bike']['score']\n bike_description = walk_response['bike']['description']\n except:\n bike_score = 0\n bike_description = \"\"\n walk_dict = {\n \"walk_score\": walk_score,\n \"walk_description\": walk_description,\n \"bike_score\": bike_score,\n \"bike_description\": bike_description\n }\n return walk_dict\n\n\n### END GET WALK DATA\n###------------------------------------------###\n\n###------------------------------------------###\n### START GET SCHOOLS FUNCTION\n### CALLS ONBOARD API FOR RADIUS OF 5 MILES TO GATHER SCHOOLS IN THE AREA\n\ndef get_schools(zip, zip_latlon, session):\n\n sel = [zip_latlon.zip_code, zip_latlon.lat, zip_latlon.lon]\n results = session.query(*sel).\\\n filter(zip_latlon.zip_code ==zip)\n zip_data = {}\n for result in results:\n zip_data['ZIP_CODE'] = result[0]\n zip_data['LAT'] = result[1]\n zip_data['LON'] = result[2]\n lat = zip_data['LAT']\n lng = zip_data['LON']\n\n private = 0\n public = 0\n cath = 0\n other = 0\n\n page_size = 50\n #Onboard API Key\n\n #when need to execute, add the API key\n onboard_api_key = 
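The nested `diff` helper inside `census_data` above computes the rounded percent change between two census counts; the same arithmetic as a standalone function with a worked example:

```python
def percent_change(old, new):
    """Percent change from old to new, rounded to two decimals."""
    return round((new - old) / old * 100, 2)

# e.g. a county growing from 100000 to 103500 residents between two years:
print(percent_change(100000, 103500))  # 3.5
```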
\"\"\n\n\n conn = http.client.HTTPSConnection(\"search.onboard-apis.com\")\n school_url = \"/propertyapi/v1.0.0/school/snapshot?\"\n headers = {\n 'accept': \"application/json\",\n 'apikey': onboard_api_key,\n }\n #RADIUS SET TO DEFAULT OF 5 MILES\n point = \"latitude=\" + str(lat) + \"&longitude=\" + str(lng) + \"&radius=5\"\n query_url = school_url + point + \"&pageSize=\" + str(page_size)\n\n #request the first page of school data\n conn.request(\"GET\", query_url, headers=headers)\n res = conn.getresponse()\n resp = json.loads(res.read())\n #counts for types of schools\n private = 0\n public = 0\n cath = 0\n other = 0\n\n #loop through and count up private and public schools\n total_schools = resp['status']['total']\n more_schools = True\n schools_to_get = total_schools\n page = 1\n #print(\"total schools: % s\" % total_schools)\n while more_schools:\n #determine how many results to process\n if schools_to_get - page_size >= 0:\n max_s = page_size\n schools_to_get = schools_to_get - page_size\n else:\n max_s = schools_to_get\n for i in range(0, max_s):\n #track number of types of schools\n sch_type = resp['school'][i]['School']['Filetypetext']\n if sch_type == \"PRIVATE\":\n private = private + 1\n elif sch_type == \"PUBLIC\":\n public = public + 1\n elif sch_type == \"CATHOLIC\":\n cath = cath + 1\n else:\n other = other + 1\n if total_schools - (private+public+cath+other) > 0:\n #get the next page of data\n page = page + 1\n query_url = school_url + point + \"&pageSize=\" + str(page_size) + \"&page=\"+ str(page)\n conn.request(\"GET\", query_url, headers=headers)\n res = conn.getresponse()\n resp = json.loads(res.read())\n resp\n else:\n more_schools = False\n #DICT USED IN THE FLASK APP TO JSONIFY\n school_dict = {\n \"private_school\": private,\n \"public_school\": public,\n \"catholic_school\": cath,\n \"other_school\": other\n }\n return school_dict\n### END GET SCHOOLS FUNCTION\n###------------------------------------------###\n#---------------------------------------------------------------#\n#Query Community API and return the results as JSON object 'resp'\n\n#API URL: https://developer.onboard-apis.com/docs\n#---------------------------------------------------------------#\n\n###------------------------------------------###\n### START GET POIs\ndef barfinder(zip, zip_latlon, session):\n sel = [zip_latlon.zip_code, zip_latlon.lat, zip_latlon.lon]\n results = session.query(*sel).\\\n filter(zip_latlon.zip_code ==zip)\n zip_data = {}\n for result in results:\n zip_data['ZIP_CODE'] = result[0]\n zip_data['LAT'] = result[1]\n zip_data['LON'] = result[2]\n lat = zip_data['LAT']\n lng = zip_data['LON']\n\n # Google API Key\n #when need to run this again, add the API key\n gkey = \"\"\n\n # types of points of interest we care about\n target_types = [\"liquor_store\", \"gym\", \"park\", \"shopping_mall\", \"grocery_or_supermarket\", \"movie_theater\"]\n\n #create a blank dictionary to store results\n poi_results = {}\n\n # loop through each target type and gather the number of each nearby\n for target in target_types:\n\n # set default values\n count = 0\n x = True\n\n # while loop that uses google radar to gather our numbers\n while x == True:\n\n # take in latitude and longitude, set the search radius to 5 miles (8k meters)\n target_area = {\"lat\": lat, \"lng\": lng}\n target_radius = 8000\n\n # create the target urls and use requests to gather the necessary data\n target_url = \"https://maps.googleapis.com/maps/api/place/radarsearch/json\" \\\n 
\"?types=%s&location=%s,%s&radius=%s&key=%s\" % (\n target, target_area[\"lat\"], target_area[\"lng\"], target_radius,\n gkey)\n\n places_data = req.get(target_url).json()\n\n # use the len function to find the count of results\n numbers = len(places_data[\"results\"])\n\n # use a series of if statments to check if we returned results. Run a second time if no results showed up as a check\n if numbers > 0:\n poi_results[target.replace(\"_\", \"\").title()] = numbers\n x = False\n elif count == 1:\n x = False\n else:\n count += 1\n\n # return the results\n if \"Liquorstore\" not in poi_results:\n poi_results[\"Liquorstore\"] = 0\n if \"Gym\" not in poi_results:\n poi_reuslts[\"Gym\"] = 0\n if \"Park\" not in poi_results:\n poi_results[\"Park\"] = 0\n if \"Shoppingmall\" not in poi_results:\n poi_results[\"Shoppingmall\"] = 0\n if \"Groceryorsupermarket\" not in poi_results:\n poi_results[\"Groceryorsupermarket\"] = 0\n if \"Movietheater\" not in poi_results:\n poi_results[\"Movietheater\"] = 0\n print(poi_results)\n return poi_results\n\n### END GET POIs\n###------------------------------------------###\n\n###------------------------------------------###\n### START GET COMMUNITY DATA FUNCTION\n### CALLS ONBOARD API FOR: ge demographics / avg Jan and Jun temps / crime rate / sales tax\ndef get_community_data(zip, census, zip_latlon, Market_Health, Home_sales, Rentals, session):\n\n #Onboard API Key\n\n #when executing this, add the real API key\n onboard_api_key = \"\"\n\n conn = http.client.HTTPSConnection(\"search.onboard-apis.com\")\n headers = {\n 'accept': \"application/json\",\n 'apikey': onboard_api_key,\n }\n\n community_url = \"/communityapi/v2.0.0/Area/Full/?\"\n queries=\"AreaId=ZI\"+zip\n query_url = community_url + queries\n conn.request(\"GET\", query_url, headers=headers)\n res = conn.getresponse()\n resp = json.loads(res.read())\n community_dict = {}\n community_dict['crime'] = resp['response']['result']['package']['item'][0]['crmcytotc']\n community_dict['sales_tax']= resp['response']['result']['package']['item'][0]['salestaxrate']\n community_dict['avg_jan'] = resp['response']['result']['package']['item'][0]['tmpavejan']\n community_dict['avg_jul'] = resp['response']['result']['package']['item'][0]['tmpavejul']\n age_columns = ['age00_04','age05_09','age10_14','age15_19','age20_24','age25_29','age30_34','age35_39','age40_44',\n 'age45_49','age50_54','age55_59','age60_64','age65_69','age70_74','age75_79','age80_84','agegt85']\n labels = []\n age_groups = []\n age_group_values = []\n county_name = resp['response']['result']['package']['item'][0]['countyname']\n for x in age_columns:\n group_name = x\n age_groups.append(x)\n route = resp['response']['result']['package']['item'][0][x]\n age_group_values.append(int(route))\n label = x.replace('age','').replace('_','-').replace('gt85',' >=85') #format labels\n labels.append(label)\n\n # Create DF with summarized age groups\n age_by_zip = {\"Groups\": age_groups, \"Count\": age_group_values}\n age_by_zip_df = pd.DataFrame(age_by_zip)\n community_dict['_0_09'] = age_by_zip_df[0:2]['Count'].sum()\n community_dict['_10_19'] = age_by_zip_df[2:4]['Count'].sum()\n community_dict['_20_29'] = age_by_zip_df[4:6]['Count'].sum()\n community_dict['_30_39'] = age_by_zip_df[6:8]['Count'].sum()\n community_dict['_40_49'] = age_by_zip_df[8:10]['Count'].sum()\n community_dict['_50_59'] = age_by_zip_df[10:12]['Count'].sum()\n community_dict['_60_69'] = age_by_zip_df[12:14]['Count'].sum()\n community_dict['_70_plus'] = 
age_by_zip_df[14:18]['Count'].sum()\n school = get_schools(zip, zip_latlon, session)\n market = get_market_health_and_extremes(zip, Market_Health, Home_sales, Rentals, session)\n walk = get_walk(zip, zip_latlon, session)\n poi_data = barfinder(zip, zip_latlon, session)\n\n community_dict['private_school'] = school['private_school']\n community_dict['public_school'] = school['public_school']\n community_dict['catholic_school'] = school['catholic_school']\n community_dict['other_school'] = school['other_school']\n community_dict['median_home_value'] = market['median_home_value']\n community_dict['median_rental_price'] = market['median_rental_price']\n community_dict['market_health_index'] = market['market_health_index']\n community_dict['walk_score'] = walk['walk_score']\n community_dict['walk_description'] = walk['walk_description']\n community_dict['bike_score'] = walk['bike_score']\n community_dict['bike_description'] = walk['bike_description']\n #real_estate = get_real_estate(zip, Home_sales, Rentals, session)\n\n #note: will need to jsonify REdata and re_dict\n REdata, re_dict = get_real_estate_data(zip, Home_sales, Rentals, session)\n census_dict = census_data(zip,zip_latlon, census, session)\n #Community dict used for FLASK app to jsonify\n return [community_dict, poi_data, census_dict, REdata, re_dict]\n### END GET COMMUNITY DATA FUNCTION\n###------------------------------------------###\n###------------------------------------------###\n### START GET CITY SLIP HISTORY DATA FUNCTION\n#Mirrors the city_slip sqlite database and presents all records stored\ndef cityslip_history():\n Base = declarative_base()\n engine = create_engine(\"sqlite:///city_slip.sqlite\")\n session = Session(engine)\n city_query = engine.execute(\"SELECT * FROM city_slip\").fetchall()\n print(city_query)\n citySlip_records = []\n for c in city_query:\n record_dict = {}\n record_dict['00_record'] = c[0]\n record_dict['01_zip_code'] = c[1]\n record_dict['02_city'] = c[2]\n record_dict['03_state'] = c[3]\n record_dict['04_county'] = c[4]\n record_dict['05_score_date'] = c[5]\n record_dict['06_avg_home_value'] = c[6]\n record_dict['07_avg_rent'] = c[7]\n record_dict['08_re_market_health'] = round(c[8])\n record_dict['09_avg_winter_temp'] = c[9]\n record_dict['10_avg_summer_temp'] = c[10]\n record_dict['11_total_schools'] = c[11]\n record_dict['12_total_pois'] = c[12]\n record_dict['13_pop_growth'] = c[13]\n record_dict['14_sales_tax_rate'] = c[14]\n record_dict['15_walkability'] = c[15]\n record_dict['16_crime_risk'] = c[16]\n if c[17] < 1:\n record_dict['17_score'] = round(c[17] * 100)\n else:\n record_dict['17_score'] = round(c[17])\n citySlip_records.append(record_dict)\n\n return citySlip_records\n\n### END GET CITY SLIP HISTORY DATA FUNCTION\n###------------------------------------------###\n","sub_path":"census_funcs.py","file_name":"census_funcs.py","file_ext":"py","file_size_in_byte":17462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"250024536","text":"from Crypto.Cipher import AES\n\nfrom com.sbk.hex.func import str_xor\n\n\ndef cbc_decrypt(key, cypher_text, block_size=16):\n k = bytes.fromhex(key)\n ct = bytes.fromhex(cypher_text)\n iv = ct[:block_size]\n ct1 = ct[block_size:]\n obj = AES.new(k, AES.MODE_CBC, iv)\n padded_str = obj.decrypt(ct1)\n padding_amount = ord(padded_str[len(padded_str) - 1:])\n return padded_str[:-padding_amount]\n\n\ndef my_cbc_decrypt(key, cipher_text, block_size=16):\n cipher_text_blocks = [cipher_text[i:i + (block_size 
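`get_community_data` above collapses eighteen five-year age groups into decades by summing fixed positional slices of the DataFrame. The same pairing rule expressed directly on the list of counts, as a sketch:

```python
def decade_buckets(counts):
    """Sum consecutive pairs of 5-year age-group counts into decades.

    counts is assumed ordered age00_04 ... agegt85 (18 entries, as in
    get_community_data); the last four groups are merged into 70-plus.
    """
    decades = [counts[i] + counts[i + 1] for i in range(0, 14, 2)]
    decades.append(sum(counts[14:18]))  # 70 and above
    return decades

print(decade_buckets(list(range(18))))  # [1, 5, 9, 13, 17, 21, 25, 62]
```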
* 2)] for i in range(0, len(cipher_text), (block_size * 2))]\n cipher_text_blocks_bytes = list(map(lambda x: bytes.fromhex(x), cipher_text_blocks))\n k = bytes.fromhex(key)\n\n pt = \"\"\n\n ln = len(cipher_text_blocks_bytes)\n for i in reversed(range(1, ln)):\n current_block = cipher_text_blocks_bytes[i]\n previous_block = cipher_text_blocks_bytes[i - 1]\n cipher = AES.new(k, AES.MODE_ECB).decrypt(current_block)\n plaintext = str_xor(cipher, previous_block)\n pt = plaintext + pt\n\n padding_amount = ord(pt[-1:])\n\n return pt[:-padding_amount]\n\n","sub_path":"com/sbk/cbc/cbc_decrypt.py","file_name":"cbc_decrypt.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"213864532","text":"'''\n19. Remove Nth Node From End of List\nRuntime: 48 ms, beat 99.69% of Python3 submissions for Remove Nth Node From End of List\nMemory usage: 6.4 MB, beat 95.75% of Python3 submissions for Remove Nth Node From End of List\n\n'''\n# Two-pass approach: first count the nodes, then unlink the target.\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def removeNthFromEnd(self, head: 'ListNode', n: 'int'):\n num = 0\n r = head\n s = head\n while (r != None):\n num += 1\n r = r.next\n if num == 1: # i.e. num == 1 and n == 1, so removing the only node leaves an empty list\n return None\n\n if num == n:\n head = head.next\n return head\n\n if num > n:\n i = 1\n while (i < num - n):\n s = s.next\n i += 1\n term = s.next\n s.next = term.next\n return head\n","sub_path":"learn_notes/leetcode/delete_element_in_lined_list.py","file_name":"delete_element_in_lined_list.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"578557452","text":"import sublime_plugin\n\nclass RunBuildCvsCommand(sublime_plugin.WindowCommand):\n # helper\n # * for setting the build_system\n # * running build\n # * and then resetting the build_system to automatic\n def run(self, build_system, build_variant):\n self.window.run_command( \"set_build_system\", {\"file\": build_system } )\n self.window.run_command( \"build\", {\n \"variant\": build_variant\n })\n self.window.run_command(\"set_build_system\", {\"file\":\"\"}) # Set build_system to *automatic*\n\nclass QuickCvsCommitBuildTargetCommand(sublime_plugin.WindowCommand):\n def run(self, cmd = [], file_regex = \"\", line_regex = \"\", working_dir = \"\", encoding = \"utf-8\", env = {}, path = \"\", shell = False):\n self.execDict = {\n \"path\" : path,\n \"shell\" : shell,\n \"cmd\" : cmd,\n \"file_regex\" : file_regex,\n \"line_regex\" : line_regex,\n \"working_dir\" : working_dir,\n \"encoding\" : encoding,\n \"env\" : env\n }\n self.window.show_input_panel(\"Commit message\", \"\\\"\" + self.execDict[\"cmd\"][3] + \":\\\"\", self.on_done, None, None)\n def on_done(self, message):\n self.execDict[\"cmd\"][3] = message\n self.window.run_command('exec', self.execDict)","sub_path":"QuickCVS.py","file_name":"QuickCVS.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"126664213","text":"# Function count_upper takes a file-like object and returns the number of\n# capital ASCII letters it contains\ndef count_upper(file):\n count = 0\n for l in file.read():\n if l.isupper():\n count += 1\n file.close()\n return count\n\n\nif __name__ == '__main__':\n file = open(\"text.txt\", 'r')\n 
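Both decrypt helpers in cbc_decrypt.py above trust the final byte of the plaintext as the padding length and slice it off blindly. A slightly safer sketch that validates standard PKCS#7 padding (which the `AES.MODE_CBC` usage implies) before stripping it, assuming the plaintext is `bytes`:

```python
def pkcs7_unpad(data, block_size=16):
    """Strip PKCS#7 padding, raising on malformed input."""
    if not data or len(data) % block_size != 0:
        raise ValueError("invalid padded length")
    n = data[-1]
    if not 1 <= n <= block_size or data[-n:] != bytes([n]) * n:
        raise ValueError("invalid padding")
    return data[:-n]

print(pkcs7_unpad(b"hello world\x05\x05\x05\x05\x05"))  # b'hello world'
```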
print(count_upper(file))\n","sub_path":"Python1/08/count_upper.py","file_name":"count_upper.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"166216122","text":"\"\"\"\nLoads the dialogue corpus, builds the vocabulary\n\"\"\"\n\nimport numpy as np\nimport nltk # For tokenize\nfrom tqdm import tqdm # Progress bar\nimport pickle # Saving the data\nimport math # For float comparison\nimport os # Checking file existance\nimport random # for random number\nimport pandas as pd # data analysis tool\n\nclass TextData:\n def __init__(self, args):\n self.args = args\n self.data_path = 'data/train.csv'\n self.samples_path = 'data/samples.pkl'\n\n self.padToken = -1 # Padding\n self.goToken = -1 # Start of sequence\n self.eosToken = -1 # End of sequence\n self.unknownToken = -1 # Word dropped from vocabulary\n\n self.trainingSamples = [] # 2d array containing each question and his answer [[input,target]]\n\n self.word2id = {}\n self.id2word = {} # For a rapid conversion\n\n self.loadCorpus()\n\n # Plot some stats:\n print('Loaded: {} words, {} training samples'.format(len(self.word2id), len(self.trainingSamples)))\n\n def loadCorpus(self):\n \"\"\"Load/create the conversations data\n Args:\n dirName (str): The directory where to load/save the model\n \"\"\"\n datasetExist = False\n if os.path.exists(self.samples_path):\n datasetExist = True\n\n if not datasetExist: # First time we load the database: creating all files\n print('Training samples not found. Creating dataset...')\n # Corpus creation\n self.createCorpus(self.data_path)\n\n # Saving\n print('Saving dataset...')\n self.saveDataset() # Saving tf samples\n else:\n print('Loading dataset from ...')\n self.loadDataset()\n\n assert self.padToken == 0\n\n def createCorpus(self, data_path):\n \"\"\"Extract all data from the given vocabulary\n \"\"\"\n # Add standard tokens\n self.padToken = self.getWordId(\"\") # Padding (Warning: first things to add > id=0 !!)\n self.goToken = self.getWordId(\"\") # Start of sequence\n self.eosToken = self.getWordId(\"\") # End of sequence\n self.unknownToken = self.getWordId(\"\") # Word dropped from vocabulary\n\n # Remove __eou__ and __eot__ tags\n conversation = self.removeTag(data_path)\n\n # Iterate over rows in conversation dataframe\n\n for index in tqdm(range(1,len(conversation))):\n inputWords = self.extractText(nltk.word_tokenize(conversation.iloc[index]['Context'].decode('utf8')))\n targetWords = self.extractText(nltk.word_tokenize(conversation.iloc[index]['Utterance'].decode('utf8')))\n\n if inputWords and targetWords:\n self.trainingSamples.append([inputWords, targetWords])\n\n\n def extractText(self, words):\n wordIDs = []\n for word in words:\n wordIDs.append(self.getWordId(word))\n return wordIDs\n\n def getWordId(self, word, create=True):\n word = word.lower()\n wordID = self.word2id.get(word,-1)\n\n if wordID==-1:\n if create:\n wordID = len(self.word2id)\n self.word2id[word] = wordID\n self.id2word[wordID] = word\n else:\n wordID = self.unknownToken\n\n return wordID\n\n def removeTag(self, data_path):\n df = pd.read_csv('data/train.csv',header = 0, usecols = [0,1])\n df['Context'] = df['Context'].str.replace('__eou__','')\n df['Context'] = df['Context'].str.replace('__eot__','')\n df['Utterance'] = df['Utterance'].str.replace('__eou__','')\n df['Utterance'] = df['Utterance'].str.replace('__eot__','')\n return df\n\n def saveDataset(self):\n \"\"\"Save samples to file\n Args:\n dirName (str): The 
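`count_upper` above is a plain counting loop; the same result can be had in one pass with a generator expression. A sketch that leaves closing the file to the caller:

```python
def count_upper_compact(file):
    """Count uppercase characters in a file-like object."""
    return sum(1 for ch in file.read() if ch.isupper())

with open("text.txt") as f:
    print(count_upper_compact(f))
```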
directory where to load/save the model\n \"\"\"\n\n with open(self.samples_path, 'wb') as handle:\n data = { # Warning: If adding something here, also modifying loadDataset\n \"word2id\": self.word2id,\n \"id2word\": self.id2word,\n \"trainingSamples\": self.trainingSamples\n }\n pickle.dump(data, handle, -1) # Using the highest protocol available\n\n def loadDataset(self):\n \"\"\"Load samples from file\n Args:\n dirName (str): The directory where to load the model\n \"\"\"\n with open(self.samples_path, 'rb') as handle:\n data = pickle.load(handle) # Warning: If adding something here, also modifying saveDataset\n self.word2id = data[\"word2id\"]\n self.id2word = data[\"id2word\"]\n self.trainingSamples = data[\"trainingSamples\"]\n\n self.padToken = self.word2id[\"\"]\n self.goToken = self.word2id[\"\"]\n self.eosToken = self.word2id[\"\"]\n self.unknownToken = self.word2id[\"\"] # Restore special words\n'''\ndf = pd.read_csv('data/test.csv',header = 0, usecols = [0,1])\ndf['Context'] = df['Context'].str.replace('__eou__','')\ndf['Context'] = df['Context'].str.replace('__eot__','')\ndf['Utterance'] = df['Utterance'].str.replace('__eou__','')\ndf['Utterance'] = df['Utterance'].str.replace('__eot__','')\n#print df.head()\nprint df.iloc[1]['Context']\nprint nltk.word_tokenize(df.iloc[1]['Context'])'''\n\nt = TextData('play')\nt.loadCorpus()\n","sub_path":"textdata.py","file_name":"textdata.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"194628058","text":"#from __future__ import absolute_import\n\nfrom bake.path import *\nfrom bake.task import *\nfrom scheme import *\n\ntry:\n import virtualenv\nexcept ImportError:\n virtualenv = None\n\nclass VirtualEnvTask(Task):\n supported = bool(virtualenv)\n\nclass CreateVirtualEnv(VirtualEnvTask):\n name = 'virtualenv.create'\n description = 'creates a virtual environment'\n parameters = {\n 'distribute': Boolean(description='use distribute', default=True),\n 'executable': Text(description='path to virtualenv script', default='virtualenv'),\n 'isolated': Boolean(description='isolate from site packages', default=False),\n 'path': Path(description='path to destination directory', required=True),\n }\n\n def run(self, runtime):\n options = [self['executable']]\n if self['distribute']:\n options.append('--distribute')\n if self['isolated']:\n options.append('--no-site-packages')\n\n options.append(self['path'])\n runtime.shell(options)\n","sub_path":"bake/lib/virtualenv.py","file_name":"virtualenv.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"50876870","text":"from __future__ import print_function\n# import utils\nimport unittest\n\nimport caffe\nfrom caffe import layers as L, params as P, to_proto\nfrom caffe.proto import caffe_pb2 as v2\n\nimport tempfile\nimport os\nimport uuid\n\n\ndef getSGDSolver(solver):\n \"\"\"Serialize them to files, and loads them from file,\n since PyCaffe doesn't export the constructor of SGDSolver\n from SolverParameter (see _caffe.cpp:306).\n\n This is a hack, using files instead of memory is sad.\n\n For some reason, there are errors while parsing when the file\n is opened in 'w+' mode. 
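`TextData.getWordId` above grows the `word2id`/`id2word` pair lazily, handing each unseen word the next free integer. The core of that scheme in isolation, as a sketch (the fallback id 3 mirrors the order in which TextData registers its special tokens, padding first):

```python
def get_word_id(word, word2id, id2word, create=True, unknown_id=3):
    """Return the id for word, optionally creating a fresh entry."""
    word = word.lower()
    if word in word2id:
        return word2id[word]
    if not create:
        return unknown_id  # id of the unknown-word token
    new_id = len(word2id)
    word2id[word] = new_id
    id2word[new_id] = word
    return new_id

w2i, i2w = {}, {}
print([get_word_id(w, w2i, i2w) for w in "the cat the".split()])  # [0, 1, 0]
```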
So the function opens files in 'w+' mode,\n writes, closes, opens again in 'r' mode, reads, and closes.\n\n Writes solver and network in a unique file.\"\"\"\n\n try:\n # WRITE\n solverName = \"prototxt/\" + \"tmp_\" + uuid.uuid4().hex + \"_pycaffe_to_caffe_solver.sh\"\n solverFileTemp = open(solverName, \"w+\")\n solverFileTemp.write(str(solver))\n solverFileTemp.close()\n\n # READ\n solverFileTemp = open(solverName, \"r\")\n sgdSolver = caffe.SGDSolver(solverFileTemp.name)\n solverFileTemp.close()\n\n return sgdSolver, sgdSolver.net\n except Exception as e:\n print(\"Could not write/parse to/from file:\", e)\n\n\ndef getSolverNet(solver, net):\n \"\"\"Serializes them to files, and loads them back from file,\n since PyCaffe doesn't export the constructor of SGDSolver\n from SolverParameter (see _caffe.cpp:306).\n\n This is a hack, using files instead of memory is sad.\n\n For some reason, there are errors while parsing when the file\n is opened in 'w+' mode. So the function opens files in 'w+' mode,\n writes, closes, opens again in 'r' mode, reads, and closes.\n\n Writes solver and network in two separate files.\"\"\"\n\n try:\n # WRITE\n id = uuid.uuid4().hex\n solverName = \"prototxt/\" + \"tmp_\" + id + \"_pycaffe_to_caffe_solver.sh\"\n netName = \"prototxt/\" + \"tmp_\" + id + \"_pycaffe_to_caffe_net.sh\"\n solverFileTemp = open(solverName, \"w+\")\n netFileTemp = open(netName, \"w+\")\n netFileTemp.write(str(net))\n\n solver.net = netFileTemp.name\n solverFileTemp.write(str(solver))\n\n solverFileTemp.close()\n netFileTemp.close()\n\n # READ\n solverFileTemp = open(solverName, \"r\")\n netFileTemp = open(netName, \"r\")\n\n sgdSolver = caffe.SGDSolver(solverFileTemp.name)\n\n solverFileTemp.close()\n netFileTemp.close()\n\n return sgdSolver, sgdSolver.net\n except Exception as e:\n print(\"Could not write/parse to/from file:\", e)\n\n\ndef saveToFiles(name, solver, net):\n \"\"\"Serialize them to files\"\"\"\n\n try:\n # WRITE\n solverName = \"prototxt/\" + name + \"_solver.sh\"\n netName = \"prototxt/\" + name + \"_net.sh\"\n solverFileTemp = open(solverName, \"w+\")\n netFileTemp = open(netName, \"w+\")\n netFileTemp.write(str(net))\n\n solver.net = netFileTemp.name\n solverFileTemp.write(str(solver))\n\n solverFileTemp.close()\n netFileTemp.close()\n\n return (solverName, netName)\n except Exception as e:\n print(\"Could not write to files:\", e)\n\nclass Param(object):\n \"\"\"Defines a hyper param to estimate\"\"\"\n def __init__(self, name, rangee, default):\n self.name = name\n self.rangee = rangee\n self.default = default\n\n\nclass Objective(object):\n def __init__(self, pretrainError, trainLoss):\n self.pretrainError = pretrainError\n self.trainLoss = trainLoss\n\n\nclass ArchDef(object):\n \"\"\"The parameters to estimate, the function to create blocks,\n objectives, ...\"\"\"\n def __init__(self, objectives, params):\n self.objectives = objectives\n self.params = params\n for key in self.params:\n self.params[key].name = key\n\n def createEncoderBlock(self, net, id, paramValues, outputMask=False):\n \"\"\"Create a block (ex: conv conv pool), and return it as a list\n of layers.\"\"\"\n layers = [net.layer[-1]]\n for i in range(paramValues[\"convLayersPerBlock\"]):\n prefix = str(id) + \"_\" + str(i) + \"_\"\n layers.append(plug(layers[-1], conv(net.layer.add(),\n prefix + \"conv\",\n ks=paramValues[\"kernelSize\"],\n nout=paramValues[\"featuresPerLayer\"],\n stride=paramValues[\"strideConv\"],\n pad=1\n )))\n layers.append(relu(layers[-1], net.layer.add()))\n\n 
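The three helpers above leave `tmp_*` prototxt files behind in `prototxt/`. The standard `tempfile` module can perform the same write-then-parse hack with automatic cleanup; a sketch under the same assumption that `caffe` is importable, as at the top of this file:

```python
import os
import tempfile

import caffe


def sgd_solver_from_param(solver_param):
    """Round-trip a SolverParameter through a temp file into caffe.SGDSolver."""
    fd, path = tempfile.mkstemp(suffix=".prototxt")
    try:
        with os.fdopen(fd, "w") as f:
            f.write(str(solver_param))
        solver = caffe.SGDSolver(path)
        return solver, solver.net
    finally:
        os.remove(path)
```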
prefix = str(id) + \"_\"\n layers.append(plug(layers[-1],\n maxPool(net.layer.add(), prefix + \"pool\",\n ks=2,\n stride=paramValues[\"stridePool\"],\n outputMask=outputMask)))\n layers.pop(0)\n return layers\n\n def createDecoderBlock(self, net, id, encoderBlock, paramValues):\n \"\"\"Create a block (ex: unpool deconv decconv), and return it\n as a list of layers.\"\"\"\n # def maxUnpool(layer, name, ks, stride=1):\n middleKernelSize = paramValues[\"inputSize\"] / (2**paramValues[\"blocks\"])\n unpool_size = middleKernelSize * (2**(paramValues[\"blocks\"] - id))\n prefix = str(id) + \"_\"\n layers = [plug(net.layer[-1],\n maxUnpool(net.layer.add(),\n name=prefix + \"unpool\",\n ks=2,\n unpool_size=unpool_size,\n stride=paramValues[\"stridePool\"]))]\n plug(encoderBlock[-1], layers[-1])\n for i in range(paramValues[\"convLayersPerBlock\"]-1, -1, -1):\n prefix = str(id) + \"_\" + str(i) + \"_\"\n layers.append(plug(layers[-1], conv(net.layer.add(),\n prefix + \"deconv\",\n ks=paramValues[\"kernelSize\"],\n nout=paramValues[\"featuresPerLayer\"],\n stride=paramValues[\"strideConv\"],\n pad=1\n )))\n layers.append(relu(layers[-1], net.layer.add()))\n return layers\n\n\nclass Model(object):\n \"\"\"A version of the network/params, An attempt to train\"\"\"\n def __init__(self, archDef):\n pass\n\n\ndef Hyperestimate(params, archDef):\n \"\"\"Iterate over values of hyperparams, and find the best.\"\"\"\n pass\n\ninteractive = {}\n\n\ndef dataLayer(layer, tops, sourcePath, meanFilePath):\n # data = net.layer.add()\n layer.name = \"data\"\n layer.type = \"Data\"\n layer.data_param.source = sourcePath\n layer.data_param.backend = caffe.proto.caffe_pb2.DataParameter.LMDB\n layer.data_param.batch_size = 64\n layer.transform_param.mean_file = meanFilePath\n\n for top in tops:\n layer.top.append(top)\n\n return layer\n\n\ndef testPhase(layer):\n include = layer.include.add()\n include.phase = v2.TEST\n return layer\n\n\ndef trainPhase(layer):\n include = layer.include.add()\n include.phase = v2.TRAIN\n return layer\n\n\ndef plug(lowerLayer, higherLayer):\n if len(lowerLayer.top) > len(higherLayer.bottom):\n higherLayer.bottom.append(lowerLayer.top[len(higherLayer.bottom)])\n else:\n higherLayer.bottom.append(lowerLayer.top[-1])\n\n return higherLayer\n\n\ndef relu(lowerLayer, layer):\n layer.type = \"ReLU\"\n layer.name = lowerLayer.name + \"_ReLU\"\n layer.bottom.append(lowerLayer.top[0])\n layer.top.append(lowerLayer.top[0])\n return layer\n\n\ndef conv(layer, name, ks, nout, stride, pad=0):\n layer.type = \"Convolution\"\n layer.name = name\n layer.top.append(name)\n\n paramWeight = layer.param.add()\n paramWeight.lr_mult = 1\n paramWeight.decay_mult = 0\n\n paramBias = layer.param.add()\n paramBias.lr_mult = 2\n paramBias.decay_mult = 0\n\n layer.convolution_param.num_output = nout\n layer.convolution_param.stride.append(stride)\n layer.convolution_param.pad.append(pad)\n layer.convolution_param.kernel_size.append(ks)\n layer.convolution_param.weight_filler.type = \"xavier\"\n layer.convolution_param.bias_filler.type = \"constant\"\n layer.convolution_param.bias_filler.value = 0\n return layer\n\n\ndef deconv(layer, name, ks, nout, stride, pad=0):\n layer.type = \"Deconvolution\"\n layer.name = name\n layer.top.append(name)\n\n paramWeight = layer.param.add()\n paramWeight.lr_mult = 1\n paramWeight.decay_mult = 0\n\n paramBias = layer.param.add()\n paramBias.lr_mult = 2\n paramBias.decay_mult = 0\n\n layer.convolution_param.num_output = nout\n layer.convolution_param.stride.append(stride)\n 
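The `unpool_size` expression in `createEncoderBlock`/`createDecoderBlock` above mirrors the encoder arithmetic: every block pools with stride 2, so the spatial size after the encoder is `inputSize / 2**blocks`, and decoder block `id` must unpool back to `inputSize / 2**id`. A quick check with the settings used later in this file (inputSize 32, 3 blocks):

```python
input_size, blocks = 32, 3
middle = input_size // 2 ** blocks          # 4: spatial size after the encoder
for block_id in range(blocks - 1, -1, -1):  # decoder visits blocks-1 .. 0
    unpool_size = middle * 2 ** (blocks - block_id)
    print(block_id, unpool_size)            # 2 -> 8, 1 -> 16, 0 -> 32
```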
layer.convolution_param.pad.append(pad)\n layer.convolution_param.kernel_size.append(ks)\n layer.convolution_param.weight_filler.type = \"xavier\"\n layer.convolution_param.bias_filler.type = \"constant\"\n layer.convolution_param.bias_filler.value = 0\n return layer\n\n\ndef maxPool(layer, name, ks, stride=1, outputMask=False):\n layer.type = \"Pooling\"\n layer.name = name\n layer.top.append(name)\n if outputMask is True:\n layer.top.append(name + \"_mask\")\n layer.pooling_param.pool = v2.PoolingParameter.MAX\n layer.pooling_param.kernel_size = ks\n layer.pooling_param.stride = stride\n return layer\n\n\ndef maxUnpool(layer, name, ks, unpool_size, stride=1):\n layer.type = \"Unpooling\"\n layer.name = name\n layer.top.append(name)\n layer.unpooling_param.unpool = v2.PoolingParameter.MAX\n layer.unpooling_param.kernel_size = ks\n layer.unpooling_param.stride = stride\n layer.unpooling_param.unpool_size = unpool_size\n return layer\n\n\ndef fullyConnected(layer, name, nout):\n layer.type = \"InnerProduct\"\n layer.name = name\n layer.top.append(name)\n\n paramWeight = layer.param.add()\n paramWeight.lr_mult = 1\n paramWeight.decay_mult = 0\n\n paramBias = layer.param.add()\n paramBias.lr_mult = 2\n paramBias.decay_mult = 0\n\n layer.inner_product_param.num_output = nout\n layer.convolution_param.weight_filler.type = \"xavier\"\n layer.convolution_param.bias_filler.type = \"constant\"\n layer.convolution_param.bias_filler.value = 0\n return layer\n\n\ndef dropout(lowerLayer, layer, ratio):\n layer.type = \"Dropout\"\n layer.name = lowerLayer.name + \"_Dropout\"\n layer.bottom.append(lowerLayer.top[0])\n layer.top.append(lowerLayer.top[0])\n\n layer.dropout_param.dropout_ratio = ratio\n return layer\n\n\ndef locallyConnected(layer, name, ks, nout, stride, pad=0):\n layer.type = \"Local\"\n layer.name = name\n layer.top.append(name)\n\n paramWeight = layer.param.add()\n paramWeight.lr_mult = 1\n paramWeight.decay_mult = 0\n\n paramBias = layer.param.add()\n paramBias.lr_mult = 2\n paramBias.decay_mult = 0\n\n layer.local_param.num_output = nout\n layer.local_param.stride = stride\n layer.local_param.pad = pad\n layer.local_param.kernel_size = ks\n layer.local_param.weight_filler.type = \"xavier\"\n layer.local_param.bias_filler.type = \"constant\"\n layer.local_param.bias_filler.value = 0\n return layer\n\n\ndef softmax(layer, name=\"softmax\"):\n layer.type = \"SoftmaxWithLoss\"\n layer.name = name\n layer.top.append(name)\n return layer\n\n\ndef accuracy(layer, top_k, name=\"\"):\n if name == \"\":\n name = \"accuracy_top_\" + str(top_k)\n layer.type = \"Accuracy\"\n layer.name = name\n layer.top.append(name)\n layer.accuracy_param.top_k = top_k\n return layer\n\n\ndef euclideanLoss(layer, name=\"L2_Loss\"):\n layer.type = \"EuclideanLoss\"\n layer.name = name\n layer.top.append(name)\n return layer\n\n\n\nclass TestBasic(unittest.TestCase):\n def setUp(self):\n # self.elements = [0, 1, 2, 4, 8, 16]\n # self.values = bytearray(self.elements)\n # self.bElements = BitOver(self.elements)\n # self.bValues = BitOver(self.values)\n pass\n\n def previousReconstruct(self):\n caffe.set_mode_cpu()\n\n objectives = Objective(0.4, 500000)\n params = {\n \"featuresPerLayer\": Param(\"\", slice(4, 64, 10), 64),\n \"convLayersPerBlock\": Param(\"\", slice(1, 5, 1), 2),\n \"blocks\": Param(\"\", slice(1, 5, 1), 3),\n \"kernelSize\": Param(\"\", slice(1, 5, 1), 3),\n \"kernelSizeLocal\": Param(\"\", slice(1, 5, 1), 1),\n \"strideConv\": Param(\"\", slice(1, 1, 1), 1),\n \"stridePool\": Param(\"\", slice(1, 
5, 1), 3),\n \"inputSize\": Param(\"\", slice(32, 32, 1), 32)\n }\n print(params)\n archDef = ArchDef(objectives, params)\n\n solver_param = v2.SolverParameter()\n # solver_param.test_iter.append(10)\n # solver_param.test_interval = 50\n solver_param.base_lr = 1e-08\n solver_param.display = 1\n solver_param.max_iter = 50\n solver_param.lr_policy = \"fixed\"\n solver_param.momentum = 0\n solver_param.weight_decay = 0.004\n solver_param.snapshot = 200\n solver_param.snapshot_prefix = \"snapshots/reconstructing_full\"\n solver_param.solver_mode = solver_param.CPU\n net = solver_param.net_param\n\n\n # net = caffe.proto.caffe_pb2.NetParameter()\n dataTest = testPhase(dataLayer(net.layer.add(), tops=[\"data\"],\n sourcePath=\"/dataset/cifar100_lmdb_lab/cifar100_test_lmdb\",\n meanFilePath=\"/dataset/cifar100_lmdb_lab/mean.binaryproto\"))\n dataTrain = trainPhase(dataLayer(net.layer.add(), tops=[\"data\"],\n sourcePath=\"/dataset/cifar100_lmdb_lab/cifar100_train_lmdb\",\n meanFilePath=\"/dataset/cifar100_lmdb_lab/mean.binaryproto\"))\n # top = relu(plug(dataTrain, conv(net.layer.add(), \"conv1\", ks=2, nout=50)), net.layer.add())\n # top = plug(top, maxPool(net.layer.add(), \"pool1\", ks=2))\n\n settings = {\n \"featuresPerLayer\": 64,\n \"convLayersPerBlock\": 2,\n \"blocks\": 3,\n \"kernelSize\": 3,\n \"kernelSizeLocal\": 1,\n \"strideConv\": 1,\n \"stridePool\": 2,\n \"inputSize\": 32\n }\n\n blocks = []\n for i in range(settings[\"blocks\"]):\n block = archDef.createEncoderBlock(net, i, settings, outputMask=True)\n blocks.append(block)\n\n middleKernelSize = settings[\"inputSize\"] / (2**settings[\"blocks\"])\n middleConv = plug(blocks[-1][-1], conv(net.layer.add(),\n name=\"middle_conv\",\n ks=middleKernelSize,\n nout=50,\n stride=1\n ))\n middleDeconv = plug(middleConv, deconv(net.layer.add(),\n name=\"middle_deconv\",\n ks=middleKernelSize,\n nout=settings[\"featuresPerLayer\"],\n stride=settings[\"strideConv\"]\n ))\n # print(\"blocks\", block[-1])\n # print(\"blocks\", to_proto(block[-1])) # <<\n # print(to_proto(block))\n # self.assertEqual(len(self.elements)*8, self.bElements.len())\n # self.assertEqual(len(self.values)*8, self.bValues.len())\n\n # middle = L.Convolution(block[-1], kernel_size=4,\n # num_output=50,\n # name=\"middle\")\n\n unblocks = []\n for i in range(settings[\"blocks\"]-1, -1, -1):\n unblock = archDef.createDecoderBlock(net, i, blocks[i], settings)\n unblocks.append(unblock)\n # unblock = archDef.createDecoderBlock(net, 0, block, settings)\n top = plug(unblocks[-1][-1], locallyConnected(net.layer.add(),\n name=\"reconstruct1\",\n ks=settings[\"kernelSizeLocal\"],\n nout=3,\n stride=1\n ))\n top = plug(top, locallyConnected(net.layer.add(),\n name=\"reconstruct2\",\n ks=settings[\"kernelSizeLocal\"],\n nout=3,\n stride=1\n ))\n top = plug(top, plug(dataTrain, euclideanLoss(net.layer.add())))\n\n print(\"net\\n\", net)\n # print(\"unblock\", to_proto(unblock[-1])) # <<\n\n interactive[\"blocks\"] = blocks\n interactive[\"unblocks\"] = unblocks\n interactive[\"net\"] = net\n\n print(\"blocks:\")\n for b in blocks:\n for l in b:\n print(l.name)\n print()\n print(\"unblocks:\")\n for b in unblocks:\n print(b[0].unpooling_param.unpool_size)\n for l in b:\n print(l.name)\n print()\n\n # solver_param = caffe.SGDSolver(solver_param)\n # net.state.phase = v2.TRAIN\n # [solver, net] = getSolverNet(solver_param, net)\n [solver, net] = getSGDSolver(solver_param)\n solver.step(5)\n\n def testClassify(self):\n caffe.set_mode_cpu()\n\n objectives = Objective(0.4, 500000)\n params 
= {\n \"featuresPerLayer\": Param(\"\", slice(4, 64, 10), 64),\n \"convLayersPerBlock\": Param(\"\", slice(1, 5, 1), 2),\n \"blocks\": Param(\"\", slice(1, 5, 1), 3),\n \"kernelSize\": Param(\"\", slice(1, 5, 1), 3),\n \"kernelSizeLocal\": Param(\"\", slice(1, 5, 1), 1),\n \"strideConv\": Param(\"\", slice(1, 1, 1), 1),\n \"stridePool\": Param(\"\", slice(1, 5, 1), 3),\n \"inputSize\": Param(\"\", slice(32, 32, 1), 32)\n }\n print(params)\n archDef = ArchDef(objectives, params)\n\n\n solver_param = v2.SolverParameter()\n # solver_param.test_iter.append(10)\n # solver_param.test_interval = 50\n solver_param.base_lr = 1\n solver_param.display = 1\n solver_param.max_iter = 50\n solver_param.lr_policy = \"fixed\"\n solver_param.momentum = 0.9\n solver_param.weight_decay = 0.004\n solver_param.snapshot = 200\n solver_param.snapshot_prefix = \"snapshots/classify\"\n solver_param.solver_mode = solver_param.CPU\n net = solver_param.net_param\n\n\n # net = caffe.proto.caffe_pb2.NetParameter()\n dataTest = testPhase(dataLayer(net.layer.add(), tops=[\"data\", \"label\"],\n sourcePath=\"/dataset/cifar100_lmdb_lab/cifar100_test_lmdb\",\n meanFilePath=\"/dataset/cifar100_lmdb_lab/mean.binaryproto\"))\n dataTrain = trainPhase(dataLayer(net.layer.add(), tops=[\"data\", \"label\"],\n sourcePath=\"/dataset/cifar100_lmdb_lab/cifar100_train_lmdb\",\n meanFilePath=\"/dataset/cifar100_lmdb_lab/mean.binaryproto\"))\n # top = relu(plug(dataTrain, conv(net.layer.add(), \"conv1\", ks=2, nout=50)), net.layer.add())\n # top = plug(top, maxPool(net.layer.add(), \"pool1\", ks=2))\n\n settings = {\n \"featuresPerLayer\": 64,\n \"convLayersPerBlock\": 2,\n \"blocks\": 3,\n \"kernelSize\": 3,\n \"kernelSizeLocal\": 1,\n \"strideConv\": 1,\n \"stridePool\": 2,\n \"inputSize\": 32\n }\n\n blocks = []\n for i in range(settings[\"blocks\"]):\n block = archDef.createEncoderBlock(net, i, settings, outputMask=False)\n blocks.append(block)\n\n middleKernelSize = settings[\"inputSize\"] / (2**settings[\"blocks\"])\n middleConv = plug(blocks[-1][-1], conv(net.layer.add(),\n name=\"middle_conv\",\n ks=middleKernelSize,\n nout=50,\n stride=1\n ))\n\n top = plug(middleConv, fullyConnected(net.layer.add(), name=\"fc1\", nout=1024))\n top = trainPhase(dropout(top, net.layer.add(), ratio=0.5))\n top = relu(top, net.layer.add())\n\n top = plug(top, fullyConnected(net.layer.add(), name=\"fc2\", nout=100))\n\n top = plug(dataTrain, plug(top, softmax(net.layer.add())))\n\n plug(dataTest, plug(top, testPhase(accuracy(net.layer.add(), 1))))\n plug(dataTest, plug(top, testPhase(accuracy(net.layer.add(), 5))))\n\n print(\"net\\n\", net)\n # print(\"unblock\", to_proto(unblock[-1])) # <<\n\n interactive[\"blocks\"] = blocks\n interactive[\"net\"] = net\n\n print(\"blocks:\")\n for b in blocks:\n for l in b:\n print(l.name)\n print()\n\n [solver, net] = getSGDSolver(solver_param)\n solver.step(5)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/prototype.py","file_name":"prototype.py","file_ext":"py","file_size_in_byte":20865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"198523874","text":"import os\nimport posixpath\n\nfrom spa.wrappers import Response\nfrom spa.static.smart import get_hash, add_hash_to_filepath\n\nBASE_TEMPLATE = \"\"\"<!DOCTYPE html>\n<html>\n<head>\n{stylesheets}\n{extra_head}\n</head>\n<body>\n{body}\n{scripts}\n{extra_foot}\n</body>\n</html>\n\"\"\"\n\nclass HomePage(object):\n template = BASE_TEMPLATE\n rendered = None\n\n def __init__(self, static_url, static_handler, hash_paths=True,\n 
scripts=None, stylesheets=None,\n extra_head='', extra_foot='', template=None):\n\n self.static_url = static_url\n self.static_handler = static_handler\n self.hash_paths = hash_paths\n self.body = body\n self.scripts = scripts or []\n self.stylesheets = stylesheets or []\n self.extra_head = extra_head\n self.extra_foot = extra_foot\n\n if template:\n self.template = template\n\n def build_url(self, filepath):\n if filepath.startswith('/'):\n filepath = filepath[1:]\n\n if self.hash_paths:\n abs_path = os.path.join(self.static_handler.directory, filepath)\n with open(abs_path) as f:\n file_hash = get_hash(f)\n\n filepath = add_hash_to_filepath(filepath, file_hash)\n return posixpath.join(self.static_url, filepath)\n\n def stylesheet_tag(self, stylesheet):\n tmpl = ''\n return tmpl.format(url=self.build_url(stylesheet))\n\n def script_tag(self, script):\n tmpl = ''\n return tmpl.format(url=self.build_url(script))\n\n def get_stylesheet_tags(self):\n return '\\n'.join([self.stylesheet_tag(s) for s in self.stylesheets])\n\n def get_script_tags(self):\n return '\\n'.join([self.script_tag(s) for s in self.scripts])\n\n def render(self):\n if self.rendered is None:\n self.rendered = self.template.format(\n stylesheets=self.get_stylesheet_tags(),\n extra_head=self.extra_head,\n body=self.body,\n scripts=self.get_script_tags(),\n extra_foot=self.extra_foot,\n )\n return self.rendered\n\n def __call__(self, app, req, params):\n return Response(self.render(), content_type='text/html')\n","sub_path":"spa/homepage.py","file_name":"homepage.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"251728128","text":"\"\"\"\nвыводит различные результаты при каждом запуске под Windows\n\"\"\"\n\nimport threading\nimport time\n\ncount = 0\n\n\ndef adder():\n global count\n count = count + 1\n # time.sleep(0.5)\n count = count + 1\n\n\nthreads = []\n\nfor i in range(1000):\n thread = threading.Thread(target=adder, args=())\n thread.start()\n threads.append(thread)\n\nprint(len(threads))\nfor thread in threads:\n thread.join()\n\nprint(count)","sub_path":"PP4E/system/Threads/thread-add-random.py","file_name":"thread-add-random.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"452086848","text":"import os\nimport pandas as pd\n\nfrom .apple_search_ads_reporter import SearchAdsReporter\nfrom .actions import SearchAdsBidAction, SearchAdsPauseKeywordAction, SearchAdsCPAGoalAction, SearchAdsNoAction\n\nfrom regla import Channel, ChannelEntity, RuleAction, RuleActionType, RuleReportType, RuleReporter, RuleReportGranularity, Rule, RuleContext\nfrom heathcliff import AppleSearchAdsCertificate\nfrom heathcliff.mutating import SearchAdsAccount, SearchAds\nfrom bson import ObjectId\nfrom datetime import datetime\nfrom typing import Optional, List, Dict\n\nclass AppleSearchAdsChannel(Channel[SearchAds, any]):\n certificate: Optional[AppleSearchAdsCertificate]=None\n parent_environ: Optional[Dict[str, any]]=None\n\n @property\n def identifier(self) -> str:\n return 'apple_search_ads'\n\n @property\n def title(self) -> str:\n return 'Apple'\n\n def connect(self, credentials: Dict[str, any]):\n self.certificate = AppleSearchAdsCertificate(certificate=credentials)\n self.certificate.connect()\n self.parent_environ = dict(os.environ)\n certs = {\n 'SEARCH-ADS-PEM': self.certificate.pem_path,\n 'SEARCH-ADS-KEY': 
self.certificate.key_path,\n }\n os.environ.update(certs)\n self.api = SearchAds(org_name=self.certificate.org_name)\n \n def disconnect(self):\n super().disconnect()\n os.environ.clear()\n os.environ.update(self.parent_environ)\n self.parent_environ = None\n self.certificate.disconnect()\n self.certificate = None\n\n def rule_context(self, options: Dict[str, any]={}) -> any:\n rule: Rule = options[RuleContext.rule.value]\n self.api.org_name = ''\n self.api.org_id = rule.orgID\n campaigns = self.api.get_campaigns(includeAdGroups=False, includeKeywords=False)\n matching_campaigns = [c for c in campaigns if int(c._id) == int(rule.campaignID)]\n if not matching_campaigns:\n return None\n\n campaign = matching_campaigns[0]\n if rule.adgroupID is None:\n campaign.ad_groups = self.api.get_adgroups(campaignID=campaign._id)\n else:\n campaign.ad_groups = self.api.get_adgroups(campaignID=campaign._id, includeKeywords=False)\n for ad_group in campaign.ad_groups:\n if int(ad_group._id) == int(rule.adgroupID):\n ad_group.keywords = self.api.get_keywords(campaignID=campaign._id, adGroupID=ad_group._id)\n return campaign\n\n def report_type(self, action_type: RuleActionType) -> RuleReportType:\n if action_type is RuleActionType.increaseBid or action_type is RuleActionType.decreaseBid:\n return RuleReportType.keyword\n elif action_type is RuleActionType.increaseCPAGoal or action_type is RuleActionType.decreaseCPAGoal:\n return RuleReportType.adGroup\n elif action_type is RuleActionType.pauseKeyword:\n return RuleReportType.keyword\n elif action_type is RuleActionType.noAction:\n return RuleReportType.keyword\n else:\n raise ValueError('Unsupported search ads action', action_type)\n\n def rule_reporter(self, report_type: Optional[RuleReportType]=None, ad_group_id: Optional[str]=None, rule_id: Optional[ObjectId]=None, data_check_range: Optional[int]=None, raw_report: Optional[pd.DataFrame]=None, report:Optional[pd.DataFrame]=None) -> RuleReporter:\n return SearchAdsReporter(\n reportType=report_type,\n adGroupID=ad_group_id,\n ruleID=rule_id,\n dataCheckRange=data_check_range,\n rawReport=raw_report,\n report=report\n )\n\n def rule_action(self, action_type: RuleActionType, adjustment_value: Optional[float]=None, adjustment_limit: Optional[float]=None) -> RuleAction:\n if action_type is RuleActionType.increaseBid:\n adjustment = 1.0 + adjustment_value / 100.0\n return SearchAdsBidAction(type=action_type, adjustmentValue=adjustment, adjustmentLimit=adjustment_limit)\n elif action_type is RuleActionType.decreaseBid:\n adjustment = 1.0 - adjustment_value / 100.0\n return SearchAdsBidAction(type=action_type, adjustmentValue=adjustment, adjustmentLimit=adjustment_limit)\n elif action_type is RuleActionType.increaseCPAGoal:\n adjustment = 1.0 + adjustment_value / 100.0\n return SearchAdsCPAGoalAction(type=action_type, adjustmentValue=adjustment, adjustmentLimit=adjustment_limit)\n elif action_type is RuleActionType.decreaseCPAGoal:\n adjustment = 1.0 - adjustment_value / 100.0\n return SearchAdsCPAGoalAction(type=action_type, adjustmentValue=adjustment, adjustmentLimit=adjustment_limit)\n elif action_type is RuleActionType.pauseKeyword:\n return SearchAdsPauseKeywordAction(type=action_type)\n elif action_type is RuleActionType.noAction:\n return SearchAdsNoAction(type=action_type)\n else:\n raise ValueError('Unsupported action type', action_type)\n \n def get_entities(self, entity_type: ChannelEntity, parent_ids: Dict[ChannelEntity, str]={}) -> List[Dict[str, any]]:\n if entity_type is ChannelEntity.org:\n 
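`rule_action` above maps a percentage adjustment onto a multiplicative factor, `1 ± adjustment_value / 100`, before handing it to the action classes (which also apply `adjustment_limit`, not shown here). The arithmetic in isolation, with a worked example:

```python
def adjustment_factor(action, pct):
    """Map an 'increase'/'decrease' percentage to a multiplier."""
    return 1.0 + pct / 100.0 if action == "increase" else 1.0 - pct / 100.0

print(adjustment_factor("increase", 15))  # 1.15: a 1.00 bid becomes 1.15
print(adjustment_factor("decrease", 20))  # 0.80: a 1.00 bid becomes 0.80
```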
account = SearchAdsAccount()\n orgs = []\n for api in account.apis:\n orgs.append({\n 'id' : int(api.org_id),\n 'name' : api.org_name,\n })\n return orgs\n elif entity_type is ChannelEntity.campaign:\n self.api.org_id = int(parent_ids[ChannelEntity.org])\n campaigns = []\n for campaign in self.api.get_campaigns(includeAdGroups=False, includeKeywords=False):\n campaigns.append({\n 'org_id': int(self.api.org_id),\n 'id' : int(campaign._id),\n 'name' : campaign.name,\n })\n return campaigns\n elif entity_type is ChannelEntity.ad_group:\n self.api.org_id = int(parent_ids[ChannelEntity.org])\n campaign_id = parent_ids[ChannelEntity.campaign]\n ad_groups = []\n for ad_group in self.api.get_adgroups(campaignID=campaign_id, includeKeywords=False):\n ad_groups.append({\n 'org_id': int(self.api.org_id),\n 'campaign_id': int(campaign_id),\n 'id' : int(ad_group._id),\n 'name' : ad_group.name\n })\n return ad_groups\n else:\n raise ValueError('Unsupported entity type', entity_type)\n\n def granularity_is_compatible(self, granularity: RuleReportGranularity, report_type: RuleReportType, start_date: datetime, end_date: datetime):\n now = datetime.utcnow()\n interval = end_date - start_date\n age = now - start_date\n monthInterval = end_date.month - start_date.month + (end_date.year - start_date.year) * 12\n monthAge = now.month - start_date.month + (now.year - start_date.year) * 12\n if granularity is RuleReportGranularity.hourly:\n if report_type is RuleReportType.searchTerm: return False\n if interval.days > 7: return False\n if age.days > 30: return False\n return True\n elif granularity is RuleReportGranularity.daily:\n if interval.days > 90: return False\n if age.days > 730: return False\n return True\n elif granularity is RuleReportGranularity.weekly:\n if interval.days <= 14: return False\n if interval.days > 365: return False\n if monthAge > 24: return False\n return True\n elif granularity is RuleReportGranularity.monthly:\n if monthInterval <= 3: return False\n if monthAge > 24: return False\n return True\n else:\n raise ValueError('Unsupported report granularity', granularity)","sub_path":"development_packages/apple_search_ads/regla_channels/apple_search_ads/apple_search_ads_channel.py","file_name":"apple_search_ads_channel.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"245844391","text":"from os import path\r\n\r\nDEBUG = 1\r\n\r\nQGIS_PATH = 'C:\\\\Program Files\\\\QGIS Las\\\\apps\\\\qgis'\r\n\r\nFILENAME = path.abspath(path.join(\r\n path.dirname(__file__),\r\n path.pardir,\r\n 'data',\r\n 'road.shp'\r\n))\r\n# alternatively,\r\n# FILENAME = 'C:\\\\_O\\\\GIS-assignments\\\\data\\\\road.shp'\r\n\r\n__all__ = ['DEBUG', 'QGIS_PATH', 'FILENAME']\r\n","sub_path":"road/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"350955293","text":"'''\nPerform one find-and-replace on a sentence using the given arguments, then return the new sentence.\nThe first argument is the sentence on which the find-and-replace is performed.\nThe second argument is the word that will be replaced (the word before replacement).\nThe third argument replaces the second one (the word after replacement).\nNote: preserve the capitalization of the original word when replacing. For example, if you want to replace the word \"Book\" with the word \"dog\", you should substitute \"Dog\".\nExamples:\nmyReplace(\"Let us go to the store\", \"store\", \"mall\") should return \"Let us go to the mall\".\nmyReplace(\"He is Sleeping on the couch\", \"Sleeping\", \"sitting\") should return \"He is Sitting on the couch\".\nmyReplace(\"This has a spellngi error\", \"spellngi\", \"spelling\") should return \"This has a spelling error\".\nmyReplace(\"His name is Tom\", \"Tom\", \"john\") should return 
\"His name is John\"。\nmyReplace(\"Let us get back to more Coding\", \"Coding\", \"algorithms\") 应该返回 \"Let us get back to more Algorithms\"。\n'''\n\n# 代码如下:\ndef myreplace(str, before, after):\n list = str.split( )\n if before in list:\n if before[0].islower():\n after = after[0].lower() + after[1:]\n list[list.index(before)] = after\n elif before[0].isupper():\n after = after[0].upper() + after[1:]\n list[list.index(before)] = after\n print(\" \".join(list))\n else:\n print('句子里面没有找到要修改的词,注意大小写!')\n\nmyreplace(\"Let us go to the store\", \"store\", \"mall\")\nmyreplace(\"He is Sleeping on the couch\", \"Sleeping\", \"sitting\")\nmyreplace(\"This has a spellngi error\", \"spellngi\", \"spelling\")\nmyreplace(\"His name is Tom\", \"Tom\", \"john\")\nmyreplace(\"Let us get back to more Coding\", \"Coding\", \"algorithms\")","sub_path":"中级算法/05查询替换.py","file_name":"05查询替换.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"578224869","text":"from django.contrib import admin\r\nfrom django.utils.html import format_html\r\nfrom django.utils.encoding import force_text\r\nfrom django.utils.safestring import mark_safe\r\nfrom django.forms import TextInput, Textarea\r\nfrom apps.core.admin_globals import InlineObjectLink, AdminImagePreview\r\nfrom apps.catalogue_filters.models import CategoryAttribute\r\nfrom apps.catalogue.models import *\r\n\r\n\r\nFORMFIELD_OVERRIDES = {\r\n models.PositiveIntegerField: {'widget': TextInput(attrs={'size':'30'})},\r\n models.CharField: {'widget': TextInput(attrs={'size':'30'})},\r\n models.TextField: {'widget': Textarea(attrs={'rows':4, 'cols':61})},\r\n}\r\n\r\n\r\n\r\n@admin.register(ProductDocuments)\r\nclass ProductDocumentsAdmin(admin.ModelAdmin): pass\r\n\r\n\r\nclass ProductImagesInline(admin.TabularInline, AdminImagePreview):\r\n model = ProductImages\r\n readonly_fields = ['image_preview']\r\n extra = 0\r\n fieldsets = (\r\n ('Цена', {'fields': ('num', 'image_preview', 'image')}),\r\n )\r\n\r\n\r\nclass ProductCharacteristicsInline(admin.TabularInline):\r\n model = ProductCharacteristics\r\n extra = 1\r\n\r\n\r\nclass ProductDocumentsInline(admin.TabularInline):\r\n model = ProductDocuments\r\n extra = 1\r\n\r\n\r\nclass ProductCertificateInline(admin.TabularInline):\r\n model = ProductCertificate\r\n extra = 1\r\n\r\n\r\n@admin.register(Product)\r\nclass ProductAdmin(admin.ModelAdmin):\r\n def image_preview(self, obj=None):\r\n try:\r\n img = mark_safe(\"\"\"\r\n \"\"\".format(url = obj.get_image_s, width=120, height=120))\r\n return img\r\n except: return '-'\r\n image_preview.short_description = 'Фото'\r\n search_fields = ['name','code']\r\n readonly_fields = [\r\n 'image_preview',\r\n 'volume','volume_weight',\r\n 'get_sm_price','get_md_price','get_bg_price'\r\n ]\r\n list_display = [\r\n 'image_preview','code','name','sm_price','price','category',\r\n 'manufacturer','weight_netto','weight_brutto','volume',\r\n 'volume_weight', 'customs_code']\r\n list_filter = ['category', 'price']\r\n list_editable = ['price','sm_price']\r\n inlines = [\r\n ProductImagesInline, ProductCharacteristicsInline, ProductDocumentsInline, ProductCertificateInline\r\n ]\r\n fieldsets = (\r\n ('Продукт', \r\n {'fields': (\r\n ('image_preview','translate_childs'),\r\n 'category','manufacturer',\r\n 'code','customs_code','name')}\r\n ),\r\n ('Входная цена', {'fields': ('entry_price','calculate_price',),}),\r\n ('Розничная цена', {'fields': (\r\n ('price','price_old'),\r\n 
('price_ua','price_old_ua'),\r\n 'price_box'),\r\n }),\r\n ('Малый опт', {'fields': (('sm_price', 'sm_start', 'get_sm_price','sm_whoosale'),'sm_price_ua')}),\r\n ('Средний опт', {'fields': (('md_price', 'md_start', 'get_md_price','md_whoosale'),'md_price_ua')}),\r\n ('Крупный опт', {'fields': (('bg_price', 'bg_start', 'get_bg_price','bg_whoosale'),'bg_price_ua')}),\r\n \r\n ('Коробка', \r\n {'fields': (\r\n ('box_w','box_l','box_h'),\r\n ('pieces_in_pack','pieces_in_box'),\r\n 'weight_netto','weight_brutto')}\r\n ),\r\n ('Текст', \r\n {'fields': ('description','notes')}\r\n ),\r\n )\r\n","sub_path":"apps/catalogue/admin/admin__product.py","file_name":"admin__product.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"359409923","text":"from django.shortcuts import render,redirect\nfrom django.contrib.auth.forms import UserCreationForm #was used before\nfrom django.contrib import messages\nfrom .forms import UserRegisterForm,UserUpdateForm,ProfileUpdateForm\nfrom django.contrib.auth.decorators import login_required\nfrom users.models import Profile\n\ndef register_view(request):\n if request.method == 'POST':\n form=UserRegisterForm(request.POST)\n if form.is_valid() :\n username=form.cleaned_data.get('username')\n messages.success(request,f\"Account created for {username}!\\n You can now Login In\")\n form.save()\n return redirect('users-login')\n else :\n form=UserRegisterForm()\n context={\n \"form\":form,\n \"title\":\"Register\"\n }\n return render(request,'users/register.html',context)\n\n@login_required\ndef profile_view(request):\n #Profile Update part\n if request.method == 'POST' :\n u_form = UserUpdateForm(request.POST,\n instance=request.user)\n p_form = ProfileUpdateForm(request.POST,\n request.FILES, #Because we will update media files as well\n instance=request.user.profile)\n if p_form.is_valid() :\n # u_form.save()\n p_form.save()\n messages.success(request,f\"Your profile has been updated.\")\n return redirect('/profile/')\n else:\n # u_form=UserUpdateForm(instance=request.user)\n p_form=ProfileUpdateForm(instance=request.user.profile)\n #Shops you own (for sellers) part\n # shops=Shop.objects.filter(owner=request.user.id)\n context={\n 'title':'Profile',\n # 'u_form':u_form,\n 'p_form':p_form,\n # 'shops':shops,\n }\n return render(request, 'users/profile.html',context)\n\n@login_required\ndef settings_view(request):\n context={\n 'title':'Settings'\n }\n return render(request, 'users/settings.html',context)\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"468984713","text":"from typing import Tuple, List\nfrom flask import Blueprint, current_app, request\n\nfrom ..common import sqs_batch_entries, pull_all\nfrom ..metadata import Netkan\nfrom .config import current_config\n\n\nspacedock_inflate = Blueprint(\n 'spacedock_inflate', __name__) # pylint: disable=invalid-name\n\n\n# For after-upload hook on SpaceDock\n# Handles: https://netkan.ksp-ckan.space/sd/inflate\n# POST form parameters:\n# mod_id: The mod's ID number on SpaceDock\n# event_type: update - New version of mod was uploaded\n# version-update - Default version changed\n# delete - Mod was deleted from SpaceDock\n@spacedock_inflate.route('/inflate/', methods=['POST'])\ndef inflate_hook(game_id: str) -> Tuple[str, int]:\n # Make sure our NetKAN and CKAN-meta repos are 
up to date\n pull_all(current_config.common.game(game_id).repos)\n # Get the relevant netkans\n nks = find_netkans(request.form.get('mod_id', ''), game_id)\n if nks:\n if request.form.get('event_type') == 'delete':\n # Just let the team know on Discord\n nk_msg = ', '.join(nk.identifier for nk in nks)\n current_app.logger.error(\n f'A SpaceDock mod has been deleted, affected netkans: {nk_msg}')\n return '', 204\n if request.form.get('event_type') == 'locked':\n # Just let the team know on Discord\n nk_msg = ', '.join(nk.identifier for nk in nks)\n current_app.logger.error(\n f'A SpaceDock mod has been locked, affected netkans: {nk_msg}')\n return '', 204\n if request.form.get('event_type') == 'unlocked':\n # Just let the team know on Discord\n nk_msg = ', '.join(nk.identifier for nk in nks)\n current_app.logger.error(\n f'A SpaceDock mod has been unlocked again, affected netkans: {nk_msg}')\n return '', 204\n\n # Submit them to the queue\n messages = (nk.sqs_message(\n current_config.common.game(game_id).ckanmeta_repo.highest_version(nk.identifier))\n for nk in nks)\n for batch in sqs_batch_entries(messages):\n current_config.client.send_message_batch(\n QueueUrl=current_config.inflation_queue(game_id).url,\n Entries=batch\n )\n return '', 204\n return 'No such module', 404\n\n\ndef find_netkans(sd_id: str, game_id: str) -> List[Netkan]:\n all_nk = current_config.common.game(game_id).netkan_repo.netkans()\n return [nk for nk in all_nk if nk.kref_src == 'spacedock' and nk.kref_id == sd_id]\n","sub_path":"netkan/netkan/webhooks/spacedock_inflate.py","file_name":"spacedock_inflate.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"143864738","text":"import os\n\ninput_path = \"./asd/\"\noutput_path = \"./Car/\"\n\noutput_count = len(os.listdir(output_path))\n\ncount = 0\nfor file in os.listdir(input_path):\n os.rename(input_path + file, output_path + str(output_count + count) + '.bmp')\n count += 1\n","sub_path":"Utils/rename_files.py","file_name":"rename_files.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"274047287","text":"#!/usr/bin/env python\n\nfrom pwn import *\nimport re\n\nelf_location = '/problems/got-2-learn-libc_4_526cc290dde8d914a30538d3d0ac4ef1/'\nelf = ELF(elf_location + '/vuln')\nproc = elf.process()\n\n# get the address offset between puts() and system() in gdb\ngdb_puts = 0xf75f0140\ngdb_system = 0xf75cb940\noffset = gdb_puts - gdb_system\n\n# the actual runtime address of puts() is already given\noutput = proc.recv()\nbin_sh = int(re.findall('useful_string: (.*)', output)[0], 16)\nruntime_puts = int(re.findall('puts: (.*)', output)[0], 16)\nruntime_system = runtime_puts - offset\n\nprint('[*] puts() is at: ' + hex(runtime_puts))\nprint('[*] system() is at: ' + hex(runtime_system))\n\npayload = 40 * 4 * 'A'\npayload += p32(runtime_system) # current function's return address, i.e., system()\npayload += 'AAAA' # system()'s return address, we don't care about this shit\npayload += p32(bin_sh) # system()'s first arg, i.e., \"/bin/sh\"\n\nprint('[*] serving payload')\nproc.sendline(payload)\nproc.sendline('cat {}/flag.txt'.format(elf_location))\nproc.interactive()\n","sub_path":"picoctf-2018/got-2-learn-libc/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"200977365","text":"#!/usr/bin/env python3\nimport os\nfrom multiprocessing import Process, Queue\nq = Queue()\ndef p1():\n q.put({'tan':1, 'xu':2})\n q.put('2bcd')\n q.put('3bcd')\n q.put('4bcd')\n q.put('5bcd')\n print(q.get())\n\ndef p2():\n print(q.get())\n print(q.get())\n\n\n\nif __name__ == '__main__':\n Process(target = p1).start()\n Process(target = p2).start()\n","sub_path":"w1/basic/q.py","file_name":"q.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"155286034","text":"def flipAndInvertImage(A: list) -> list:\n print(A)\n inverted = []\n flipped = [a[::-1] for a in A]\n for x in flipped:\n inverted.append(list(map(lambda x: 0 if x == 1 else 1, x)))\n\n return inverted\n\n\ninp = [[1, 1, 0], [1, 0, 1], [0, 0, 0]]\nflipAndInvertImage(inp)\n","sub_path":"leet-code/flipping-an-image.py","file_name":"flipping-an-image.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"25272526","text":"import json\nfrom django.http import HttpResponse, HttpResponseNotAllowed\nfrom django.http.response import HttpResponseBadRequest\nfrom django.views.generic import View\nfrom graphql import Source, parse, execute, validate\nfrom graphql.error import GraphQLError, format_error as format_graphql_error\nfrom graphql.execution import ExecutionResult\nfrom graphql.type.schema import GraphQLSchema\nfrom graphql.utils.get_operation_ast import get_operation_ast\nimport six\n\n\nclass HttpError(Exception):\n def __init__(self, response, message=None, *args, **kwargs):\n self.response = response\n self.message = message = message or response.content.decode()\n super(HttpError, self).__init__(message, *args, **kwargs)\n\n\nclass GraphQLView(View):\n schema = None\n executor = None\n root_value = None\n pretty = False\n\n def __init__(self, **kwargs):\n super(GraphQLView, self).__init__(**kwargs)\n assert isinstance(self.schema, GraphQLSchema), 'A Schema is required to be provided to GraphQLView.'\n\n # noinspection PyUnusedLocal\n def get_root_value(self, request):\n return self.root_value\n\n def get_context(self, request):\n return request\n\n def dispatch(self, request, *args, **kwargs):\n try:\n if request.method.lower() not in ('get', 'post'):\n raise HttpError(HttpResponseNotAllowed(['GET', 'POST'], 'GraphQL only supports GET and POST requests.'))\n\n execution_result = self.execute_graphql_request(request)\n response = {}\n\n if execution_result.errors:\n response['errors'] = [self.format_error(e) for e in execution_result.errors]\n\n if execution_result.invalid:\n status_code = 400\n else:\n status_code = 200\n response['data'] = execution_result.data\n\n return HttpResponse(\n status=status_code,\n content=self.json_encode(request, response),\n content_type='application/json'\n )\n\n except HttpError as e:\n response = e.response\n response['Content-Type'] = 'application/json'\n response.content = self.json_encode(request, {\n 'errors': [self.format_error(e)]\n })\n return response\n\n def json_encode(self, request, d):\n if not self.pretty and not request.GET.get('pretty'):\n return json.dumps(d, separators=(',', ':'))\n\n return json.dumps(d, sort_keys=True,\n indent=2, separators=(',', ': '))\n\n # noinspection PyBroadException\n def parse_body(self, request):\n content_type = self.get_content_type(request)\n\n if content_type == 'application/graphql':\n return {'query': 
request.body.decode()}\n\n elif content_type == 'application/json':\n try:\n request_json = json.loads(request.body.decode('utf-8'))\n assert isinstance(request_json, dict)\n return request_json\n except:\n raise HttpError(HttpResponseBadRequest('POST body sent invalid JSON.'))\n\n elif content_type in ['application/x-www-form-urlencoded', 'multipart/form-data']:\n return request.POST\n\n return {}\n\n def execute(self, *args, **kwargs):\n return execute(self.schema, *args, **kwargs)\n\n def execute_graphql_request(self, request):\n query, variables, operation_name = self.get_graphql_params(request, self.parse_body(request))\n\n if not query:\n raise HttpError(HttpResponseBadRequest('Must provide query string.'))\n\n source = Source(query, name='GraphQL request')\n\n try:\n document_ast = parse(source)\n validation_errors = validate(self.schema, document_ast)\n if validation_errors:\n return ExecutionResult(\n errors=validation_errors,\n invalid=True,\n )\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n if request.method.lower() == 'get':\n operation_ast = get_operation_ast(document_ast, operation_name)\n if operation_ast and operation_ast.operation != 'query':\n raise HttpError(HttpResponseNotAllowed(\n ['POST'], 'Can only perform a {} operation from a POST request.'.format(operation_ast.operation)\n ))\n\n try:\n return self.execute(\n document_ast,\n root_value=self.get_root_value(request),\n variable_values=variables,\n operation_name=operation_name,\n context_value=self.get_context(request)\n )\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n @staticmethod\n def get_graphql_params(request, data):\n query = request.GET.get('query') or data.get('query')\n variables = request.GET.get('variables') or data.get('variables')\n\n if variables and isinstance(variables, six.text_type):\n try:\n variables = json.loads(variables)\n except:\n raise HttpError(HttpResponseBadRequest('Variables are invalid JSON.'))\n\n operation_name = request.GET.get('operationName') or data.get('operationName')\n\n return query, variables, operation_name\n\n @staticmethod\n def format_error(error):\n if isinstance(error, GraphQLError):\n return format_graphql_error(error)\n\n return {'message': six.text_type(error)}\n\n @staticmethod\n def get_content_type(request):\n meta = request.META\n content_type = meta.get('CONTENT_TYPE', meta.get('HTTP_CONTENT_TYPE', ''))\n return content_type.split(';', 1)[0].lower()\n\n\nclass BatchGraphQLView(GraphQLView):\n '''\n NOTE: THIS IS A WORK IN PROGRESS\n DO NOT USE UNLESS YOU WANT TO ACCEPT THAT IT DOES NOT PROPERLY\n HANDLE FAILURE AND ASSUMES EVERYTHING GOES SWIMMINGLY.\n '''\n\n def parse_body(self, request):\n content_type = self.get_content_type(request)\n\n if content_type == 'application/graphql':\n return {'query': request.body.decode()}\n\n elif content_type == 'application/json':\n try:\n request_json = json.loads(request.body.decode('utf-8'))\n # NOTE: this is the only difference between batch and non.\n assert isinstance(request_json, list)\n return request_json\n except:\n raise HttpError(HttpResponseBadRequest('POST body sent invalid JSON.'))\n\n elif content_type in ['application/x-www-form-urlencoded', 'multipart/form-data']:\n return request.POST\n\n return {}\n\n @staticmethod\n def get_graphql_params(data):\n query = data.get('query')\n variables = data.get('variables')\n _id = data.get('id')\n\n if variables and isinstance(variables, six.text_type):\n try:\n variables = json.loads(variables)\n 
except:\n raise HttpError(HttpResponseBadRequest('Variables are invalid JSON.'))\n\n # NOTE: operation_name does not apply?\n operation_name = None\n\n return query, variables, operation_name, _id\n\n def execute_graphql_request(self, request):\n '''\n THIS IS IMPLEMENTED IN A SUB-OPTIMAL MANNER. DO NOT...\n A.) Judge Me.\n B.) Use unless you accept that the performance here probably is miserable.\n '''\n subqueries = self.parse_body(request)\n results = []\n for subquery in subqueries:\n query, variables, operation_name, _id = self.get_graphql_params(subquery)\n\n if not query:\n raise HttpError(HttpResponseBadRequest('Must provide query string.'))\n\n source = Source(query, name='GraphQL request')\n\n try:\n document_ast = parse(source)\n validation_errors = validate(self.schema, document_ast)\n if validation_errors:\n # TODO: Do not return here. We should handle this per subquery.\n return ExecutionResult(\n errors=validation_errors,\n invalid=True,\n )\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n if request.method.lower() == 'get':\n operation_ast = get_operation_ast(document_ast, operation_name)\n if operation_ast and operation_ast.operation != 'query':\n raise HttpError(HttpResponseNotAllowed(\n ['POST'], 'Can only perform a {} operation from a POST request.'.format(operation_ast.operation)\n ))\n\n try:\n result = self.execute(\n document_ast,\n root_value=self.get_root_value(request),\n variable_values=variables,\n operation_name=operation_name,\n context_value=self.get_context(request)\n )\n # TODO: This is really optimistic.\n # We may have status, we may have errors, etc...\n # payload should be set according to graphql spec, not\n # simply \"IT WORKED Spec\".\n results.append({\n \"id\": _id,\n \"payload\": {\n \"data\": result.data,\n }\n })\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n return results\n\n def dispatch(self, request, *args, **kwargs):\n try:\n if request.method.lower() not in ('get', 'post'):\n raise HttpError(HttpResponseNotAllowed(['GET', 'POST'], 'GraphQL only supports GET and POST requests.'))\n\n execution_results = self.execute_graphql_request(request)\n response = {}\n\n # TODO: Figure out how batch errors should be handled.\n # For now, we will simply assume everything works! (HAHA. HA. HAHA. 
HA.)\n\n # if execution_result.errors:\n # response['errors'] = [self.format_error(e) for e in execution_result.errors]\n\n # if execution_result.invalid:\n # status_code = 400\n # else:\n\n status_code = 200\n response = execution_results\n\n return HttpResponse(\n status=status_code,\n content=self.json_encode(request, response),\n content_type='application/json'\n )\n\n except HttpError as e:\n response = e.response\n response['Content-Type'] = 'application/json'\n response.content = self.json_encode(request, {\n 'errors': [self.format_error(e)]\n })\n return response\n","sub_path":"graphql_django_view/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"407844953","text":"# a minimal tracking script - this will start all peer\n# services and attach everything appropriately\n# change parameters depending on your pan tilt, pins and\n# Arduino details\n# all commented code is not necessary but allows custom\n# options\n\nport = \"COM3\" #change COM port to your own port\nxServoPin = 22 #change this to the right servo pin if needed, for inmoov this is right\nyServoPin = 24 #change this to the right servo pin if needed, for inmoov this is right\n\ntracker = Runtime.createAndStart(\"tracker\", \"Tracking\")\n\n# set specifics on each Servo\nservoX = tracker.getX()\nservoX.setPin(xServoPin)\nservoX.setMinMax(30, 150) #minimum and maximum settings for the X servo\n\nservoY = tracker.getY()\nservoY.setPin(yServoPin)\nservoY.setMinMax(30, 150) #minimum and maximum settings for the Y servo\n\n# changing PID values change the \n# speed and \"jumpyness\" of the Servos\n#xpid = tracker.getXPID() # outdated!\n#ypid = tracker.getYPID() # outdated!\n\n# these are default setting\n# adjust to make more smooth\n# or faster\n# xpid.setPID(5.0, 5.0, 0.1)\n# ypid.setPID(5.0, 5.0, 0.1)\n\n# optional filter settings\nopencv = tracker.getOpenCV()\n\n# setting camera index to 1 default is 0\nopencv.setCameraIndex(1) \n\n# connect to the Arduino\ntracker.connect(port)\n\n# Gray & PyramidDown make face tracking\n# faster - if you dont like these filters - you\n# may remove them before you select a tracking type with\n# the following command\n# tracker.clearPreFilters()\n\n# diffrent types of tracking\n\n# simple face detection and tracking\ntracker.faceDetect()\n\n# lkpoint - click in video stream with \n# mouse and it should track\n#tracker.startLKTracking()\n\n# scans for faces - tracks if found\n#tracker.findFace() \n","sub_path":"home/erik/facetracking.py","file_name":"facetracking.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"175241250","text":"import glob\nimport json\nimport math\nimport os\nfrom os.path import abspath, dirname\n\nimport numpy as np\n\nimport hdf5_getters as GETTERS\nfrom constants import (FILE_PATH, INITIAL_OUTPUT_FILE_PATH, METHODS,\n MSD_DATA_PATH, PARS)\nfrom mice import MICE\n\n\ndef save_songs():\n '''\n Reads the song infos for all songs in the directory specified in\n MSD_DATA_PATH, imputes missing data, normalizes the data and stores it as\n a JSON file.\n '''\n # read all relevant infos from the songs and store them in a dictionary\n print('Reading song infos from the h5 files ...')\n song_dict = {}\n i = -1\n ids = set()\n for root, dirs, files in os.walk(MSD_DATA_PATH):\n files = glob.glob(os.path.join(root, '*'+'.h5'))\n for file in files:\n h5 
= GETTERS.open_h5_file_read(file)\n id = GETTERS.get_song_id(h5).decode('UTF-8')\n # if there are multiple song files with the same id only the first\n # is saved\n if id not in ids:\n i += 1\n ids.add(id)\n song_dict[i] = {}\n song_dict[i]['id'] = id # save the song id\n song_dict[i]['title'] = GETTERS.get_title(\n h5).decode('UTF-8').replace(\"'\", \"\") # save the title\n song_dict[i]['artist'] = GETTERS.get_artist_name(\n h5).decode('UTF-8').replace(\"'\", \"\") # save the artist\n\n # save the remaining parameters or, whenever a value is\n # missing, save np.NaN instead\n for key, method in zip(PARS, METHODS):\n value = getattr(GETTERS, method)(h5)\n if math.isnan(value):\n song_dict[i][key] = np.NaN\n else:\n song_dict[i][key] = value\n h5.close()\n\n # impute missing values via fancyimpute MICE imputation and store the new\n # values in the dictionary\n print('Building the array for MICE ...')\n song_array = []\n for index in range(0, len(song_dict)):\n song_array.append([song_dict[index][par] for par in PARS])\n\n print('MICE ...')\n mc = MICE()\n a = mc.complete(np.array(song_array))\n\n print('Storing imputed data in song dictionary ...')\n # store the values for each parameter in a list for the normalization\n # in the next step\n val_lists = []\n for par in PARS:\n val_lists.append([])\n\n for index in range(0, len(song_dict)):\n for par, val_list, nr in zip(PARS, val_lists, range(0, len(PARS))):\n value = a[index, nr]\n song_dict[index][par] = value\n val_list.append(value)\n\n # normalize the values\n print('Normalizing the values ...')\n for par, val_list in zip(PARS, val_lists):\n max_value = -float(\"inf\")\n min_value = float(\"inf\")\n for index in song_dict:\n max_value = max(max_value, song_dict[index][par])\n min_value = min(min_value, song_dict[index][par])\n for index in song_dict:\n song_dict[index][par] = (\n song_dict[index][par] - np.mean(val_list)) / \\\n (max_value - min_value) * 100\n\n # save the data as a JSON file\n print('Saving JSON ...')\n with open(INITIAL_OUTPUT_FILE_PATH, 'w') as outfile:\n json.dump(song_dict, outfile)\n\n print('done')\n","sub_path":"src/back end/song_data_saver.py","file_name":"song_data_saver.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"400874042","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://mp.weixin.qq.com/s?timestamp=1534727794&src=3&ver=1&signature=sR4iMIZjS8mMBQVqczjml8xlMwY*zNIz7yu9twy8YFhaKRhNbrXeC0u34lTjs5yOK8rQrM7lIiHP7rh248AxfZ*OovunyNO5r*Gsy7xNu*rDMWXdg2LpVrCrIPAcXdv6IqPs-cIoM8WFx2ZswD-ddN5-QI6OidYMg0xdcOioZl8='\nresp = requests.get(url)\n# htmltxt = \"
    Hello World
    \"\nsoup = BeautifulSoup(resp.text, 'lxml')\narticle = soup.find('div', class_='rich_media_area_primary_inner')\n\nhead = soup.find('head')\n\nprint(article)\nprint('__________')\n# print(head)\n# test = soup.find('p')\n# print(soup)\n# print(test)\n","sub_path":"users/test7.py","file_name":"test7.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"3035526","text":"import brewer2mpl\n# matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom Ribbon.src.RotatingChain import RotatingChain, msg\n\nparams = {\n 'axes.labelsize': 14,\n 'font.size': 14,\n 'legend.fontsize': 12,\n 'xtick.labelsize': 12,\n 'ytick.labelsize': 12,\n 'text.usetex': False,\n 'figure.figsize': [1*5, 1*4]\n }\nplt.rcParams.update(params)\n\nc = RotatingChain()\nmsg = msg()\n\nbmap = brewer2mpl.get_map('Set2', 'qualitative', 8)\ncolors = bmap.mpl_colors\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.set_color_cycle(colors)\n\nfor a in [2, 5, 10]:\n for barL in np.arange(0.1, 25, 0.1):\n msg.info(\"Calculate barL: %f\" % barL)\n dev = c.cals_derivative_of_angular_momentum_sign(a, barL)\n u, u_dot, bars = c.find_coordinate_on_S(a, barL)\n if dev == \"positive\":\n ax.scatter(bars, u_dot, color=colors[0])\n elif dev == \"negative\":\n ax.scatter(bars, u_dot, color=colors[1])\nax.set_ylabel(r\"${\\bar u}' $\")\nax.set_xlabel(r\"$\\bar s $\")\nax.grid(True)\nax.set_xlim([0, 25])\nax.set_ylim([-10, 10])\n\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\n# offset the spines\nfor spine in ax.spines.values():\n spine.set_position(('outward', 5)) # Move the bottom spine outward\n\nfig.tight_layout(pad=1)\nplt.savefig('/Users/hung/git/construction/hung/RotatingChainPaper/fig/angular_momentum_sign.pdf')\n","sub_path":"Ribbon/IpythonNotebook/24_Feb_Figure_Rotational_Inertia_Plotting.py","file_name":"24_Feb_Figure_Rotational_Inertia_Plotting.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"187855054","text":"from django.shortcuts import render\nfrom .forms import TravellerForm\nfrom Traveller.models import Traveller\n# Create your views here.\n\ndef showtravellerlist(request):\n\n showtravellerlist = Traveller.objects.all()\n\n context = {\n \"traveller\":True,\n \"travellerinfo\": showtravellerlist()\n }\n return render(request, 'booking/showtravellerlist.html',context)\n\ndef addtravellerinfo(request):\n traveller = Traveller.objects.get(user=request.user)\n message = \"\"\n form= TravellerForm()\n if traveller:\n if request.method == \"POST\":\n\n form = TravellerForm(request.POST)\n\n if form.is_valid():\n traveller = Traveller.objects.get(user=request.user)\n ins=form.save(commit=False)\n ins.Traveller_ID= traveller\n ins.save()\n message = \"Your Details is inserted to Database.\"\n form = TravellerForm()\n context = {\n \"traveller\": True,\n 'form': form,\n 'message' : message\n }\n return render(request, 'traveller/addtravellerinfo.html' ,context)","sub_path":"Traveller/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"592406264","text":"import matplotlib\nmatplotlib.use('Agg')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys \n\ninp = sys.argv[1]\n\n# hyper parameters\nepoch = 1000 # how many times to repeat\nlr = 0.001 # learning 
rate\ntrain_data_ratio = 0.8 # train data \n\n#read data\ndata = np.genfromtxt(inp, dtype=np.float64, skip_header=1)\n\ndata_pivot = int(len(data)*train_data_ratio)\ntrain_data = data[:data_pivot]\ntest_data = data[data_pivot:]\n\nm = len(train_data) # number of data\nk = len(train_data[0]) - 1 # number of dimension on x\n\nx_train = [list(x)[:-1] for x in train_data]\ny_train = [x[-1:] for x in train_data]\n\nx_test = [list(x)[:-1] for x in test_data]\ny_test = [x[-1:] for x in test_data]\n\n# hypothesis = w1*x1 + w2*x2 + ... + wk*xk + b\nw = np.random.rand(k, 1)\nb = np.random.random()\n\ncost_log = []\nfor i in range(epoch):\n\tpred = np.matmul(x_train, w) + b\n\tcost = 1/(2*m) * np.sum(np.square(np.subtract(pred, y_train)))\n\t# cost = (h-y_train)**2.mean()/2\n\n\tif i % (epoch//100) == 0:\n\t\tprint(i//(epoch//100), \"%\")\n\t\tcost_log.append(cost)\n\n\tw_gradient = 1/m * np.sum(np.multiply(np.subtract(pred, y_train), x_train))\n\tb_gradient = 1/m * np.sum(np.subtract(pred, y_train))\n\t\n\t# update\n\tw -= lr * w_gradient\n\tb -= 2 * lr * b_gradient\n\n# visualize\nif k == 1:\n\tplt.figure()\n\tplt.title('graph of train data')\n\tplt.scatter(x_train, y_train)\n\tpred = np.matmul(x_train, w) + b\n\tplt.plot(x_train, pred, color='green')\n\tplt.xticks(())\n\tplt.yticks(())\n\tplt.savefig('graph.png')\n\nplt.figure()\nplt.title('cost')\nplt.plot(range(100), cost_log)\nplt.savefig('cost.png')\n\n# cost for test data\npred = np.matmul(x_test, w) + b\ncost = 1/(2*len(test_data)) * np.sum(np.square(np.subtract(pred, y_test)))\nprint(\"average cost for test data : \", cost)\n\ninput()\n","sub_path":"without_display.py","file_name":"without_display.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"202259934","text":"import sqlite3\nimport numpy\n\ndef query_table(table):\n conn = sqlite3.connect('data/db/vanguard.db')\n c = conn.cursor()\n\n for row in c.execute(\"select * from {}\".format(table)):\n print(row)\n conn.commit()\n conn.close()\n\ndef create_table(table):\n if table == 'symbols':\n from tables import symbols\n symbols.run()\n elif table == 'weekly_ratio':\n from tables import weekly_ratio\n weekly_ratio.run()\n elif table == 'mac':\n from tables import mac\n mac.run()\n\ndef test_metric(metric):\n if metric == 'mac':\n from tables import mac\n mac.test()\n\ndef main():\n symbols = ['SPY', 'VCR', 'VDC', 'VDE', 'VFH', 'VHT', 'VIS', 'VGT', 'VAW', 'VNQ', 'VOX', 'VPU']\n\n action = 'query'\n table = 'symbols'\n\n if action == 'create':\n create_table(table)\n elif action == 'query':\n query_table(table)\n elif action == 'test':\n test_metric(table)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"102282596","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('people', '0007_auto_20151010_1554'),\n ('posts', '0003_auto_20151011_1046'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('comments', models.TextField()),\n ('time_commented', models.DateTimeField(auto_now_add=True)),\n ('commenters', models.ForeignKey(to='people.Peopleprop')),\n 
('replys', models.ForeignKey(to='posts.Reply')),\n ],\n ),\n ]\n","sub_path":"posts/migrations/0004_comment.py","file_name":"0004_comment.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"562435874","text":"# Imports from 3rd party libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom catboost import CatBoostClassifier, Pool\r\nimport pickle\r\nimport dash\r\nimport dash_bootstrap_components as dbc\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\n\r\n# import dash_design_kit as ddk\r\nimport dash_daq as daq\r\n\r\n# Imports from this application\r\nfrom app import app\r\n\r\n# loading the model and df to extract unique categories\r\nmodel = pickle.load(open(\"model/kickstarter_model.sav\", \"rb\"))\r\ndf = pickle.load(open(\"model/kickstarter_dataframe.pkl\", \"rb\"))\r\ncategories = sorted(df.category.unique())\r\n\r\n# 2 column layout. 1st column width = 4/12\r\n# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout\r\ncolumn1 = dbc.Col(\r\n [\r\n dcc.Markdown(\r\n \"\"\"\r\n\r\n ## Predictions\r\n\r\n How much are you looking to raise for your project?\r\n\r\n \"\"\"\r\n ),\r\n\r\n dcc.Slider(\r\n id='slider1',\r\n min=0,\r\n max=100000,\r\n step=1000,\r\n value=20000,\r\n marks={0: '0',\r\n 20000: '$20k',\r\n 40000: '$40k',\r\n 60000: '$60k',\r\n 80000: '$80k',\r\n 100000: '100k'},\r\n className='mb-3' # this gives margin spacing to the bottom\r\n ),\r\n\r\n dcc.Markdown(\"\", id='output1',\r\n className='mb-5'),\r\n\r\n dcc.Markdown(\"What category does your project fall under?\"),\r\n\r\n dcc.Dropdown(\r\n id='cat_dropdown',\r\n options=[\r\n {'label': i, 'value': i} for i in categories\r\n ],\r\n value='Young Adult',\r\n placeholder=\"Select a Category\",\r\n className='mb-5'\r\n ),\r\n\r\n dcc.Markdown(\"How many days will your project be open for funding?\"),\r\n\r\n dcc.Slider(\r\n id='slider2',\r\n min=0,\r\n max=60,\r\n step=1,\r\n marks={\r\n 0: '0',\r\n 10: '10 days',\r\n 20: '20 days',\r\n 30: '30 days',\r\n 40: '40 days',\r\n 50: '50 days',\r\n 60: '60+ days'\r\n },\r\n value=30,\r\n className='mb-3'\r\n ),\r\n\r\n dcc.Markdown(\"\", id='output2',\r\n className='mb-5'),\r\n\r\n dcc.Markdown(\"Are you a Staff Pick?\"),\r\n\r\n dcc.Dropdown(\r\n id='staff_pick_dropdown',\r\n options=[\r\n {'label': 'Yes', 'value': 1},\r\n {'label': 'No', 'value': 0}\r\n ],\r\n value=1,\r\n placeholder=\"Select Yes or No\",\r\n className='mb-5'\r\n\r\n) \r\n\r\n ],\r\n md=6,\r\n)\r\n\r\ncolumn2 = dbc.Col(\r\n className='mb-50'\r\n\r\n)\r\n\r\ncolumn3 = dbc.Col(\r\n [\r\n dcc.Markdown(\r\n \"\"\"\r\n Given the selected features of your project,\r\n your chance of success is:\r\n \"\"\"\r\n ),\r\n\r\n daq.Gauge(\r\n id ='pred-gauge',\r\n min=0,\r\n max=100,\r\n value=80),\r\n\r\n dcc.Markdown(\"\", id=\"predict_text\", className='mb-50'),\r\n\r\n\r\n ],\r\n className='mb-50',\r\n md=4,\r\n)\r\n\r\nlayout = dbc.Row([column1, column2, column3])\r\n\r\n\r\n@app.callback(\r\n Output(component_id='output1', component_property='children'),\r\n [Input(component_id='slider1', component_property='value')]\r\n)\r\ndef update_output_div(input_value):\r\n return '***You have selected to raise: ${} ***'.format(input_value)\r\n\r\n\r\n@app.callback(\r\n Output(component_id='output2', component_property='children'),\r\n [Input(component_id='slider2', component_property='value')]\r\n)\r\ndef 
update_output_div2(input_value):\r\n return '***Your project will be open {} days for funding***'.format(input_value)\r\n\r\n@app.callback(\r\n Output(component_id='pred-gauge', component_property='value'),\r\n [Input(component_id='slider1', component_property='value'),\r\n Input(component_id='cat_dropdown', component_property='value'),\r\n Input(component_id='slider2', component_property='value'),\r\n Input(component_id='staff_pick_dropdown', component_property='value')]\r\n)\r\ndef predict(goal, category, fundPeriodDays, staff_pick):\r\n \"\"\"\r\n A function that returns the likelihood of achieving a fundraising goal \r\n on Kickstarter\r\n\r\n Parameters\r\n ----------\r\n category: str (valid category)\r\n staff_pick: str (\"Yes\" or \"No\")\r\n goal: int (0-100000)\r\n fundPeriodDays: int\r\n\r\n Returns\r\n -------\r\n probability: float\r\n \"\"\"\r\n features = {\r\n 'category': category,\r\n 'staff_pick': staff_pick,\r\n 'goal': goal,\r\n 'fundPeriodDays' : fundPeriodDays\r\n }\r\n\r\n df = pd.DataFrame(features, index=[0])\r\n\r\n return round((model.predict_proba(df)[0][1] * 100), 1)\r\n\r\n@app.callback(\r\n Output(component_id='predict_text', component_property='children'),\r\n [Input(component_id='pred-gauge', component_property='value')])\r\ndef update_predict_text(gauge_val):\r\n return \"You can expect to achieve your fundraising goal {}% of the time\".format(gauge_val)","sub_path":"pages/predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"452097883","text":"# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nimport jax.numpy as jnp\nimport numpy as np\n\nimport netket as nk\n\nfrom .. 
import common # noqa: F401\n\nSEED = 111\n\n\n@pytest.fixture(\n params=[pytest.param(M, id=f\"Fock(M={M})\") for M in [0, 2, 3, 4, 5, 6, 8]]\n)\ndef vstate(request):\n M = request.param\n # keep this a prime number so we get different sizes on every rank...\n hi = nk.hilbert.Fock(M, 1)\n\n ma = nk.models.RBM(\n alpha=1,\n dtype=float,\n hidden_bias_init=nk.nn.initializers.normal(),\n visible_bias_init=nk.nn.initializers.normal(),\n )\n\n return nk.vqs.MCState(\n nk.sampler.MetropolisLocal(hi),\n ma,\n )\n\n\n@pytest.mark.parametrize(\"normalize\", [True, False])\ndef test_to_array(vstate, normalize):\n psi = vstate.to_array(normalize=normalize)\n\n if normalize:\n np.testing.assert_allclose(jnp.linalg.norm(psi), 1.0)\n\n psi_norm = psi / jnp.linalg.norm(psi)\n\n assert psi.shape == (vstate.hilbert.n_states,)\n\n x = vstate.hilbert.all_states()\n psi_exact = jnp.exp(vstate.log_value(x))\n psi_exact = psi_exact / jnp.linalg.norm(psi_exact)\n\n np.testing.assert_allclose(psi_norm, psi_exact)\n\n\n@pytest.fixture(\n params=[pytest.param(M, id=f\"Fock(M={M})\") for M in [0, 2, 3, 4, 5, 6, 8]]\n)\ndef vstate_rho(request):\n M = request.param\n # keep this a prime number so we get different sizes on every rank...\n hi = nk.hilbert.Fock(M, 1)\n\n ma = nk.models.NDM()\n\n return nk.vqs.MCMixedState(\n nk.sampler.MetropolisLocal(nk.hilbert.DoubledHilbert(hi)),\n ma,\n )\n\n\n@pytest.mark.parametrize(\"normalize\", [True, False])\ndef test_to_matrix(vstate_rho, normalize):\n rho = vstate_rho.to_matrix(normalize=normalize)\n\n if normalize:\n np.testing.assert_allclose(jnp.trace(rho), 1.0)\n\n rho_norm = rho / jnp.trace(rho)\n\n assert rho.shape == (\n vstate_rho.hilbert.physical.n_states,\n vstate_rho.hilbert.physical.n_states,\n )\n\n x = vstate_rho.hilbert.all_states()\n rho_exact = jnp.exp(vstate_rho.log_value(x)).reshape(rho.shape)\n rho_exact = rho_exact / jnp.trace(rho_exact)\n\n np.testing.assert_allclose(rho_norm, rho_exact)\n","sub_path":"test/nn/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"111227067","text":"import os\nimport copy\nfrom flask import url_for\nfrom typing import List\nfrom docs.domain import Page\nfrom docs.context import get_application_config\n\nimport markdown\nimport yaml\n\n\nclass PageNotFound(Exception):\n \"\"\"Attempted to load a page that does not exist.\"\"\"\n\n\nclass PageLoadFailed(Exception):\n \"\"\"Could not load the content of a page.\"\"\"\n\n\nSITEMAP = {}\n\n\ndef _unpack(path: str, elem: dict):\n if 'content_path' in elem:\n SITEMAP[path] = elem\n if 'pages' in elem:\n for key, sub_elem in elem['pages'].items():\n _unpack(f'{path}/{key}'.strip('/'), sub_elem)\n\n\ndef _load_sitemap() -> None:\n config = get_application_config()\n site_path = config.get('SITE_PATH', 'site')\n sitemap_path = os.path.join(site_path, 'sitemap.yaml')\n with open(sitemap_path) as f:\n _unpack('', yaml.safe_load(f))\n\n\ndef _full_path(local_path: str) -> str:\n \"\"\"Generate the full path (including app and blueprint root) to a page.\"\"\"\n try:\n base_path = url_for('docs.from_sitemap').rstrip('/')\n except RuntimeError:\n base_path = '/'\n local_path = local_path.lstrip('/')\n return f'{base_path}/{local_path}'\n\n\n_load_sitemap()\n\n\ndef _find_page(path: str) -> dict:\n try:\n elem = copy.copy(SITEMAP[path.strip('/')])\n except KeyError as e:\n raise PageNotFound('Nope') from e\n elem['pages'] = elem.get('pages', {})\n return 
elem\n\n\ndef _load_markdown(content_path: str) -> markdown.markdown:\n try:\n with open(os.path.join('site', content_path)) as f:\n content = markdown.markdown(f.read())\n except IOError as e:\n raise PageLoadFailed('Failed to load page') from e\n return content\n\n\ndef load_page(path: str) -> Page:\n \"\"\"Load a :class:`.Page` from ``path``.\"\"\"\n elem = _find_page(path)\n elem['markdown'] = _load_markdown(elem['content_path'])\n elem['parents'] = []\n if path != '':\n fpath = ''\n for part in [''] + path.split('/'):\n fpath = f'{fpath}/{part}'\n pelem = _find_page(fpath)\n pelem['parents'] = None\n pelem['markdown'] = None\n elem['parents'].append(Page(path=_full_path(fpath), **pelem))\n return Page(path=_full_path(path), **elem)\n\n\ndef list_paths() -> List[Page]:\n return list(SITEMAP.keys())\n","sub_path":"docs/services/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"447882131","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom PyQt5.Qt import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom core import *\nfrom GhostController import *\nimport math\nimport copy\nfrom contextlib import redirect_stderr\n \n#spare area around the table\nT_SPARE = 100.\n\n#playing field size\nT_WIDTH = 3000.\nT_HEIGHT = 2000.\n\nT_OMRON_WIDTH = 20.\n\n#RAL encoding \npeebleGrey = QColor(0xBDBAAB) #RAL7032\ntrafficWhite = QColor(0xFCFFFF) #RAL9016\njetBlack = QColor(0x03050A) #RAL9005\ntrafficYellow = QColor(0xFCBD1F) #RAL1023\nskyBlue = QColor(0x1761AB) #RAL5015\nardGray = QColor(66,66,66)\nardBackground = QColor(0xADAFAF)\ndarkRed = QColor(0x800000)\ntransparent = QColor(0,0,0,0)\nredTranslucid = QColor(255,0,0,80)\npink = QColor(0xFF33CC) \n\nmarkPen = QPen(ardGray)\nmarkPen.setWidth(1)\nmarkPen.setCosmetic(True)\n\n#\n\n#\n# This class is an overview of the table with robot displayed at current position\n#\nclass TableOverview(QWidget):\n \n #@param robot : the prowy providing telemetry data\n def __init__(self, parent, robotProxy):\n super().__init__(parent)\n self.layout = QHBoxLayout(self)\n self.resize(600,400)\n self.p = QPainter()\n self.table = TableWidget(self.p)\n self.ghost = GhostWidget(self.p)\n self.ghostController = GhostController()\n self.robotPen = RobotPenWidget(self.p)\n self.robotTration = RobotTrationWidget(self.p)\n self.robot = None\n self.mouseTransform = None\n self.robotProxy = robotProxy\n self.graphState = None\n self.graphNodes = None\n self.graphLinks = None\n self.drawTraj = False\n self.drawGraph = False\n self.view = QRect( - T_SPARE,\n - T_SPARE,\n T_WIDTH + 2.*T_SPARE,\n T_HEIGHT + 2.*T_SPARE)\n self.robotPose = Pose2D()\n sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.setSizePolicy(sizePolicy)\n \n def setMire(self, on):\n self.robot.displayMire = on\n self.ghost.displayMire = on\n \n def setActuators(self, on):\n self.robot.actuatorsOut = on\n self.ghost.actuatorsOut = on\n \n def paintEvent(self, event):\n self.p.begin(self)\n \n aspectH = min( self.p.device().height(), self.p.device().width() / 1.5)\n aspectW = min( self.p.device().width(), self.p.device().height() * 1.5)\n \n #---DEBUG--- print(str(self.p.device().width()) + \" \" + str(self.p.device().height()))\n self.p.setWindow(self.view)\n self.p.setViewport(0., 0., aspectW, aspectH)\n #---DEBUG--- print(self.view)\n self.table.draw(self.parent().robotState.stratInfo)\n 
self.p.translate(T_WIDTH/2, T_HEIGHT/2)\n self.mouseTransform, invertible = self.p.combinedTransform().inverted()\n assert invertible\n if self.robot != None and self.parent().robotConfig != None:\n x, y = self.robot.filterPosition(self.parent().robotConfig, self.robotPose.x, self.robotPose.y, math.degrees(self.robotPose.h))\n else:\n x = self.robotPose.x\n y= self.robotPose.y\n drawingPose = Pose2D()\n drawingPose.x = x\n drawingPose.y = -y\n drawingPose.h = math.degrees(-self.robotPose.h)\n if self.drawGraph:\n self._drawGraph()\n if self.robot != None and self.parent().robotConfig != None:\n self._drawTrajectory()\n self.robot.draw(drawingPose, self.parent().robotConfig, self.parent().robotState.stratInfo, self.leftArm, self.rightArm)\n self.ghost.draw(self.ghost.pose, self.parent().robotConfig, self.parent().robotState.stratInfo)\n self.p.end()\n \n def mousePressEvent(self, event):\n if self.mouseTransform == None:\n return\n \n #Convert into user coordinates\n p = self.mouseTransform.map(event.pos())\n #qDebug(str(p.x()) + \" \" + str(p.y()) + \" => \" + (str(p.x()) + \" \" + str(p.y()))) \n \n #Update Ghost controller state\n hitPoint = Point(p.x(), -p.y())\n isInCarriage = self.ghost.isMouseInCarriage(hitPoint)\n self.ghostController.mouseLeftClicCb(True, hitPoint, self.ghost.pose, isInCarriage)\n \n return QWidget.mousePressEvent(self, event)\n \n def mouseReleaseEvent(self, event):\n if self.mouseTransform == None:\n return\n \n #Convert into user coordinates\n p = self.mouseTransform.map(event.pos())\n #qDebug(str(p.x()) + \" \" + str(p.y()) + \" => \" + (str(p.x()) + \" \" + str(p.y()))) \n \n #Update Ghost controller state\n hitPoint = Point(p.x(), -p.y())\n isInCarriage = self.ghost.isMouseInCarriage(hitPoint)\n self.ghostController.mouseLeftClicCb(False, hitPoint, self.ghost.pose, isInCarriage)\n \n return QWidget.mouseReleaseEvent(self, event)\n \n def mouseMoveEvent(self, event):\n if self.mouseTransform == None:\n return\n #Convert into user coordinates\n p = self.mouseTransform.map(event.pos())\n #qDebug(str(p.x()) + \" \" + str(p.y()) + \" => \" + (str(p.x()) + \" \" + str(p.y()))) \n \n #Update ghost position\n hitPoint = Point(p.x(), -p.y())\n self.ghost.pose = self.ghostController.mouseSlideCb(hitPoint, self.ghost.pose)\n self.parent().label[\"xg\"].setText(str(self.ghost.pose.x))\n self.parent().label[\"yg\"].setText(str(self.ghost.pose.y))\n self.parent().label[\"hg\"].setText(\"%0.0f\" % math.degrees(self.ghost.pose.h))\n \n return QWidget.mouseMoveEvent(self, event)\n \n def _drawTrajectory(self):\n if self.drawTraj:\n self.p.save()\n pen = QPen(darkRed)\n pen.setWidth(1)\n pen.setCosmetic(True)\n self.p.setPen(pen)\n \n polyline = list()\n \n for i in range(len(self.robotProxy.past)):\n if i %3 == 0:\n p = self.robotProxy.past[i].nav.pos\n drawingPose = Pose2D(p.x, -p.y, 0)\n drawingPose.h = -p.h\n self.robot.draw(drawingPose, self.parent().robotConfig, self.robotProxy.past[i].stratInfo, 0, 0)\n polyline.append(QPointF(p.x, -p.y))\n \n self.p.drawPolyline(QPolygonF(polyline))\n self.p.restore()\n\n def _drawGraph(self):\n if self.graphNodes == None or self.graphLinks == None:\n return\n\n #Drawing links to other nodes\n for i in range(self.graphLinks.count):\n self.p.setPen(markPen)\n #print(ord(self.graphLinks.sources[i])-1)\n #print(ord(self.graphLinks.targets[i])-1)\n source = self.graphNodes.nodes[ord(self.graphLinks.sources[i])-1]\n target = self.graphNodes.nodes[ord(self.graphLinks.targets[i])-1]\n \n \n self.p.drawLine(source.x, -source.y, target.x, 
-target.y)\n\n #Drawing nodes\n for i, node in enumerate(self.graphNodes.nodes):\n self.p.setPen(markPen)\n self.p.setBrush(trafficWhite)\n drawCircle(self.p, node.x, -node.y, 10)\n pinkPen = QPen(trafficWhite)\n pinkPen.setWidth(3)\n pinkPen.setCosmetic(True)\n self.p.setPen(pinkPen)\n self.p.setFont(QFont(\"Lucida Sans\", 30, QFont.Bold))\n self.p.drawText(QRectF(node.x, -node.y, 50, 80), str(i), QTextOption(Qt.AlignCenter))\n \n #Drawing current way\n if self.graphState != None:\n \n \n \n prevNode = None\n pinkPen = QPen(Qt.darkGreen)\n pinkPen.setWidth(3)\n pinkPen.setCosmetic(True)\n for i in range(self.graphState.way_count):\n wayId = ord(self.graphState.way[i])-1\n node = self.graphNodes.nodes[wayId]\n \n if wayId == 0:\n node.x = self.graphState.startPoint.x\n node.y = self.graphState.startPoint.y\n if wayId == 1:\n node.x = self.graphState.targetPoint.x\n node.y = self.graphState.targetPoint.y\n \n #Draw line to previous point\n if prevNode != None:\n self.p.setPen(pinkPen)\n self.p.drawLine(node.x, -node.y, prevNode.x, -prevNode.y)\n #Draw way-point\n self.p.setPen(markPen)\n self.p.setBrush(Qt.green)\n drawCircle(self.p, node.x, -node.y, 20)\n \n prevNode = copy.copy(node)\n \n for i, validity in enumerate(self.graphState.valid):\n redPen = QPen(Qt.red)\n redPen.setWidth(3)\n redPen.setCosmetic(True)\n self.p.setPen(redPen)\n if not validity:\n source = self.graphNodes.nodes[self.graphLinks.source[i]]\n target = self.graphNodes.nodes[self.graphLinks.target[i]]\n self.p.drawLine(source.x, -source.y, target.x, -target.y)\n\nclass RobotWidget():\n def __init__(self, painter):\n self.p = painter\n self.actuatorsOut = False\n self.displayMire = False\n \n def draw(self, pose, cfg, stratInfo, leftArm, rightArm):\n self.p.save()\n self.p.setRenderHint(QPainter.Antialiasing)\n self.p.translate(pose.x, pose.y)\n self.p.rotate(pose.h)\n if self.displayMire:\n self.drawMarks(cfg)\n self.drawCarriage(cfg, leftArm, rightArm)\n self.drawWheels(cfg)\n self.drawObjects(stratInfo)\n self.p.restore()\n\n def drawWheels(self, cfg):\n #draw wheels\n self.p.setBrush(Qt.green)\n self.p.drawRoundedRect(QRectF(-cfg.leftWheelDiameter/2, cfg.voie/2. - 5, cfg.leftWheelDiameter, 10), 3, 3)\n self.p.drawRoundedRect(QRectF(-cfg.rightWheelDiameter/2, -cfg.voie/2. 
- 5, cfg.rightWheelDiameter, 10), 3, 3)\n \n def drawMarks(self, cfg):\n pen = QPen(Qt.black)\n pen.setWidth(1)\n pen.setCosmetic(True)\n pen.setDashPattern([2, 3])\n self.p.setPen(pen)\n \n mireSize = 350\n self.p.drawLine(-mireSize, 0, mireSize, 0)\n self.p.drawLine(0, mireSize, 0, -mireSize)\n \n #draw carriage safety zone\n pen = QPen(Qt.black)\n pen.setWidth(1)\n pen.setCosmetic(True)\n pen.setDashPattern([2, 3])\n self.p.setPen(pen)\n self.p.setBrush(transparent)\n drawCircle(self.p, 0, 0, 220)\n drawCircle(self.p, 0, 0, cfg.xouter)\n \n #draw avoidance area\n self.p.setPen(markPen)\n self.p.setBrush(redTranslucid)\n self.p.drawRect(cfg.xav, -cfg.yside, cfg.avoidanceDistFront, T_OMRON_WIDTH)\n self.p.drawRect(cfg.xav, cfg.yside-T_OMRON_WIDTH, cfg.avoidanceDistFront, T_OMRON_WIDTH)\n self.p.drawRect(-cfg.xar - cfg.avoidanceDistRear, -cfg.yside, cfg.avoidanceDistRear, T_OMRON_WIDTH)\n self.p.drawRect(-cfg.xar - cfg.avoidanceDistRear, cfg.yside-T_OMRON_WIDTH, cfg.avoidanceDistRear, T_OMRON_WIDTH)\n \n def drawObjects(self, stratInfo):\n if len(stratInfo.stock):\n self.p.setPen(markPen)\n if stratInfo.stock[0] == Types_pb2.MONOCOLOR:\n drawHorizontalCylinder(self.p, stratInfo.matchColor, 85, 0, 0)\n else:\n drawHorizontalCylinder(self.p, Types_pb2.UNKNOWN, 85, 0, 0)\n \n if len(stratInfo.stock) == 6:\n if stratInfo.stock[5] == Types_pb2.MONOCOLOR:\n drawVerticalCylinder(self.p, stratInfo.matchColor, 180, 45)\n else:\n drawVBicolorCylinder(self.p, 180, 0, 0)\n \n def drawCarriage(self, cfg, leftArm, rightArm):\n \n if self.actuatorsOut:\n self.drawLeftArm(cfg, 1000)\n self.drawRightArm(cfg, 1000)\n \n self.drawLeftArm(cfg, leftArm)\n self.drawRightArm(cfg, rightArm)\n \n self.p.setPen(markPen)\n self.p.setBrush(self.color)\n carriage = QPainterPath()\n mouthX = 35\n mouthY = 35\n #print(\"xar = \" + str(cfg.xar))\n carriage.moveTo(-cfg.xar, cfg.yside)\n carriage.lineTo(cfg.xav, cfg.yside)\n carriage.lineTo(cfg.xav, mouthX)\n carriage.lineTo( mouthY, mouthX)\n carriage.lineTo( mouthY,-mouthX)\n carriage.lineTo(cfg.xav,-mouthX)\n carriage.lineTo(cfg.xav,-cfg.yside)\n carriage.lineTo(-cfg.xar,-cfg.yside)\n carriage.closeSubpath()\n self.p.drawPath(carriage)\n \n self.p.save()\n if self.color == trafficWhite:\n self.p.setPen(Qt.black)\n else:\n self.p.setPen(trafficWhite)\n self.p.setFont(QFont(\"Arial\", 20, QFont.Bold))\n self.p.drawText(QRectF(0, -cfg.yside, 2*cfg.yside, 2*cfg.xar), \"8=>\", QTextOption(Qt.AlignCenter))\n self.p.setFont(QFont(\"Lucida Sans\", 20, QFont.Bold))\n self.p.drawText(QRectF(0, 40, 2*cfg.yside, 2*cfg.xar), \"A.R.D.\", QTextOption(Qt.AlignCenter))\n self.p.rotate(-90)\n self.p.setFont(QFont(\"Lucida Sans\", 30, QFont.Bold))\n self.p.drawText(QRectF(-cfg.yside, -cfg.xar, 2*cfg.yside, 2*cfg.xar), cfg.serialNumber, QTextOption(Qt.AlignCenter))\n self.p.restore()\n \n #draw front lines\n# pen = QPen(ardGray)\n# pen.setWidth(1)\n# pen.setCosmetic(True)\n# pen.setDashPattern([2, 3])\n# self.p.setPen(pen)\n# carriage = QPainterPath()\n# carriage.moveTo(cfg.xav, cfg.yside)\n# carriage.lineTo(cfg.xav, -cfg.yside)\n# carriage.moveTo(cfg.xavExtended, cfg.yside)\n# carriage.lineTo(cfg.xavExtended, -cfg.yside)\n# self.p.drawPath(carriage)\n\n\n \n def drawRightArm(self, cfg, position):\n delta = map(position, 230., 750., 110., 185.)\n self.p.setPen(markPen)\n self.p.setBrush(jetBlack)\n drawCircle(self.p, delta, 60, 22)\n self.p.setBrush(Qt.gray)\n drawCircle(self.p, delta, 60, 15)\n arm = QPainterPath()\n arm.moveTo(delta - 40 + 40, 50)\n arm.lineTo(delta - 40 - 30, 50)\n 
arm.lineTo(delta - 40 - 30, 70)\n arm.lineTo(delta - 40 + 40, 70)\n arm.closeSubpath()\n self.p.drawPath(arm)\n \n def drawLeftArm(self, cfg, position): \n delta = map(position, 230., 750., 110., 185.)\n self.p.setPen(markPen)\n self.p.setBrush(jetBlack)\n drawCircle(self.p, delta, -60, 22)\n self.p.setBrush(Qt.gray)\n drawCircle(self.p, delta, -60, 15)\n arm = QPainterPath()\n arm.moveTo(delta - 40 + 40, -50)\n arm.lineTo(delta - 40 - 30, -50)\n arm.lineTo(delta - 40 - 30, -70)\n arm.lineTo(delta - 40 + 40, -70)\n arm.closeSubpath()\n self.p.drawPath(arm)\n \n def filterPosition(self, cfg, x, y, h):\n xf = x\n yf = y\n \n #\n # Reacal AV\n #-------------------\n #Robot is facing up\n if fabs(h - 90) < 2:\n #Robot is recal'ing against top table border \n if fabs(x) < 790 and 1000 < y + cfg.xav :\n yf = 1000 - cfg.xav\n #Robot is recal'ing against start wall\n if 790 < fabs(x) and 618 < y + cfg.xav :\n yf = 618 - cfg.xav\n #Robot is facing down\n if fabs(h + 90) < 2:\n #Robot is recal'ing against bot table border \n if y - cfg.xav < -1000 :\n yf = -1000 + cfg.xav\n #Robot is facing left\n if 178 < fabs(h):\n #Robot is recaling against our 3 border\n if y < -200 and -200 < x and x - cfg.xav < 68:\n xf = 68 + cfg.xav\n #Robot is recaling against opponent 1 border\n if -172 < y and y < 322 and x - cfg.xav < -1500 + 108:\n xf = -1500 + 108 + cfg.xav\n #Robot is recal'ing against left table border \n if 322 < y and y < 618 and x - cfg.xav < -1500 :\n xf = -1500 + cfg.xav\n #Robot is recal'ing against start bascule\n if 618 < y and x - cfg.xav < -790 :\n xf = -790 + cfg.xav\n #Robot is facing right\n if fabs(h) < 2 :\n #Robot is recaling against opp 3 border\n if y < -200 and -68 < x + cfg.xav and x < 200:\n xf = -68 - cfg.xav\n #Robot is recaling against our 5 border\n if -172 < y and y < 322 and 1500 - 108 < x + cfg.xav:\n xf = 1500 - 108 - cfg.xav\n #Robot is recal'ing against right table border \n if 322 < y and y < 618 and 1500 < x + cfg.xav :\n xf = 1500 - cfg.xav\n #Robot is recal'ing against start bascule\n if 618 < y and 790 < x + cfg.xav :\n xf = 790 - cfg.xav \n \n #\n # Reacal AR\n #-----------------\n #Robot is facing up\n if fabs(h - 90) < 2:\n #Robot is recal'ing against bot table border \n if y - cfg.xar < -1000 :\n yf = -1000 + cfg.xar\n #Robot is facing down\n if fabs(h + 90) < 2:\n #Robot is recal'ing against top table border \n if fabs(x) < 790 and 1000 < y + cfg.xar :\n yf = 1000 - cfg.xar\n #Robot is recal'ing against start wall\n if 790 < fabs(x) and 618 < y + cfg.xar :\n yf = 618 - cfg.xar\n #Robot is facing left\n if 178 < fabs(h):\n #Robot is recaling against opp 3 border\n if y < -200 and -68 < x + cfg.xar and x < 200:\n xf = -68 - cfg.xar\n #Robot is recaling against our 5 border\n if -172 < y and y < 322 and 1500 - 108 < x + cfg.xar:\n xf = 1500 - 108 - cfg.xar\n #Robot is recal'ing against left table border \n if 322 < y and y < 618 and 1500 < x + cfg.xar :\n xf = 1500 - cfg.xar\n #Robot is recal'ing against start bascule\n if 618 < y and 790 < x + cfg.xar :\n xf = 790 - cfg.xar \n #Robot is facing right\n if fabs(h) < 2 :\n #Robot is recaling against our 3 border\n if y < -200 and -200 < x and x - cfg.xar < 68:\n xf = 68 + cfg.xar\n #Robot is recaling against opponent 1 border\n if -172 < y and y < 322 and x - cfg.xar < -1500 + 108:\n xf = -1500 + 108 + cfg.xar\n #Robot is recal'ing against right table border \n if 322 < y and y < 618 and x - cfg.xar < -1500 :\n xf = -1500 + cfg.xar\n #Robot is recal'ing against start bascule\n if 618 < y and x - cfg.xar < -790 :\n 
xf = -790 + cfg.xar\n \n return xf,yf\n \n \nclass RobotPenWidget(RobotWidget):\n def __init__(self, painter):\n super().__init__(painter)\n self.color = trafficWhite \n \nclass RobotTrationWidget(RobotWidget):\n def __init__(self, painter):\n super().__init__(painter)\n self.color = darkRed \n\nclass GhostWidget(RobotWidget):\n def __init__(self, painter):\n super().__init__(painter)\n self.color = ardGray \n self.pose = Pose2D(-1300,-800,0)\n self.displayMire = True\n self.actuatorsOut = True\n self.cfg = None\n \n def draw(self, pose, cfg2, stratInfo2):\n self.cfg = copy.copy(cfg2)\n self.cfg.serialNumber = \"Ghost\"\n stratInfo = CommonMsg_pb2.StratInfo2017()\n self.p.save()\n self.p.setRenderHint(QPainter.Antialiasing)\n self.p.translate(pose.x, -pose.y)\n self.p.rotate(-math.degrees(pose.h))\n if self.displayMire:\n self.drawMarks(self.cfg)\n self.drawCarriage(self.cfg, 0, 0)\n self.drawWheels(self.cfg)\n self.p.restore()\n \n def isMouseInCarriage(self, point):\n if self.cfg == None:\n return False\n \n if self.pose.dist(point) <= self.cfg.xouter:\n return True\n else:\n return False\n\nclass TableWidget():\n \n def __init__(self, painter):\n self.p = painter\n \n def draw(self, stratInfo):\n self.drawBackground()\n self.drawStartArea()\n self.drawCraters()\n self.drawContainers(stratInfo)\n self.drawBorders()\n self.drawDispensers(stratInfo)\n self.drawMarks()\n self.drawCylinders(stratInfo)\n self.drawPooedObjects(stratInfo)\n \n def drawBorders(self):\n self.p.setPen(markPen)\n self.p.setBrush(peebleGrey)\n \n path = QPainterPath()\n path.lineTo(T_WIDTH, 0)\n path.lineTo(T_WIDTH, T_HEIGHT)\n path.lineTo(0, T_HEIGHT)\n path.lineTo(0, 0)\n path.moveTo(-22, -22)\n path.lineTo(T_WIDTH+22, -22)\n path.lineTo(T_WIDTH+22, T_HEIGHT+22)\n path.lineTo(-22, T_HEIGHT+22)\n path.lineTo(-22, -22)\n self.p.drawPath(path)\n \n def drawBackground(self):\n self.p.setPen(markPen)\n self.p.setBrush(ardBackground)\n self.p.drawRect(0, 0, T_WIDTH, T_HEIGHT)\n \n def drawStartArea(self):\n #BLUE AREA\n #start area\n self.p.setBrush(skyBlue)\n self.p.setPen(QPen())\n self.p.drawRect(0, 0, 1070, 360)\n #bascule\n self.p.setBrush(peebleGrey)\n self.p.setPen(markPen)\n self.p.drawRect(360, 0, 350, 360)\n #little border\n self.p.setBrush(skyBlue)\n self.p.setPen(markPen)\n self.p.drawRect(0, 360, 710, 22)\n \n #YELLOW AREA\n #start area\n self.p.setBrush(trafficYellow)\n self.p.setPen(QPen())\n self.p.drawRect(1930, 0, 1070, 360) \n #bascule\n self.p.setBrush(peebleGrey)\n self.p.setPen(markPen)\n self.p.drawRect(2290, 0, 350, 360)\n #little border \n self.p.setBrush(trafficYellow)\n self.p.setPen(markPen) \n self.p.drawRect(2290, 360, 710, 22) \n \n def drawMarks(self):\n pen = QPen(ardGray)\n pen.setWidth(1)\n pen.setCosmetic(True)\n pen.setDashPattern([5, 10])\n \n #Table axis\n self.p.setPen(pen)\n self.p.drawLine(T_WIDTH/2, T_HEIGHT/2, T_WIDTH, T_HEIGHT/2)\n self.p.drawLine(T_WIDTH/2, T_HEIGHT/2, 0, T_HEIGHT/2)\n self.p.drawLine(T_WIDTH/2, T_HEIGHT/2, T_WIDTH/2, 0)\n self.p.drawLine(T_WIDTH/2, T_HEIGHT/2, T_WIDTH/2, T_HEIGHT)\n \n #Central container circle\n self.p.save()\n self.p.setRenderHint(QPainter.Antialiasing)\n self.p.drawArc(QRectF(T_WIDTH/2-800, T_HEIGHT-800, 2*800, 2*800), 180*16, -180*16) \n self.p.restore()\n \n \n def drawCraters(self):\n self.p.save()\n self.p.setPen(markPen)\n self.p.setBrush(trafficWhite)\n self.p.setRenderHint(QPainter.Antialiasing)\n \n #LeftCrater\n leftCrater = QPainterPath()\n leftCrater.moveTo(0, T_HEIGHT - 510)\n leftCrater.arcTo(QRectF(-510, T_HEIGHT - 510, 
2*510, 2*510), 90, -90)\n leftCrater.lineTo(510 + 30, T_HEIGHT)\n leftCrater.arcTo(QRectF(-510-30, T_HEIGHT -510-30, 2*(510+30), 2*(510+30)), 0, 90)\n leftCrater.closeSubpath()\n self.p.drawPath(leftCrater)\n \n #RightCrater\n rightCrater = QPainterPath()\n rightCrater.moveTo(T_WIDTH, T_HEIGHT - 510)\n rightCrater.arcTo(QRectF(T_WIDTH-510, T_HEIGHT - 510, 2*510, 2*510), 90, 90)\n rightCrater.lineTo(T_WIDTH - 510 - 30, T_HEIGHT)\n rightCrater.arcTo(QRectF(T_WIDTH-510-30, T_HEIGHT -510-30, 2*(510+30), 2*(510+30)), 180, -90)\n rightCrater.closeSubpath()\n self.p.drawPath(rightCrater)\n self.p.restore()\n \n #Little ones\n self._drawOneCrater( 650, 540)\n self._drawOneCrater(2350, 540)\n self._drawOneCrater(1070, 1870)\n self._drawOneCrater(1930, 1870)\n \n def drawPooedObjects(self, stratInfo):\n \n #Draw Opp straight border area pooed objects\n for i in range (0, stratInfo.containerBorderOppNb):\n if stratInfo.matchColor == Types_pb2.PREF:\n drawHorizontalCylinder(self.p, stratInfo.matchColor, 40, 700 + 25 + 100/2. + i*100, 90)\n else:\n drawHorizontalCylinder(self.p, stratInfo.matchColor, T_WIDTH - 40, 700 + 25 + 100/2. + i*100, 90)\n \n #Draw Own straight border area pooed objects \n for i in range (0, stratInfo.containerBorderNb):\n if stratInfo.matchColor == Types_pb2.PREF:\n drawHorizontalCylinder(self.p, stratInfo.matchColor, T_WIDTH - 40, 700 + 25 + 100/2. + i*100, 90)\n else:\n drawHorizontalCylinder(self.p, stratInfo.matchColor, 40, 700 + 25 + 100/2. + i*100, 90)\n \n #Draw midle center pooed objects\n for i in range (0, stratInfo.containerMidleCenterNb):\n drawHorizontalCylinder(self.p, stratInfo.matchColor, T_WIDTH/2., 1250 + i*100, 90)\n \n #Draw own midle side pooed objects\n self.p.save()\n self.p.translate(T_WIDTH/2., T_HEIGHT)\n #Yellow side\n if stratInfo.matchColor == Types_pb2.PREF:\n self.p.rotate(-45)\n #Blue side\n else:\n self.p.rotate(-135)\n for i in range (0, stratInfo.containerMidleOwnNb):\n drawHorizontalCylinder(self.p, stratInfo.matchColor, 750 - i*100, 0, 0)\n self.p.restore()\n \n #Draw opp midle side pooed objects\n self.p.save()\n self.p.translate(T_WIDTH/2., T_HEIGHT)\n #Yellow side\n if stratInfo.matchColor == Types_pb2.PREF:\n self.p.rotate(-135)\n #Blue side\n else:\n self.p.rotate(-45)\n for i in range (0, stratInfo.containerMidleOppNb):\n drawHorizontalCylinder(self.p, stratInfo.matchColor, 750 - i*100, 0, 0)\n self.p.restore()\n\n #Draw start area pooed objects\n for i in range (0, stratInfo.containerStartNb):\n #Yellow side\n if stratInfo.matchColor == Types_pb2.PREF:\n if i < 4 :\n drawHorizontalCylinder(self.p, Types_pb2.UNKNOWN, 2000 + i*75, 100, 90)\n else:\n drawHorizontalCylinder(self.p, Types_pb2.UNKNOWN, 2000 + (i-4)*75, 250, 90)\n #Blue side\n else:\n if i < 4 :\n drawHorizontalCylinder(self.p, Types_pb2.UNKNOWN, 1000 - i*75, 100, 90)\n else:\n drawHorizontalCylinder(self.p, Types_pb2.UNKNOWN, 1000 - (i-4)*75, 250, 90) \n\n def _drawOneCrater(self, x, y):\n self.p.setPen(markPen)\n self.p.save()\n self.p.setBrush(trafficWhite)\n self.p.translate(x, y)\n drawCircle(self.p, 0,0, 251/2.)\n self.p.setBrush(ardBackground)\n drawCircle(self.p, 0,0, 191/2.)\n self.p.restore()\n \n def drawContainers(self, stratInfo):\n self.p.setPen(markPen)\n self.p.setBrush(trafficWhite)\n \n #left straight container\n self.p.drawRect(0, 700-22, 80, 22)\n self.p.drawRect(0, 1150, 80, 22) \n self.p.drawRect(80, 700, 28, 450) \n \n #right straight container\n self.p.drawRect(T_WIDTH-80, 700-22, 80, 22)\n self.p.drawRect(T_WIDTH-80, 1150, 80, 22) \n 
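#inner vertical wall of the right container (mirrors the left one drawn above)\n 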
self.p.drawRect(T_WIDTH-80-28, 700, 28, 450) \n \n #middle central container\n self.p.save()\n self.p.setRenderHint(QPainter.Antialiasing)\n self.p.translate(T_WIDTH/2, T_HEIGHT)\n self.p.rotate(45)\n for i in [1, 2, 3]:\n self.p.drawRect(-80/2-28, -800, 28, 620)\n self.p.drawRect(80/2, -800, 28, 620)\n self.p.rotate(-45)\n self.p.restore()\n \n #central circle\n self.p.save()\n self.p.setRenderHint(QPainter.Antialiasing)\n self.p.drawPie(QRectF(T_WIDTH/2-200, T_HEIGHT-200, 2*200, 2*200), 180*16, -180*16) \n self.p.restore()\n \n def drawDispensers(self, stratInfo):\n #Bicolor lateral dispenser on Blue side\n self.p.save()\n self.p.setPen(markPen)\n self.p.setBrush(peebleGrey)\n self.p.translate(40, 1350)\n drawCircle(self.p, 0,0,40)\n self.p.restore()\n if stratInfo.dispenserBicolorNb != 0 and stratInfo.matchColor == Types_pb2.SYM:\n for i in range(0, stratInfo.dispenserBicolorNb):\n drawVBicolorCylinder(self.p, 40 - 20*i, 1350, 135)\n if stratInfo.dispenserOppNb != 0 and stratInfo.matchColor == Types_pb2.PREF:\n for i in range(0, stratInfo.dispenserOppNb):\n drawVBicolorCylinder(self.p, 40 - 20*i, 1350, 135)\n \n #Blue Top dispenser\n self.p.save()\n self.p.setPen(markPen)\n self.p.setBrush(peebleGrey)\n self.p.translate(1150, 40)\n drawCircle(self.p, 0,0,40)\n self.p.restore()\n if stratInfo.dispenserMonocolorNb != 0 and stratInfo.matchColor == Types_pb2.SYM:\n for i in range(0, stratInfo.dispenserMonocolorNb):\n drawVerticalCylinder(self.p, Types_pb2.SYM, 1150, 40 - 20*i) \n if stratInfo.matchColor == Types_pb2.PREF:\n for i in range(0, 4):\n drawVerticalCylinder(self.p, Types_pb2.SYM, 1150, 40 - 20*i) \n \n #Yellow Top dispenser\n self.p.save()\n self.p.translate(1850, 40)\n drawCircle(self.p, 0,0,40)\n self.p.restore()\n if stratInfo.dispenserMonocolorNb != 0 and stratInfo.matchColor == Types_pb2.PREF:\n for i in range(0, stratInfo.dispenserMonocolorNb):\n drawVerticalCylinder(self.p, Types_pb2.PREF, 1850, 40 - 20*i) \n if stratInfo.matchColor == Types_pb2.SYM:\n for i in range(0, 4):\n drawVerticalCylinder(self.p, Types_pb2.PREF, 1850, 40 - 20*i) \n\n #Bicolor lateral dispenser on Yellow side\n self.p.save()\n self.p.translate(T_WIDTH - 40, 1350)\n drawCircle(self.p, 0,0,40)\n self.p.restore()\n if stratInfo.dispenserBicolorNb != 0 and stratInfo.matchColor == Types_pb2.PREF:\n for i in range(0, stratInfo.dispenserBicolorNb):\n drawVBicolorCylinder(self.p, T_WIDTH - 40 + 20*i, 1350, -45)\n if stratInfo.dispenserOppNb != 0 and stratInfo.matchColor == Types_pb2.SYM:\n for i in range(0, stratInfo.dispenserOppNb):\n drawVBicolorCylinder(self.p, T_WIDTH - 40 + 20*i, 1350, -45)\n \n \n def drawCylinders(self, stratInfo):\n #\n #Yellow area\n #\n #Cylinder close to start area\n if stratInfo.cylinderStart and stratInfo.matchColor == Types_pb2.PREF \\\n or stratInfo.cylinderOppStart and stratInfo.matchColor == Types_pb2.SYM:\n drawVBicolorCylinder(self.p, 2000, 600, -45) \n \n #Cylinder between straight container and little border corner.\n if stratInfo.cylinderCorner and stratInfo.matchColor == Types_pb2.PREF \\\n or stratInfo.matchColor == Types_pb2.SYM:\n drawVerticalCylinder(self.p, Types_pb2.PREF, 2800,600) \n \n #Cylinder in the \"middle\" of the half-table\n if stratInfo.cylinderCenter and stratInfo.matchColor == Types_pb2.PREF \\\n or stratInfo.cylinderOppCenter and stratInfo.matchColor == Types_pb2.SYM:\n drawVBicolorCylinder(self.p, 2500, 1100, -45) \n \n #Cylinder blocking central container\n if stratInfo.cylinderContainer and stratInfo.matchColor == Types_pb2.PREF \\\n or 
stratInfo.cylinderOppContainer and stratInfo.matchColor == Types_pb2.SYM:\n drawVBicolorCylinder(self.p, 2100, 1400, -45) \n \n #Cylinder between bottom craters\n if stratInfo.cylinderCrater and stratInfo.matchColor == Types_pb2.PREF \\\n or stratInfo.matchColor == Types_pb2.SYM:\n drawVerticalCylinder(self.p, Types_pb2.PREF, 2200,1850) \n \n \n #\n #Blue area\n #\n #Cylinder close to start area\n if stratInfo.cylinderStart and stratInfo.matchColor == Types_pb2.SYM \\\n or stratInfo.cylinderOppStart and stratInfo.matchColor == Types_pb2.PREF:\n drawVBicolorCylinder(self.p, 1000, 600, 135) \n \n #Cylinder between straight container and little border corner.\n if stratInfo.cylinderCorner and stratInfo.matchColor == Types_pb2.SYM \\\n or stratInfo.matchColor == Types_pb2.PREF:\n drawVerticalCylinder(self.p, Types_pb2.SYM, 200, 600)\n \n #Cylinder in the \"middle\" of the half-table\n if stratInfo.cylinderCenter and stratInfo.matchColor == Types_pb2.SYM \\\n or stratInfo.cylinderOppCenter and stratInfo.matchColor == Types_pb2.PREF:\n drawVBicolorCylinder(self.p, 500, 1100, 135)\n \n #Cylinder blocking central container\n if stratInfo.cylinderContainer and stratInfo.matchColor == Types_pb2.SYM \\\n or stratInfo.cylinderOppContainer and stratInfo.matchColor == Types_pb2.PREF:\n drawVBicolorCylinder(self.p, 900, 1400, 135)\n \n #Cylinder between bottom craters\n if stratInfo.cylinderCrater and stratInfo.matchColor == Types_pb2.SYM \\\n or stratInfo.matchColor == Types_pb2.PREF:\n drawVerticalCylinder(self.p, Types_pb2.SYM, 800, 1850)\n \n \ndef drawCircle(painter, x, y, radius):\n painter.save()\n painter.setRenderHint(QPainter.Antialiasing)\n painter.drawEllipse(QRectF(x - radius, y - radius, 2*radius, 2*radius))\n painter.restore()\n\ndef drawVerticalCylinder(painter, color, x, y):\n painter.save()\n painter.setPen(markPen)\n if color == Types_pb2.PREF:\n painter.setBrush(trafficYellow)\n else:\n painter.setBrush(skyBlue)\n painter.translate(x,y)\n drawCircle(painter, 0,0, 63/2.)\n painter.restore()\n \ndef drawVBicolorCylinder(painter, x, y, angle):\n painter.save()\n painter.setRenderHint(QPainter.Antialiasing)\n painter.setPen(markPen)\n painter.translate(x,y)\n painter.rotate(angle)\n \n #white sections\n painter.setBrush(trafficWhite)\n path = QPainterPath()\n path.moveTo(0,0)\n path.lineTo(0,63/2.)\n path.arcTo(QRectF(-63/2., -63/2., 63, 63), -90, 90)\n path.lineTo(-63/2.,0)\n path.arcTo(QRectF(-63/2., -63/2., 63, 63), 180, -90)\n path.closeSubpath()\n painter.drawPath(path)\n \n #yellow section\n painter.setBrush(trafficYellow)\n path = QPainterPath()\n path.moveTo(0,0)\n path.lineTo(63/2.,0)\n path.arcTo(QRectF(-63/2., -63/2., 63, 63), 0, 90)\n path.closeSubpath()\n painter.drawPath(path)\n \n #blue section\n painter.setBrush(skyBlue)\n path = QPainterPath()\n path.moveTo(0,0)\n path.lineTo(-63/2.,0)\n path.arcTo(QRectF(-63/2., -63/2., 63, 63), 180, 90)\n path.closeSubpath()\n painter.drawPath(path)\n painter.restore()\n \ndef drawHorizontalCylinder(painter, color, x, y, angle): \n painter.save()\n \n painter.translate(x,y)\n painter.rotate(angle)\n \n painter.setPen(markPen)\n if color == Types_pb2.PREF:\n painter.setBrush(trafficYellow)\n elif color == Types_pb2.SYM:\n painter.setBrush(skyBlue)\n else:\n painter.setBrush(trafficWhite)\n \n cylinder = QPainterPath()\n cylinder.moveTo(-50, 0)\n cylinder.lineTo(-50, -63/2.)\n cylinder.lineTo(50, -63/2.)\n cylinder.lineTo(50, 63/2.)\n cylinder.lineTo(-50, 63/2.)\n cylinder.closeSubpath()\n painter.drawPath(cylinder)\n \n 
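#restore the painter state saved above (undoes the translate/rotate)\n 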
painter.restore()\n ","sub_path":"vizu/gui/TableDrawing.py","file_name":"TableDrawing.py","file_ext":"py","file_size_in_byte":36142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"649428835","text":"# 21-mergeTwoLists.py\n\n# Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.\n\n# Example:\n\n# Input: 1->2->4, 1->3->4\n# Output: 1->1->2->3->4->4\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n\n # First define a function to help add nodes\n def append_node(head_node, node_to_add):\n \"\"\"Append a node to the end of a singly linked list given head_node\"\"\"\n if not head_node.next:\n head_node.next = node_to_add\n return\n last = head_node\n while last.next:\n last = last.next\n last.next = node_to_add\n return\n\n # Loop through all nodes of l1 and l2\n # at each step compare which node is smaller, then add that\n # smaller node to the output list\n\n # first initialize the first node\n if l1 and l2:\n if l1.val <= l2.val:\n output_list = ListNode(l1.val)\n l1 = l1.next\n else:\n output_list = ListNode(l2.val)\n l2 = l2.next\n\n # If given only one list or none, return appropriately:\n elif l1:\n return l1\n elif l2:\n return l2\n else:\n return None\n\n while l1 and l2:\n if l1.val <= l2.val:\n append_node(\n head_node=output_list,\n node_to_add=ListNode(l1.val)\n )\n l1 = l1.next\n else:\n append_node(\n head_node=output_list,\n node_to_add=ListNode(l2.val)\n )\n l2 = l2.next\n\n # If one list has run out of items, add rest of other list to output\n if l1:\n append_node(\n head_node=output_list,\n node_to_add=l1\n )\n elif l2:\n append_node(\n head_node=output_list,\n node_to_add=l2\n )\n\n return output_list\n\n\nif __name__ == '__main__':\n\n solution = Solution()\n\n # Input: 1->2->4, 1->3->4\n # Output: 1->1->2->3->4->4\n l1 = ListNode(1)\n l1_a = ListNode(2)\n l1_b = ListNode(4)\n l1.next = l1_a\n l1_a.next = l1_b\n\n l2 = ListNode(1)\n l2_a = ListNode(3)\n l2_b = ListNode(4)\n l2.next = l2_a\n l2_a.next = l2_b\n\n print(\"l1: \")\n printval = l1\n while printval:\n print(printval.val)\n printval = printval.next\n\n print(\"l2: \")\n printval = l2\n while printval:\n print(printval.val)\n printval = printval.next\n\n output_list = solution.mergeTwoLists(l1, l2)\n\n print(\"SOLUTION: -----------------------------\")\n while output_list:\n print(output_list.val)\n output_list = output_list.next\n\n\n print(\"Solution with None inputs\")\n output_list = solution.mergeTwoLists(None, None)\n while output_list:\n print(output_list.val)\n output_list = output_list.next\n\n # Input: 2, 1\n # Output: 1->2\n l1 = ListNode(2)\n\n l2 = ListNode(1)\n\n print(\"l1: \")\n printval = l1\n while printval:\n print(printval.val)\n printval = printval.next\n\n print(\"l2: \")\n printval = l2\n while printval:\n print(printval.val)\n printval = printval.next\n\n output_list = solution.mergeTwoLists(l1, l2)\n\n print(\"SOLUTION: -----------------------------\")\n while output_list:\n print(output_list.val)\n output_list = output_list.next","sub_path":"leetcode/21-mergeTwoLists.py","file_name":"21-mergeTwoLists.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"95503096","text":"# Exercise 3.3\n# Approximation 
of a continuous function, 2 layers\nfrom __future__ import print_function\nimport sys\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import *\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nsys.path.append(\"../..\")\n\nimport lab0.src.dataset5 as dataset5\nfrom lib.custom import EarlyStoppingByLossVal, custom_fit\nimport lib.gui_reporter as gr\nfrom sklearn.utils import shuffle\n\nif __name__ == '__main__':\n # 1 parameters initializing---------------------------------------------------------\n np.random.seed(18)\n train_size = 48000\n batch_size = 480\n epochs = 300\n lr = 0.05\n goal_loss = 0.0001\n loss = 'mae'\n\n neurons_number = [40, 20, 10]\n\n opt_name = \"Adam\"\n\n optimizer = Adam(lr=lr, decay=0.0001)\n\n draw_step = 10\n verbose = 1\n\n # 2 model and data initializing---------------------------------------------------------\n (x_train, y_train), (x_test, y_test) = dataset5.load_data(train_size=train_size, show=False)\n\n x_train = np.transpose(np.append(x_train, np.ones(x_train.size)).reshape(2, x_train.size))\n x_test = np.transpose(np.append(x_test, np.ones(x_test.size)).reshape(2, x_test.size))\n\n model = Sequential()\n\n model.add(Dense(neurons_number[0], input_dim=2, activation='sigmoid'))\n model.add(Dense(neurons_number[1], activation='sigmoid'))\n model.add(Dense(neurons_number[2], activation='sigmoid'))\n\n model.add(Dense(1, activation='linear'))\n\n # 3 setting stopper---------------------------------------------------------\n callbacks = [EarlyStoppingByLossVal(monitor='val_loss', value=goal_loss, verbose=0)]\n\n model.compile(optimizer=optimizer, loss=loss)\n\n # 4 model fitting---------------------------------------------------------\n\n dir_name = None\n\n compare_title = 'approximation comparison\\nlr = %.3f\\n neurons = %.d %.d %.d' % \\\n (lr, neurons_number[0], neurons_number[1], neurons_number[2])\n\n history = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs,\n verbose=verbose, callbacks=callbacks, validation_data=(x_test, y_test), )\n\n plt.plot(np.transpose(x_test)[0], y_test, '.')\n plt.plot(np.transpose(x_test)[0], model.predict(x_test), '.')\n plt.legend(('function', 'approximation'), loc='upper left', shadow=True)\n plt.title(\n compare_title + \"\\nval_loss = %.4f\\nepoch = %d\" % (history.history[\"val_loss\"][len(history.epoch) - 1],\n epochs))\n\n plt.show()\n plt.close()\n\n gr.plot_graphic(x=history.epoch, y=np.array(history.history[\"val_loss\"]), x_label='epochs', y_label='val_loss',\n title=\"val_loss\" + ' history', save=False, show=True)\n","sub_path":"lab2/src/ex3_3.py","file_name":"ex3_3.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"31445184","text":"class Node(object):\n def __init__(self, key):\n self.left = None\n self.right = None\n self.parent = None\n self.key = key\n\n def __str__(self):\n return str(self.key)\n\n\nclass BSTree(object):\n def __init__(self):\n self.root = None\n\n def insert(self, key):\n parent = None\n node = self.root\n while node is not None:\n parent = node\n if key < node.key:\n node = node.left\n else:\n node = node.right\n new_node = Node(key)\n new_node.parent = parent\n if parent is None:\n self.root = new_node\n elif new_node.key < parent.key:\n parent.left = new_node\n else:\n parent.right = new_node\n\n def in_order(self, node):\n if node is not None:\n self.in_order(node.left)\n print(node)\n self.in_order(node.right)\n\n def 
pre_order(self, node):\n if node is not None:\n print(node)\n self.pre_order(node.left)\n self.pre_order(node.right)\n\n def post_order(self, node):\n if node is not None:\n self.post_order(node.left)\n self.post_order(node.right)\n print(node)\n\n def recursive_search(self, node, key):\n if node is None or key == node.key:\n return node\n if key < node.key:\n return self.recursive_search(node.left, key)\n else:\n return self.recursive_search(node.right, key)\n\n def iterative_search(self, key):\n node = self.root\n while node is not None and key != node.key:\n if key < node.key:\n node = node.left\n else:\n node = node.right\n return node\n\n def minimum(self):\n node = self.root\n while node.left is not None:\n node = node.left\n return node\n\n def maximum(self):\n node = self.root\n while node.right is not None:\n node = node.right\n return node\n\n def _minimum(self, node):\n while node.left is not None:\n node = node.left\n return node\n\n def _maximum(self, node):\n while node.right is not None:\n node = node.right\n return node\n\n def successor(self, key):\n node = self.iterative_search(key)\n if node is None:\n return None\n if node.right is not None:\n return self._minimum(node.right)\n else:\n parent = node.parent\n while parent is not None and node is parent.right:\n node = parent\n parent = node.parent\n return parent\n\n def predecessor(self, key):\n node = self.iterative_search(key)\n if node is None:\n return None\n if node.left is not None:\n return self._maximum(node.left)\n else:\n parent = node.parent\n while parent is not None and node is parent.left:\n node = parent\n parent = node.parent\n return parent\n\n def _transplant(self, u, v):\n if u.parent is None:\n self.root = v\n elif u is u.parent.left:\n u.parent.left = v\n else:\n u.parent.right = v\n if v is not None:\n v.parent = u.parent\n\n def delete(self, key):\n node = self.iterative_search(key)\n if node is not None:\n if node.left is None:\n self._transplant(node, node.right)\n elif node.right is None:\n self._transplant(node, node.left)\n else:\n new_node = self._minimum(node.right)\n if new_node.parent is not node:\n self._transplant(new_node, new_node.right)\n new_node.right = node.right\n new_node.right.parent = new_node\n self._transplant(node, new_node)\n new_node.left = node.left\n new_node.left.parent = new_node\n\n\ndef main():\n tree = BSTree()\n tree.insert(15)\n tree.insert(4)\n tree.insert(3)\n tree.insert(5)\n tree.insert(10)\n tree.insert(1)\n tree.insert(17)\n tree.insert(16)\n tree.insert(28)\n tree.insert(20)\n\n tree.in_order(tree.root)\n print(tree.minimum())\n print(tree.maximum())\n print(tree.successor(5))\n print(tree.predecessor(5))\n\n tree.delete(17)\n tree.delete(1)\n tree.delete(5)\n tree.delete(3)\n tree.delete(28)\n tree.delete(20)\n tree.delete(4)\n tree.in_order(tree.root)\n\nif __name__ == '__main__':\n main()\n","sub_path":"binary-search-tree.py","file_name":"binary-search-tree.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"589913824","text":"\"\"\" Variant on Gauci et al's controller for object clustering. 
\"\"\"\n\nimport pyglet\nfrom controller import Controller\nfrom math import fabs, pi\nfrom random import random\nfrom common import Twist, WALL_MASK, ROBOT_MASK, M_TO_PIXELS, RED_PUCK_MASK, GREEN_PUCK_MASK, BLUE_PUCK_MASK\nfrom common.angles import normalize_angle_pm_pi\n\nclass MyController(Controller):\n\n def __init__(self, acceptable_puck_mask):\n self.acceptable_puck_mask = acceptable_puck_mask\n self.current_puck_type = None\n\n def draw_circle(self, robot, red, green, blue, thickness):\n x1 = robot.body.position.x - 2\n y1 = robot.body.position.y - 2\n x2 = robot.body.position.x + 2\n y2 = robot.body.position.y + 2\n pyglet.gl.glLineWidth(thickness)\n vertices = (x1, y1, x2, y1, x2, y2, x1, y2)\n colors = (red, green, blue, red, green, blue, \\\n red, green, blue, red, green, blue)\n pyglet.graphics.draw(4, pyglet.gl.GL_QUADS,\n ('v2f', vertices),\n ('c3B', colors))\n pyglet.gl.glLineWidth(1)\n\n def react(self, this_robot, sensor_suite, visualize=False):\n twist = Twist()\n\n scan = sensor_suite.range_scan\n\n # By default we will attend to the centre of the scan\n n = len(scan.ranges)\n centre_index = n/2\n\n # ...but we will shift attention based on the compass\n # John: Uncomment to play with compass-modulated shifting.\n \"\"\"\n compass_angle = normalize_angle_pm_pi(this_robot.body.angle)\n sign_dir = 1\n if self.current_puck_type != None and (self.current_puck_type & RED_PUCK_MASK) != 0:\n sign_dir = -1\n if compass_angle < 0:\n centre_index = sign_dir * 2\n else:\n centre_index = -sign_dir * 2\n \"\"\"\n\n puck_ahead = (scan.masks[centre_index] & self.acceptable_puck_mask) != 0\n puck_mask = scan.masks[centre_index]\n\n # Set current_puck_type if uninitialized.\n if self.current_puck_type == None and puck_ahead:\n self.current_puck_type = puck_mask\n\n # With a small random probability, change current_puck_type to match\n # the puck ahead.\n # John: Uncomment to play with sorting\n \"\"\"\n if puck_ahead and random() < 0.005:\n self.current_puck_type = puck_mask\n \"\"\"\n\n # Set the two predicates 'react_to_puck' and 'react_to_robot'. 
Only\n # one should be true.\n react_to_puck = puck_ahead and puck_mask == self.current_puck_type\n\n react_to_robot = scan.masks[centre_index] == ROBOT_MASK\n assert not (react_to_puck and react_to_robot)\n\n # Now react...\n if react_to_puck:\n # Turn right\n twist.linear = 4\n twist.angular = 2.0\n elif react_to_robot:\n # Turn left and slow\n twist.linear = 0.5 \n twist.angular = -2.0\n else:\n # Turn left\n twist.linear = 4\n twist.angular = -2.0\n\n if visualize:\n \"\"\"\n if self.current_puck_type == RED_PUCK_MASK:\n self.draw_circle(this_robot, 255, 0, 0, 1)\n elif self.current_puck_type == GREEN_PUCK_MASK:\n self.draw_circle(this_robot, 0, 255, 0, 1)\n elif self.current_puck_type == BLUE_PUCK_MASK:\n self.draw_circle(this_robot, 0, 0, 255, 1)\n else:\n self.draw_circle(this_robot, 0, 255, 255, 255)\n \"\"\"\n if react_to_puck:\n self.draw_circle(this_robot, 255, 0, 0, 1)\n elif react_to_robot:\n self.draw_circle(this_robot, 0, 0, 255, 1)\n else:\n self.draw_circle(this_robot, 0, 255, 0, 1)\n\n return twist\n","sub_path":"controllers/mycontroller.py","file_name":"mycontroller.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"113844144","text":"#!/usr/bin/env python\n#used libraries\nimport requests\nimport json\n#Vars\ni = 1\nnum = 1\n#let's get the id of the video\nvideoid = raw_input(\"VIDEO ID::\")\n#infinite loop\nwhile(True):\n#At the end of the comments the interpreter returns an error, then we will use the Try function to treat this error\n try:\n#Use the get function of the library to be able to take the data from the date of the video\n r = requests.get(\"https://gdata.youtube.com/feeds/api/videos/\" + str(videoid) + \"/comments?v=2&alt=json&max-results=10\"+\"&start-index=\"+str(i))\n r.text\n#Assign the variable DATE all text API\n data = json.loads(r.text)\n#Item receives the values assigned inside of ENTRY\n for item in data['feed']['entry']:\n#show comments\n print (\"comment:\" + (item['content']['$t']))\n#Assign a value to variable I to change the start-index page\n i = i+9\n#basic jerry-rig\n if (i == 10):\n i = 11\n elif(i == 21):\n i = 20\n#show the next page comments\n next = raw_input(\"//////////////////////////\\n//Next Page//\\n//////////////////////////\")\n#As I said, the interpreter will return an error at the end of the comments, we're going to take care of the error using EXCEPT\n except:\n print(\"END\")\n#stop loop\n break\n \n","sub_path":"genialcoders.py","file_name":"genialcoders.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"99355513","text":"#! 
/usr/bin/python\n#\n# Simple script to clone or update a set of common directories that are needed as dependencies of the GPUPerfAPI.\n# This script should be run after cloning the GPA repository.\n\nimport argparse\nimport ctypes\nimport os\nimport platform\nimport shutil\nimport stat\nimport string\nimport subprocess\nimport sys\nimport time\nimport urllib\n\nMACHINE_OS = \"\"\nif \"windows\" in platform.system().lower():\n MACHINE_OS = \"Windows\"\nelif \"linux\" in platform.system().lower():\n MACHINE_OS = \"Linux\"\nelse:\n print(\"Operating system not recognized correctly\")\n sys.exit(1)\n\n# GPUPerfAPI git project to folder map definitions\n# - GitHubMapping\nfrom UpdateCommonMap import *\nif MACHINE_OS == \"Linux\":\n from UpdateCommonMap import downloadAndInstallMappingLin as downloadAndInstallMapping\nelse:\n from UpdateCommonMap import downloadAndInstallMappingWin as downloadAndInstallMapping\n\n# specify whether or not to download the Vulkan SDK (default is to download it)\nparser = argparse.ArgumentParser(description='UpdateCommon args')\nparser.add_argument('--skipvulkansdk', action='store_true', default=False, help='Prevents script from trying to install the Vulkan SDK')\nargs = parser.parse_args()\n\n# to allow the script to be run from anywhere - not just the cwd - store the absolute path to the script file\nscriptRoot = os.path.dirname(os.path.realpath(__file__))\n\n# for each GitHub dependency - test if it has already been fetched - if not, then fetch it,\n# otherwise update it to top of tree\n\nfor key in GitHubMapping:\n # convert targetPath to OS specific format\n tmppath = os.path.join(scriptRoot, \"..\", GitHubMapping[key])\n # clean up path, collapsing any ../ and converting / to \\ for Windows\n targetPath = os.path.normpath(tmppath)\n if os.path.isdir(targetPath):\n print(\"\\nDirectory \" + targetPath + \" exists, using 'git pull' to get latest\")\n p = subprocess.Popen([\"git\",\"pull\"], cwd=targetPath)\n p.wait()\n else:\n print(\"\\nDirectory \" + targetPath + \" does not exist, using 'git clone' to get latest\")\n gitamdRoot = \"https://github.com/GPUOpen-Tools/\" + key\n commandArgs = [\"git\", \"clone\", gitamdRoot, targetPath]\n p = subprocess.Popen( commandArgs )\n p.wait()\n\n# Downloads and runs an installer for a Common Dir (just used for VulkanSDK currently)\n# key is the URL to download the installer\n# value[0] is the Common directory in which to \"install\" the files\n# value[1] is the source from which to copy files (only used on Windows, currently)\n# value[2] is the destination in which to copy files (only used on Windows, currently)\ndef download_and_run(key, value):\n # convert targetPath to OS specific format\n tmppath = os.path.join(scriptRoot, \"..\", value[0])\n # clean up path, collapsing any ../ and converting / to \\ for Windows\n targetPath = os.path.normpath(tmppath)\n if False == os.path.isdir(targetPath):\n os.makedirs(targetPath)\n sdkFileName = key.split('/')[-1].split('#')[0].split('?')[0]\n sdkInstallerPath = os.path.join(targetPath, sdkFileName)\n if MACHINE_OS == \"Linux\":\n if False == os.path.isfile(sdkInstallerPath):\n print(\"\\nDownloading \" + key + \" into \" + sdkInstallerPath)\n urllib.urlretrieve(key, sdkInstallerPath)\n sdkFileName = \"./\" + sdkFileName\n print(\"\\nExecuting \" + sdkFileName + \" with cwd=\" + targetPath)\n os.chdir(targetPath)\n st = os.stat(sdkFileName)\n os.chmod(sdkFileName, st.st_mode | stat.S_IEXEC)\n p = subprocess.Popen([sdkFileName], cwd=targetPath)\n p.wait()\n else:\n if False == 
os.path.isdir(value[1]):\n # did not find the SDK installed, so we need to download and install it\n if False == os.path.isfile(sdkInstallerPath):\n print(\"\\nDownloading \" + key + \" into \" + sdkInstallerPath)\n urllib.urlretrieve(key, sdkInstallerPath)\n print(\"\\nExecuting \" + sdkInstallerPath)\n p = subprocess.Popen([sdkInstallerPath, \"/S\"], shell=True)\n p.wait()\n dstDir = os.path.join(scriptRoot, \"..\", value[2])\n if False == os.path.isdir(dstDir):\n print(\"\\nCopying \" + value[1] + \" to \" + dstDir)\n shutil.copytree(value[1], dstDir)\n\nif False == args.skipvulkansdk:\n for key in downloadAndInstallMapping:\n download_and_run(key, downloadAndInstallMapping[key])\n","sub_path":"Scripts/UpdateCommon.py","file_name":"UpdateCommon.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"144173822","text":"#!/usr/bin/env python\n\nimport os\nimport sys\n\nfrom parse_search_box import ParseSearchBox\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../../../metadata/utils'))\nfrom files_and_paths import Dirs\n\n\nclass UcscSearch:\n def __init__(self, epigenomes, db, dbsnps, genes, host, args, params, uid):\n self.epigenomes = epigenomes\n self.db = db\n self.dbsnps = dbsnps\n self.genes = genes\n self.host = host\n self.args = args\n self.params = params\n self.uid = uid\n self.coord = None\n\n def Coord(self):\n if self.coord:\n return str(self.coord)\n return \"None\"\n\n def parse(self, siteInfo):\n try:\n self.psb = ParseSearchBox(self.epigenomes, self.dbsnps, self.genes, self.params)\n self.coord = self.psb.search()\n self.hubNum = self.db.insertOrUpdate(siteInfo.assayType,\n self.psb.assembly,\n self.psb.assays,\n self.psb.tissue_ids,\n self.psb.loci,\n self.uid)\n except:\n raise\n if self.args.debug:\n raise\n pass\n\n def ucscParams(self):\n if self.coord:\n ucscParams = [\"db=\" + self.psb.assembly,\n \"position=\" + str(self.coord)]\n else:\n # snp or gene\n if self.psb.assembly in [\"hg19\", \"hg38\"]:\n org = \"Human\"\n elif self.psb.assembly in [\"mm10\"]:\n org = \"Mouse\"\n else:\n raise Exception(\"unknown assembly\")\n ucscParams = [\"clade=mammal\",\n \"org=\" + org,\n \"db=\" + self.psb.assembly,\n \"position=\" + self.psb.loci,\n \"hgt.positionInput=\" + self.psb.loci,\n \"hgt.suggestTrack=knownGene\",\n \"Submit=submit\"]\n if 0:\n customUrl = os.path.join(self.host,\n \"trackhub/trackhub\",\n \"trackhubCustom\",\n self.uid,\n str(self.hubNum))\n ucscParams.append(\"hgt.customText=\" + customUrl)\n if 0:\n ucscParams = [\"udcTimeout=1\"] + ucscParams\n return ucscParams\n\n def configureUcscHubLink(self):\n ucscParams = self.ucscParams()\n\n urlBase = \"https://genome.ucsc.edu/cgi-bin/hgTracks?\"\n\n self.trackhubUrl = os.path.join(self.host,\n \"trackhub/trackhub\",\n self.uid,\n \"hub_{hubNum}.txt\".format(hubNum=self.hubNum))\n ucscParams.append(\"hubClear=\" + self.trackhubUrl)\n\n self.trackdbUrl = os.path.join(self.host,\n \"trackhub/trackhub\",\n self.uid,\n self.psb.assembly,\n \"trackDb_{hubNum}.txt\".format(hubNum=self.hubNum))\n\n url = urlBase + \"&\".join(ucscParams)\n return url\n\n def configureWashuHubLink(self):\n self.trackdbUrl = os.path.join(self.host,\n \"trackhub/trackhub_washu\",\n self.uid,\n self.psb.assembly,\n \"trackDb_{hn}.json\".format(hn=self.hubNum))\n\n urlBase = \"http://epigenomegateway.wustl.edu/browser/\"\n assembly = \"?genome=\" + self.psb.assembly\n trackhub = \"&datahub=\" + self.trackdbUrl\n coord = 
\"&coordinate=\" + str(self.coord)\n\n url = urlBase + assembly + trackhub + coord\n return url\n","sub_path":"website/common/ucsc_search.py","file_name":"ucsc_search.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"562222251","text":"class Solution:\n def minimumTotal(self, triangle):\n\n if not triangle or not triangle[0]: return 0\n\n dp = [[0 for i in range(len(row))] for row in triangle]\n dp[0][0] = triangle[0][0]\n\n for i in range(len(triangle)):\n for j in range(len(triangle[i])):\n if j == 0:\n dp[i][j] = dp[i-1][0] + triangle[i][j]\n elif j == len(triangle[i]) - 1:\n dp[i][j] = dp[i-1][-1] + triangle[i][j]\n else:\n dp[i][j] = min(dp[i-1][j-1], dp[i-1][j]) + triangle[i][j]\n \n return min(dp[-1])","sub_path":"120_Triangle.py","file_name":"120_Triangle.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"74688637","text":"import pytest\n\nfrom semver import (\n bump_build,\n bump_major,\n bump_minor,\n bump_patch,\n bump_prerelease,\n compare,\n deprecated,\n finalize_version,\n format_version,\n match,\n max_ver,\n min_ver,\n parse,\n parse_version_info,\n replace,\n)\n\n\n@pytest.mark.parametrize(\n \"func, args, kwargs\",\n [\n (bump_build, (\"1.2.3\",), {}),\n (bump_major, (\"1.2.3\",), {}),\n (bump_minor, (\"1.2.3\",), {}),\n (bump_patch, (\"1.2.3\",), {}),\n (bump_prerelease, (\"1.2.3\",), {}),\n (compare, (\"1.2.1\", \"1.2.2\"), {}),\n (format_version, (3, 4, 5), {}),\n (finalize_version, (\"1.2.3-rc.5\",), {}),\n (match, (\"1.0.0\", \">=1.0.0\"), {}),\n (parse, (\"1.2.3\",), {}),\n (parse_version_info, (\"1.2.3\",), {}),\n (replace, (\"1.2.3\",), dict(major=2, patch=10)),\n (max_ver, (\"1.2.3\", \"1.2.4\"), {}),\n (min_ver, (\"1.2.3\", \"1.2.4\"), {}),\n ],\n)\ndef test_should_raise_deprecation_warnings(func, args, kwargs):\n with pytest.warns(\n DeprecationWarning, match=r\"Function 'semver.[_a-zA-Z]+' is deprecated.\"\n ) as record:\n func(*args, **kwargs)\n if not record:\n pytest.fail(\"Expected a DeprecationWarning for {}\".format(func.__name__))\n assert len(record), \"Expected one DeprecationWarning record\"\n\n\ndef test_deprecated_deco_without_argument():\n @deprecated\n def mock_func():\n return True\n\n with pytest.deprecated_call():\n assert mock_func()\n","sub_path":"tests/test_deprecated_functions.py","file_name":"test_deprecated_functions.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"386035127","text":"# Представлен список чисел. Необходимо вывести элементы исходного списка, значения которых больше предыдущего элемента.\n# Подсказка: элементы, удовлетворяющие условию, оформить в виде списка. 
Use a list comprehension (generator) to build the list.\n# Example source list: [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55].\n# Result: [12, 44, 4, 10, 78, 123].\n\nimport random # Using random library\n\noriginal_list = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55] # Creating original list\nprint(f\"Source list: {original_list}\")\n\nmy_list = [] # Generating random list\nfor el in original_list:\n my_list.append(el + random.randrange(1, 1000, 1)) # Using random for creativity\nprint(f\"Generated list: {my_list}\")\n\nresult_list = [my_list[i + 1] for i in range(len(my_list) - 1) if my_list[i + 1] > my_list[i]] # Finding goal elements with a comprehension, as the hint requires\nprint(f\"Result: {result_list}\")\n","sub_path":"Task_4.2.py","file_name":"Task_4.2.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"621007120","text":"\"\"\"\nMethods for extracting Implied Vol and producing skew reports\n\n\"\"\"\nfrom collections import Counter\nimport copy\nfrom decimal import Decimal\nimport datetime as dt\nfrom dateutil.relativedelta import relativedelta\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport scipy.interpolate # explicit submodule import so sp.interpolate is always available\n# pylint: disable=invalid-name\n\nclass VolMethods():\n \"\"\"\n Methods for extracting Implied Vol and producing skew reports\n\n \"\"\"\n @classmethod\n def smooth(cls, params, tables):\n \"\"\"\n Create a column of smoothed implied vols\n\n Parameters\n ----------\n order : Int\n Polynomial order used in numpy polyfit function. The\n default is 3.\n voltype : Str\n Whether to use 'bid', 'mid', 'ask' or 'last' price. The\n default is 'last'.\n smoothopt : Int\n Minimum number of options to fit curve to. The default\n is 6.\n\n Returns\n -------\n DataFrame\n DataFrame of Option prices.\n\n \"\"\"\n\n # Create a dictionary of the number of options for each\n # maturity\n mat_dict = dict(Counter(tables['imp_vol_data']['Days']))\n\n # Create a sorted list of the different number of days to\n # maturity\n maturities = sorted(list(set(tables['imp_vol_data']['Days'])))\n\n # Create a sorted list of the different number of strikes\n strikes_full = sorted(list(set((tables['imp_vol_data'][\n 'Strike'].astype(float)))))\n\n # create copy of implied vol data\n tables['imp_vol_data_smoothed'] = copy.deepcopy(tables['imp_vol_data'])\n\n for ttm, count in mat_dict.items():\n\n # if there are less than smoothopt (default is 6) options\n # for a given maturity\n if count < params['smoothopt']:\n\n # remove that maturity from the maturities list\n maturities.remove(ttm)\n\n # and remove that maturity from the implied vol\n # DataFrame\n tables['imp_vol_data_smoothed'] = tables[\n 'imp_vol_data_smoothed'][\n tables['imp_vol_data_smoothed']['Days'] != ttm]\n\n # Create empty DataFrame with the full range of strikes as\n # index\n tables['smooth_surf'] = pd.DataFrame(index=strikes_full)\n\n # going through the maturity list (in reverse so the columns\n # created are in increasing order)\n for maturity in reversed(maturities):\n\n # Extract the strikes for this maturity\n strikes = tables['imp_vol_data'][tables['imp_vol_data'][\n 'Days']==maturity]['Strike']\n\n # And the vols (specifying the voltype)\n vols = tables['imp_vol_data'][tables['imp_vol_data'][\n 'Days']==maturity][str(\n params['vols_dict'][str(params['voltype'])])]\n\n # Fit a polynomial to this data\n curve_fit = np.polyfit(strikes, vols, params['order'])\n p = np.poly1d(curve_fit)\n\n # Create empty list to 
store smoothed implied vols\n iv_new = []\n\n # For each strike\n for strike in strikes_full:\n\n # Add the smoothed value to the iv_new list\n iv_new.append(p(strike))\n\n # Append this list as a new column in the smooth_surf\n # DataFrame\n tables['smooth_surf'].insert(0, str(maturity), iv_new)\n\n # Apply the _vol_map function to add smoothed vol column to\n # DataFrame\n tables['imp_vol_data_smoothed'] = (\n tables['imp_vol_data_smoothed'].apply(\n lambda x: cls._vol_map(x, tables), axis=1))\n\n return params, tables\n\n\n @staticmethod\n def _vol_map(row, tables):\n \"\"\"\n Map value calculated in smooth surface DataFrame to\n 'Smoothed Vol' column.\n\n Parameters\n ----------\n row : Array\n Each row in the DataFrame.\n\n Returns\n -------\n row : Array\n Each row in the DataFrame.\n\n \"\"\"\n row['Smoothed Vol'] = (\n tables['smooth_surf'].loc[row['Strike'], str(row['Days'])])\n\n return row\n\n\n @classmethod\n def map_vols(cls, params, tables):\n \"\"\"\n Create vol surface mapping function\n\n Parameters\n ----------\n tables : Dict\n Dictionary containing the market data tables.\n\n Returns\n -------\n vol_surface : scipy.interpolate.rbf.Rbf\n Vol surface interpolation function.\n\n \"\"\"\n params, tables = cls.smooth(params=params, tables=tables)\n data = tables['imp_vol_data_smoothed']\n t_vols_smooth = data['Smoothed Vol'] * 100\n t_vols = data['Imp Vol - Last'] * 100\n t_strikes = data['Strike']\n t_ttm = data['TTM'] * 365\n vol_surface = sp.interpolate.Rbf(\n t_strikes,\n t_ttm,\n t_vols,\n function=params['rbffunc'],\n smooth=5,\n epsilon=5)\n\n vol_surface_smoothed = sp.interpolate.Rbf(\n t_strikes,\n t_ttm,\n t_vols_smooth,\n function=params['rbffunc'],\n smooth=5,\n epsilon=5)\n\n return vol_surface, vol_surface_smoothed\n\n\n @staticmethod\n def get_vol(maturity, strike, params, surface_models):\n \"\"\"\n Return implied vol for a given maturity and strike\n\n Parameters\n ----------\n maturity : Str\n The date for the option maturity, expressed as 'YYYY-MM-DD'.\n strike : Int\n The strike expressed as a percent, where ATM = 100.\n\n Returns\n -------\n imp_vol : Float\n The implied volatility.\n\n \"\"\"\n strike_level = params['spot'] * strike / 100\n maturity_date = dt.datetime.strptime(maturity, '%Y-%m-%d')\n start_date = dt.datetime.strptime(params['start_date'], '%Y-%m-%d')\n ttm = (maturity_date - start_date).days\n if params['smoothing']:\n imp_vol = surface_models[\n 'vol_surface_smoothed'](strike_level, ttm)\n else:\n imp_vol = surface_models['vol_surface'](strike_level, ttm)\n\n return np.round(imp_vol, 2)\n\n\n @classmethod\n def create_vol_dict(cls, params, surface_models):\n \"\"\"\n Create dictionary of implied vols by tenor and strike to use in skew\n report\n\n Parameters\n ----------\n params : Dict\n Dictionary of key parameters.\n surface_models : Dict\n Dictionary of vol surfaces.\n\n Returns\n -------\n vol_dict : Dict\n Dictionary of implied vols.\n\n \"\"\"\n vol_dict = {}\n start_date = dt.datetime.strptime(params['start_date'], '%Y-%m-%d')\n for month in range(1, params['skew_months']+1):\n for strike in [80, 90, 100, 110, 120]:\n maturity = dt.datetime.strftime(\n start_date + relativedelta(months=month), '%Y-%m-%d')\n vol_dict[(month, strike)] = cls.get_vol(\n maturity=maturity, strike=strike, params=params,\n surface_models=surface_models)\n\n return vol_dict\n\n\n @classmethod\n def print_skew_report(cls, vol_dict, params):\n \"\"\"\n Print a report showing implied vols for 80%, 90% and ATM strikes and\n selected tenor length\n\n 
Parameters\n ----------\n vol_dict : Dict\n Dictionary of implied vols.\n params : Dict\n Dictionary of key parameters.\n\n Returns\n -------\n Prints the report to the console.\n\n \"\"\"\n # Set decimal format\n dp2 = Decimal(10) ** -2 # (equivalent to Decimal '0.01')\n\n if params['skew_direction'] == 'full':\n cls._full_skew(vol_dict=vol_dict, params=params, dp2=dp2)\n else:\n cls._header(params=params)\n\n if params['skew_direction'] == 'up':\n cls._upside_skew(vol_dict=vol_dict, params=params, dp2=dp2)\n\n else:\n cls._downside_skew(vol_dict=vol_dict, params=params, dp2=dp2)\n\n\n @staticmethod\n def _header(params):\n\n print('='*78)\n print(': {:^74} :'.format('Skew Summary'))\n print('-'*78)\n\n # Contract traded on left and period covered on right\n print(': Underlying Ticker : {:<19}{} : {} :'.format(\n params['ticker_label'],\n 'Close of Business Date',\n params['start_date']))\n print('-'*78)\n\n # Strike and skew headers\n print(': {:^12} :{:^34} : {:^23} :'.format(\n 'Maturity',\n 'Strike',\n 'Skew'))\n print('-'*78)\n\n if params['skew_direction'] == 'up':\n\n print(': {:>15}{:>7} : {:>7} : {:>7} : {:>10}'\\\n ' : {:>10} :'.format(\n ': ',\n 'ATM',\n '110%',\n '120%',\n '+10% Skew',\n '+20% Skew'))\n\n if params['skew_direction'] == 'down':\n print(': {:>15}{:>7} : {:>7} : {:>7} : {:>10}'\\\n ' : {:>10} :'.format(\n ': ',\n '80%',\n '90%',\n 'ATM',\n '-10% Skew',\n '-20% Skew'))\n\n\n @staticmethod\n def _downside_skew(vol_dict, params, dp2):\n\n # Monthly skew summary for selected number of months\n for month in range(1, params['skew_months'] + 1):\n if month < 10:\n month_label = ' '+str(month)\n else:\n month_label = str(month)\n print(': {} Month Vol : {:>7} : {:>7} : {:>7} : {:>7}'\\\n ' : {:>7} :'.format(\n month_label,\n Decimal(vol_dict[(month, 80)]).quantize(dp2),\n Decimal(vol_dict[(month, 90)]).quantize(dp2),\n Decimal(vol_dict[(month, 100)]).quantize(dp2),\n Decimal((vol_dict[(month, 90)]\n - vol_dict[(month, 100)]) / 10).quantize(dp2),\n Decimal((vol_dict[(month, 80)]\n - vol_dict[(month, 100)]) / 20).quantize(dp2)))\n\n print('-'*78)\n print('='*78)\n\n\n @staticmethod\n def _upside_skew(vol_dict, params, dp2):\n\n # Monthly skew summary for selected number of months\n for month in range(1, params['skew_months'] + 1):\n if month < 10:\n month_label = ' '+str(month)\n else:\n month_label = str(month)\n print(': {} Month Vol : {:>7} : {:>7} : {:>7} : {:>7}'\\\n ' : {:>7} :'.format(\n month_label,\n Decimal(vol_dict[(month, 100)]).quantize(dp2),\n Decimal(vol_dict[(month, 110)]).quantize(dp2),\n Decimal(vol_dict[(month, 120)]).quantize(dp2),\n Decimal((vol_dict[(month, 110)]\n - vol_dict[(month, 100)]) / 10).quantize(dp2),\n Decimal((vol_dict[(month, 120)]\n - vol_dict[(month, 100)]) / 20).quantize(dp2)))\n\n print('-'*78)\n print('='*78)\n\n\n @staticmethod\n def _full_skew(vol_dict, params, dp2):\n\n print('='*115)\n print(': {:^111} :'.format('Skew Summary'))\n print('-'*115)\n\n # Contract traded on left and period covered on right\n print(': Underlying Ticker : {:<56}{} : {} :'.format(\n params['ticker_label'],\n 'Close of Business Date',\n params['start_date']))\n print('-'*115)\n\n # Strike and skew headers\n print(': {:^13} : {:^47} : {:^45} :'.format(\n 'Maturity',\n 'Strike',\n 'Skew'))\n print('-'*115)\n\n # Header rows\n print(': {:>16}{:>6} : {:>6} : {:>6} : {:>6} : {:>6} : {:>9}'\\\n ' : {:>9} : {:>9} : {:>9} :'.format(\n ': ',\n '80%',\n '90%',\n 'ATM',\n '110%',\n '120%',\n '-20% Skew',\n '-10% Skew',\n '+10% Skew',\n '+20% Skew'))\n\n # Set 
decimal format\n dp2 = Decimal(10) ** -2 # (equivalent to Decimal '0.01')\n\n # Monthly skew summary for selected number of months\n for month in range(1, params['skew_months'] + 1):\n if month < 10:\n month_label = ' '+str(month)\n else:\n month_label = str(month)\n print(': {} Month Vol : {:>6} : {:>6} : {:>6} : {:>6} : '\\\n '{:>6} : {:>7} : {:>7} : {:>7} : {:>7} :'.format(\n month_label,\n Decimal(vol_dict[(month, 80)]).quantize(dp2),\n Decimal(vol_dict[(month, 90)]).quantize(dp2),\n Decimal(vol_dict[(month, 100)]).quantize(dp2),\n Decimal(vol_dict[(month, 110)]).quantize(dp2),\n Decimal(vol_dict[(month, 120)]).quantize(dp2),\n Decimal((vol_dict[(month, 80)]\n - vol_dict[(month, 100)]) / 20).quantize(dp2),\n Decimal((vol_dict[(month, 90)]\n - vol_dict[(month, 100)]) / 10).quantize(dp2),\n Decimal((vol_dict[(month, 110)]\n - vol_dict[(month, 100)]) / 10).quantize(dp2),\n Decimal((vol_dict[(month, 120)]\n - vol_dict[(month, 100)]) / 20).quantize(dp2)))\n\n print('-'*115)\n print('='*115)\n","sub_path":"volvisualizer/vol_methods.py","file_name":"vol_methods.py","file_ext":"py","file_size_in_byte":13579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"144611734","text":"from keras.layers import Input, UpSampling2D, Conv2D, BatchNormalization, Lambda, MaxPooling2D, LeakyReLU, ZeroPadding2D\nfrom keras.layers.merge import concatenate\nfrom keras.models import Model\nfrom keras.utils import plot_model\nimport tensorflow as tf\nfrom core.yolo_layer import YoloLayer\nfrom core.activation import Mish, Mish6\nfrom core.backbone import yolov4_tiny_backbone, _darknet_conv_block\n\n\nclass YOLOV4_tiny(object):\n \"\"\"Implement keras YOLOV4_tiny here\"\"\"\n\n def __init__(self, config, max_box_per_image, batch_size, warmup_batches):\n\n self.classes = config[\"model\"][\"labels\"]\n self.num_class = len(self.classes)\n self.anchors = config[\"model\"][\"anchors\"]\n self.grid_scales = config[\"train\"][\"grid_scales\"]\n self.obj_scale = config[\"train\"][\"obj_scale\"]\n self.noobj_scale = config[\"train\"][\"noobj_scale\"]\n self.xywh_scale = config[\"train\"][\"xywh_scale\"]\n self.class_scale = config[\"train\"][\"class_scale\"]\n self.iou_loss_thresh = config[\"train\"][\"iou_loss_thresh\"]\n self.iou_loss = config[\"train\"][\"iou_loss\"]\n self.max_grid = [config['model']['max_input_size'], config['model']['max_input_size']]\n self.batch_size = batch_size\n self.warmup_batches = warmup_batches\n self.max_box_per_image = max_box_per_image\n self.focal_loss = config[\"train\"][\"focal_loss\"]\n self.backbone = config[\"model\"][\"backbone_model\"]\n\n def model(self):\n input_image = Input(shape=(None, None, 3)) # net_h, net_w, 3\n true_boxes = Input(shape=(1, 1, 1, self.max_box_per_image, 4)) # xywh\n true_yolo_1 = Input(\n shape=(None, None, len(self.anchors) // 4, 4 + 1 + self.num_class)) # grid_h, grid_w, nb_anchor, 4+1+nb_class\n true_yolo_2 = Input(\n shape=(None, None, len(self.anchors) // 4, 4 + 1 + self.num_class)) # grid_h, grid_w, nb_anchor, 4+1+nb_class\n\n if self.backbone == \"YOLOV4_tiny_backbone\":\n print(\"[INFO] Backbone: YOLOV4_tiny_backbone \")\n route, input_data = yolov4_tiny_backbone(input_image)\n else:\n raise ValueError(\"Assign correct backbone model: YOLOV4_tiny_backbone\")\n\n x = _darknet_conv_block(input_data, convs=[\n {'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'activation': 'leaky', 'layer_idx': \"yolov4_tiny_1\"},\n {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'activation': 
'leaky', 'layer_idx': \"yolov4_tiny_2\"}])\n pred_conv_lbbox = _darknet_conv_block(x, convs=[\n {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'activation': 'leaky', 'layer_idx': \"yolov4_tiny_3\"},\n {'filter': (3 * (5 + self.num_class)), 'kernel': 1, 'stride': 1, 'bnorm': False, 'activation': 'linear',\n 'layer_idx': \"yolov4_tiny_4\"}])\n\n loss_yolo_1 = YoloLayer(self.anchors[6:],\n [1 * num for num in self.max_grid],\n self.batch_size,\n self.warmup_batches,\n self.iou_loss_thresh,\n self.grid_scales[0],\n self.obj_scale,\n self.noobj_scale,\n self.xywh_scale,\n self.class_scale,\n self.iou_loss,\n self.focal_loss)([input_image, pred_conv_lbbox, true_yolo_1, true_boxes])\n x = _darknet_conv_block(x, convs=[\n {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'activation': 'leaky', 'layer_idx': \"yolov4_tiny_5\"}])\n x = UpSampling2D(2)(x)\n x = concatenate([x, route])\n\n pred_conv_sbbox = _darknet_conv_block(x, convs=[\n {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'activation': 'leaky', 'layer_idx': \"yolov4_tiny_6\"},\n {'filter': (3 * (5 + self.num_class)), 'kernel': 1, 'stride': 1, 'bnorm': False, 'activation': 'linear',\n 'layer_idx': \"yolo_5\"}])\n loss_yolo_2 = YoloLayer(self.anchors[:6],\n [1 * num for num in self.max_grid],\n self.batch_size,\n self.warmup_batches,\n self.iou_loss_thresh,\n self.grid_scales[1],\n self.obj_scale,\n self.noobj_scale,\n self.xywh_scale,\n self.class_scale,\n self.iou_loss,\n self.focal_loss)([input_image, pred_conv_sbbox, true_yolo_2, true_boxes])\n\n train_model = Model([input_image, true_boxes, true_yolo_1, true_yolo_2], [loss_yolo_1, loss_yolo_2])\n infer_model = Model(input_image, [pred_conv_lbbox, pred_conv_sbbox])\n train_model.summary()\n return [train_model, infer_model]\n\n\ndef dummy_loss(y_true, y_pred):\n return tf.sqrt(tf.reduce_sum(y_pred))\n\n\n","sub_path":"core/yolov4_tiny.py","file_name":"yolov4_tiny.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"242568911","text":"import csv\nimport sys\nimport sqlite3\n\ndb = sqlite3.connect(\"tweet.db\")\ndb.text_factory = str\ncur = db.cursor()\n\n\ncur.execute(\"\"\" create table usertimeline (\n id integer,\n name text,\n tweet text)\"\"\")\n\n\nfp = open(\"usertimeline.csv\")\n\ntimeline = csv.reader(fp)\n\ni = 0\nfor row in timeline:\n i += 1\n t = (i, row[1],row[0])\n cur.execute(\"insert into usertimeline values(?,?,?)\",t)\n\ndb.commit()\ndb.close()\n","sub_path":"twitter-python/importdb/importCSVToDB_userTweet.py","file_name":"importCSVToDB_userTweet.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"415596477","text":"#Copyright ReportLab Europe Ltd. 2000-2017\n#see license.txt for license details\n#history https://hg.reportlab.com/hg-public/reportlab/log/tip/src/reportlab/pdfgen/pathobject.py\n__version__='3.3.0'\n__doc__=\"\"\"\nPDFPathObject is an efficient way to draw paths on a Canvas. Do not\ninstantiate directly, obtain one from the Canvas instead.\n\nProgress Reports:\n8.83, 2000-01-13, gmcm: created from pdfgen.py\n\n\"\"\"\n\nfrom reportlab.pdfgen import pdfgeom\nfrom reportlab.lib.rl_accel import fp_str\n\n\nclass PDFPathObject:\n \"\"\"Represents a graphic path. There are certain 'modes' to PDF\n drawing, and making a separate object to expose Path operations\n ensures they are completed with no run-time overhead. 
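A minimal usage sketch\n (illustrative, not part of this module; with c a reportlab Canvas:\n p = c.beginPath(); p.moveTo(0, 0); p.lineTo(100, 100); c.drawPath(p)). 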
Ask\n the Canvas for a PDFPath with getNewPathObject(); moveto/lineto/\n curveto wherever you want; add whole shapes; and then add it back\n into the canvas with one of the relevant operators.\n\n Path objects are probably not long, so we pack onto one line\n\n the code argument allows a canvas to get the operations appended directly so\n avoiding the final getCode\n \"\"\"\n def __init__(self,code=None):\n self._code = (code,[])[code is None]\n self._code_append = self._init_code_append\n\n def _init_code_append(self,c):\n assert c.endswith(' m') or c.endswith(' re'), 'path must start with a moveto or rect'\n code_append = self._code.append\n code_append('n')\n code_append(c)\n self._code_append = code_append\n\n def getCode(self):\n \"pack onto one line; used internally\"\n return ' '.join(self._code)\n\n def moveTo(self, x, y):\n self._code_append('%s m' % fp_str(x,y))\n\n def lineTo(self, x, y):\n self._code_append('%s l' % fp_str(x,y))\n\n def curveTo(self, x1, y1, x2, y2, x3, y3):\n self._code_append('%s c' % fp_str(x1, y1, x2, y2, x3, y3))\n\n def arc(self, x1,y1, x2,y2, startAng=0, extent=90):\n \"\"\"Contributed to piddlePDF by Robert Kern, 28/7/99.\n Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2,\n starting at startAng degrees and covering extent degrees. Angles\n start with 0 to the right (+x) and increase counter-clockwise.\n These should have x1<x2 and y1<y2.\"\"\"\n\n self._curves(pdfgeom.bezierArc(x1,y1, x2,y2, startAng, extent))\n\n def arcTo(self, x1,y1, x2,y2, startAng=0, extent=90):\n \"\"\"Like arc, but draws a line from the current point to\n the start if the start is not the current point.\"\"\"\n self._curves(pdfgeom.bezierArc(x1,y1, x2,y2, startAng, extent),'lineTo')\n\n def rect(self, x, y, width, height):\n \"\"\"Adds a rectangle to the path\"\"\"\n self._code_append('%s re' % fp_str((x, y, width, height)))\n\n def ellipse(self, x, y, width, height):\n \"\"\"adds an ellipse to the path\"\"\"\n self._curves(pdfgeom.bezierArc(x, y, x + width,y + height, 0, 360))\n\n def _curves(self,curves,initial='moveTo'):\n getattr(self,initial)(*curves[0][:2])\n for curve in curves:\n self.curveTo(*curve[2:])\n\n def circle(self, x_cen, y_cen, r):\n \"\"\"adds a circle to the path\"\"\"\n x1 = x_cen - r\n y1 = y_cen - r\n width = height = 2*r\n self.ellipse(x1, y1, width, height)\n\n def roundRect(self, x, y, width, height, radius):\n \"\"\"Draws a rectangle with rounded corners. The corners are\n approximately quadrants of a circle, with the given radius.\"\"\"\n #use a precomputed set of factors for the bezier approximation\n #to a circle. 
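(The constant m = 0.4472 below appears\n #to be 1 - 0.5523, the complement of the usual circle kappa = 4*(sqrt(2)-1)/3.) 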
There are six relevant points on the x axis and y axis.\n #sketch them and it should all make sense!\n m = 0.4472 #radius multiplier\n xhi = x,x+width\n xlo, xhi = min(xhi), max(xhi)\n yhi = y,y+height\n ylo, yhi = min(yhi), max(yhi)\n if isinstance(radius,(list,tuple)):\n r = [max(0,r) for r in radius]\n if len(r)<4: r += (4-len(r))*[0]\n self.moveTo(xlo + r[2], ylo) #start at bottom left\n self.lineTo(xhi - r[3], ylo) #bottom row\n if r[3]>0:\n t = m*r[3]\n self.curveTo(xhi - t, ylo, xhi, ylo + t, xhi, ylo + r[3]) #bottom right\n self.lineTo(xhi, yhi - r[1]) #right edge\n if r[1]>0:\n t = m*r[1]\n self.curveTo(xhi, yhi - t, xhi - t, yhi, xhi - r[1], yhi) #top right\n self.lineTo(xlo + r[0], yhi) #top row\n if r[0]>0:\n t = m*r[0]\n self.curveTo(xlo + t, yhi, xlo, yhi - t, xlo, yhi - r[0]) #top left\n self.lineTo(xlo, ylo + r[2]) #left edge\n if r[2]>0:\n t = m*r[2]\n self.curveTo(xlo, ylo + t, xlo + t, ylo, xlo + r[2], ylo) #bottom left\n # 4 radii top left top right bottom left bottom right\n else:\n t = m * radius\n self.moveTo(xlo + radius, ylo)\n self.lineTo(xhi - radius, ylo) #bottom row\n self.curveTo(xhi - t, ylo, xhi, ylo + t, xhi, ylo + radius) #bottom right\n self.lineTo(xhi, yhi - radius) #right edge\n self.curveTo(xhi, yhi - t, xhi - t, yhi, xhi - radius, yhi) #top right\n self.lineTo(xlo + radius, yhi) #top row\n self.curveTo(xlo + t, yhi, xlo, yhi - t, xlo, yhi - radius) #top left\n self.lineTo(xlo, ylo + radius) #left edge\n self.curveTo(xlo, ylo + t, xlo + t, ylo, xlo + radius, ylo) #bottom left\n self.close()\n\n def close(self):\n \"draws a line back to where it started\"\n self._code_append('h')\n","sub_path":"src/reportlab/pdfgen/pathobject.py","file_name":"pathobject.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"539552786","text":"#!/usr/bin/python\n\nimport numpy as np\n\ndef dispersion(T, h): \n L0 = 9.81*T**2/(2.*np.pi)\n L = L0\n for i in range(0,100):\n Lnew = L0 * np.tanh(2.*np.pi/L*h)\n if(abs(Lnew-L)<0.001):\n L = Lnew\n break\n L = Lnew\n return L\n\n## Piston wavemaker data ##\nH = 0.1\nT = 3.0\nh = 0.4\nphase0 = 0.\ndirection = 0.\n\nnPaddles = 1\nbLims = [0., 0.]\n\nt0 = 0.\ntEnd = 31.\ndt = 0.05\n########################\n\n# Calculations\nL = dispersion(T, h)\nk = 2.*np.pi/L\nw = 2.*np.pi/T\n\ntimes = np.linspace(t0, tEnd, round((tEnd-t0)/dt)+1)\ncoords = np.linspace(bLims[0], bLims[1], nPaddles+1)\ncoords = coords[:-1] + np.diff(coords)/2.\n\nHoS = 4. * np.sinh(k*h)**2. / (np.sinh(2.*k*h) + 2.*k*h)\nS = H/HoS\n\n# Export\nfid = open('wavemakerMovement.txt', 'w')\n\nfid.write('wavemakerType Piston;\\n')\nfid.write('tSmooth 1.5;\\n')\nfid.write('genAbs 0;\\n\\n')\n\nfid.write('timeSeries {0}(\\n'.format( len(times) ))\nfor t in times:\n fid.write('{0}\\n'.format(t))\nfid.write(');\\n\\n'.format( len(times) ))\n\nfid.write('paddlePosition {0}(\\n'.format( nPaddles ))\nfor i in range(0, nPaddles):\n fid.write('{0}(\\n'.format( len(times) ))\n for t in times:\n x = S/2. * np.cos(-w*t + np.pi/2. + phase0 + 2.*np.pi*coords[i]/L*np.sin(direction*np.pi/180.) )\n fid.write('{0}\\n'.format(x)) \n fid.write(')\\n')\nfid.write(');\\n\\n')\n\nfid.write('paddleEta {0}(\\n'.format( nPaddles ))\nfor i in range(0, nPaddles):\n fid.write('{0}(\\n'.format( len(times) ))\n for t in times:\n x = H/2. * np.cos(-w*t + phase0 + 2.*np.pi*coords[i]/L*np.sin(direction*np.pi/180.) 
)\n fid.write('{0}\\n'.format(x)) \n fid.write(')\\n')\nfid.write(');\\n\\n')\n\nfid.close()\n","sub_path":"tutorials/wavemakerFlume/constant/pistonWaveGen.py","file_name":"pistonWaveGen.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"514531295","text":"# -*- coding: utf-8 -*-\nimport math\nfrom datetime import timedelta\nfrom datetime import datetime\nimport warnings\nimport numpy as np\nimport pandas as pd\nprint('开始分仓,请稍后')\nwarnings.filterwarnings(\"ignore\")\n# 需要修改的参数\npath_订单 = '3-23订单'\npath_库存 = '库存0323'\n订单 = pd.read_excel('D://work//曼秀雷敦//出库单发仓库//' + path_订单 + '.xls')\n库存_源数据 = pd.read_excel(\n 'D://work//曼秀雷敦//库存//' +\n path_库存 +\n '.xls',\n sheet_name='可用')\n标卡 = pd.read_excel(\n 'D://work//曼秀雷敦//最新厂价表2020.1.17副本.xlsx',\n sheet_name='SKU').fillna(0)\n# 要删除\n标卡.公司货号 = 标卡.公司货号.astype(str)\n库存_源数据 = 库存_源数据[(库存_源数据.货物状态 == 'NR') | (\n 库存_源数据.货物状态 == 'OD') | (库存_源数据.货物状态 == 'QA')]\n# 新增\n库存_源数据.商品代码 = 库存_源数据.商品代码.astype(str)\n# 修改源代码\n库存2 = pd.merge(left=库存_源数据,\n right=标卡.loc[:,\n ['公司货号',\n '商品编码',\n '条码',\n '保质期',\n '体积',\n '重量',\n '发货箱规']].drop_duplicates(),\n left_on='商品代码',\n right_on='公司货号',\n how='left').fillna('dummy')\n库存 = 库存2.pivot_table(\n index=[\n '商品编码',\n '条码',\n '商品代码',\n '商品名称',\n '库位',\n '生产批号',\n '到期日期',\n '发货箱规',\n '保质期',\n '体积',\n '重量',\n '货物状态'],\n values=['可用数量'],\n aggfunc='sum').reset_index().replace('dummy',np.nan)\n库存需要维护 = 库存[库存.商品编码 == 0]\n库存 = 库存[(库存.可用数量 != 0) & (库存.商品编码 != 0)]\n库存.loc[库存.发货箱规 == 0, '发货箱规'] = 1\n库存['散货数量'] = 库存.可用数量 - (库存.可用数量 / 库存.发货箱规).astype(int) * 库存.发货箱规\n库存['可用数量'] = 库存.可用数量 - 库存.散货数量\n库存拆分1 = 库存.drop('散货数量', axis=1)\n库存拆分2 = 库存.drop('可用数量', axis=1).rename(columns={'散货数量': '可用数量'})\n库存拆分1 = 库存拆分1[库存拆分1.可用数量 != 0]\n库存拆分1['箱柜属性'] = '整箱'\n库存拆分2 = 库存拆分2[库存拆分2.可用数量 != 0]\n库存拆分2['箱柜属性'] = '散装'\n库存拆分 = [库存拆分1, 库存拆分2]\n库存 = pd.concat(库存拆分)\n库存.loc[库存.货物状态 == 'QA', '箱柜属性'] = 'QA'\n订单_剔除 = 订单\n# 新增提示!提示订单中无标卡数据的商品编码\n标卡_提示 = 标卡.loc[:, ['商品编码']].drop_duplicates().astype('str')\n订单_提示 = 订单.loc[:, ['商品编码']].drop_duplicates().astype('str')\n库存.商品编码 = 库存.商品编码.astype(np.str)\nprint('订单采购SKU需要维护明细:')\nprint(订单_提示[订单_提示.商品编码.isin(标卡_提示.商品编码) == False])\nprint('订单采购SKU需要维护箱规:')\nprint(订单_提示[订单_提示.商品编码.isin(库存.loc[库存.发货箱规 == 1].商品编码)])\n# 新增去除采购数量为0的数据\n订单 = 订单.loc[订单.采购数量 != 0]\n订单_column = 订单.columns\n订单_column1 = list(订单_column)\n订单_column1.append('仓库实发数量')\n订单_column2 = list(订单_column)\n订单_column2.append('采购数量1')\n# 20190522 发现会引用之前的数据 顾删掉此3张表\n订单处理_最终1 = pd.DataFrame()\n订单处理_最终2 = pd.DataFrame()\n订单处理_整箱 = pd.DataFrame()\n# 修改有效期格式\n# 修改 删除\n库存.到期日期 = pd.to_datetime(库存.到期日期)\n# 修改 删除\n# 已经完成编码和条码的匹配以及聚合\n# 按照数量需要排序(升序)\n库存处理 = pd.DataFrame()\n订单.商品编码 = 订单.商品编码.astype(str)\nfor i in 库存.商品编码.unique():\n 库存1 = 库存[(库存.商品编码 == i) & (库存.箱柜属性 == 'QA')]\n # 需要修改\n 库存1 = 库存1.sort_values(['箱柜属性', '到期日期'], ascending=[True, True])\n 库存1['辅助列'] = np.arange(1, len(库存1) + 1)\n 库存处理 = 库存处理.append(库存1)\n库存处理.辅助列 = 库存处理.辅助列.astype(int).astype(str)\n库存处理.商品编码 = 库存处理.商品编码.astype(str)\n库存处理['辅助列1'] = 库存处理.商品编码 + 库存处理.辅助列\nx = 库存处理.辅助列.astype(int).max()\nif len(库存处理.辅助列) == 0:\n x = 0\ny = pd.DataFrame()\nfor i3 in np.arange(1, x + 1):\n 订单[i3] = i3\n 订单[i3] = 订单[i3].astype(str)\n 订单[i3] = 订单['商品编码'] + 订单[i3]\nz = 1\nwhile z <= x:\n 订单 = pd.merge(left=订单,\n right=库存处理.loc[:,\n ['辅助列1',\n '可用数量']],\n left_on=z,\n right_on='辅助列1',\n how='left').fillna(0)\n 订单 = 订单.drop('辅助列1', axis=1)\n 订单.可用数量 = 订单.可用数量.astype(int)\n 订单 = 订单.rename(columns={'可用数量': x + z})\n z = z 
+ 1\n库存修改箱柜 = 库存处理.pivot_table(\n index='商品编码',\n values='发货箱规',\n aggfunc='max').reset_index()\n订单 = pd.merge(\n left=订单,\n right=库存修改箱柜,\n left_on='商品编码',\n right_on='商品编码',\n how='left')\n订单['采购数量1'] = 订单['采购数量']\n# 20190426\n订单.发货箱规 = 订单.发货箱规.fillna(1)\n订单x = 订单\n订单 = 订单.drop('发货箱规', axis=1)\nix = 1\n订单处理h = pd.DataFrame()\nwhile ix <= x and len(订单) > 0:\n 订单处理 = pd.DataFrame()\n for i2 in 订单.商品编码.unique():\n 订单1 = 订单[订单.商品编码 == i2].sort_values('采购数量1')\n 订单1['累加和'] = 订单1.采购数量1.cumsum()\n 订单处理 = 订单处理.append(订单1)\n 订单处理[x * 2 + ix] = 订单处理[x + ix] - 订单处理['累加和']\n 订单处理.loc[订单处理[x * 2 + ix] >= 0, x * 3 + 1] = 订单处理['采购数量1']\n 订单处理.loc[(订单处理[x * 2 + ix] < 0) & (订单处理.采购数量1 + 订单处理[x * 2 + ix]\n > 0), x * 3 + 1] = 订单处理.采购数量1 + 订单处理[x * 2 + ix]\n 订单处理.loc[(订单处理[x * 2 + ix] < 0) & (订单处理.采购数量1 +\n 订单处理[x * 2 + ix] <= 0), x * 3 + 1] = 0\n 订单处理1 = 订单处理[订单处理[x * 3 + 1] >= 0]\n 订单处理1.loc[订单处理1[x * 3 + 1] > 0, '调拨条码'] = 订单处理1[ix]\n 订单处理h = 订单处理h.append(订单处理1)\n 订单 = 订单处理[(订单处理[x * 3 + 1] == 0) | (订单处理[x * 3 + 1] - 订单处理.采购数量1 < 0)]\n 订单.采购数量1 = 订单.采购数量1 - 订单[x * 3 + 1]\n ix = ix + 1\n订单处理_整箱 = pd.merge(\n left=订单处理h,\n right=库存处理,\n left_on='调拨条码',\n right_on='辅助列1',\n how='left')\n订单处理_整箱 = 订单处理_整箱.reindex(\n columns=[\n '订单号',\n '分配机构',\n '仓库',\n '详细地址',\n '联系人',\n '联系方式',\n '商品编码_x',\n '条码',\n '商品名称_x',\n '采购数量',\n '采购金额',\n '库位',\n '生产批号',\n '商品代码',\n x * 3 + 1,\n '到期日期',\n '发货箱规',\n '箱数',\n '保质期',\n '体积',\n '重量'])\n订单处理_整箱 = 订单处理_整箱.rename(\n columns={\n '商品编码_x': '商品编码',\n '商品名称_x': '商品名称',\n x * 3 + 1: '仓库实发数量'})\n订单处理_整箱 = 订单处理_整箱.drop_duplicates()\n# 0426订单处理_整箱=订单处理_整箱.loc[订单处理_整箱.仓库实发数量!=0]\n订单处理_整箱数量 = 订单处理_整箱.pivot_table(\n index=[\n '订单号',\n '商品编码'],\n values='仓库实发数量',\n aggfunc='sum')\n订单_剔除.商品编码 = 订单_剔除.商品编码.astype('str')\n订单处理_整箱添加 = pd.merge(\n left=订单_剔除, right=订单处理_整箱数量, on=[\n '订单号', '商品编码'], how='left')\n# 7/18修改\n订单x = 订单处理_整箱添加\n订单x = 订单x.reindex(columns=订单_column1)\n订单x['采购数量1'] = 订单x.采购数量 - 订单x.仓库实发数量.fillna(0)\n订单 = 订单x\n# 0426 库存分完需要修改库存\n实发数量 = 订单处理_整箱.loc[订单处理_整箱.仓库实发数量 != 0, [\n '商品代码', '库位', '生产批号', '到期日期', '仓库实发数量']]\n实发数量 = 实发数量.pivot_table(\n index=[\n '商品代码',\n '库位',\n '生产批号',\n '到期日期'],\n values='仓库实发数量',\n aggfunc='sum').reset_index()\nif len(实发数量) > 0:\n 库存 = pd.merge(\n left=库存, right=实发数量, on=[\n '商品代码', '库位', '生产批号', '到期日期'], how='left')\n# 修改没有实发的数量\n 库存.loc[库存.箱柜属性 == 'QA', '可用数量'] = 库存.loc[库存.箱柜属性 == 'QA',\n '可用数量'] - 库存.loc[库存.箱柜属性 == 'QA', '仓库实发数量'].fillna(0)\n# 库存=库存.drop('仓库实发数量',axis=1)\n库存.到期日期 = pd.to_datetime(库存.到期日期)\n# 修改 删除\n# 已经完成编码和条码的匹配以及聚合\n# 按照数量需要排序(升序)\n库存处理 = pd.DataFrame()\n订单.商品编码 = 订单.商品编码.astype(str)\nfor i in 库存.商品编码.unique():\n 库存1 = 库存[(库存.商品编码 == i) & (库存.箱柜属性 == '散装')]\n # 需要修改\n 库存1 = 库存1.sort_values(['箱柜属性', '到期日期'], ascending=[True, True])\n 库存1['辅助列'] = np.arange(1, len(库存1) + 1)\n 库存处理 = 库存处理.append(库存1)\n库存处理.辅助列 = 库存处理.辅助列.astype(int).astype(str)\n库存处理.商品编码 = 库存处理.商品编码.astype(str)\n库存处理['辅助列1'] = 库存处理.商品编码 + 库存处理.辅助列\nx = 库存处理.辅助列.astype(int).max()\ny = pd.DataFrame()\nfor i3 in np.arange(1, x + 1):\n 订单[i3] = i3\n 订单[i3] = 订单[i3].astype(str)\n 订单[i3] = 订单['商品编码'] + 订单[i3]\nz = 1\nwhile z <= x:\n 订单 = pd.merge(left=订单,\n right=库存处理.loc[:,\n ['辅助列1',\n '可用数量']],\n left_on=z,\n right_on='辅助列1',\n how='left').fillna(0)\n 订单 = 订单.drop('辅助列1', axis=1)\n 订单.可用数量 = 订单.可用数量.astype(int)\n 订单 = 订单.rename(columns={'可用数量': x + z})\n z = z + 1\n库存修改箱柜 = 库存处理.pivot_table(\n index='商品编码',\n values='发货箱规',\n aggfunc='max').reset_index()\n订单 = pd.merge(\n left=订单,\n right=库存修改箱柜,\n left_on='商品编码',\n right_on='商品编码',\n how='left')\n订单_bak = 
订单.reindex(columns=订单_column2)\n# 标记\n订单 = 订单.drop('发货箱规', axis=1)\nix = 1\n订单处理h = pd.DataFrame()\nwhile ix <= x and len(订单) > 0:\n 订单处理 = pd.DataFrame()\n for i2 in 订单.商品编码.unique():\n 订单1 = 订单[订单.商品编码 == i2].sort_values('采购数量1')\n 订单1['累加和'] = 订单1.采购数量1.cumsum()\n 订单处理 = 订单处理.append(订单1)\n 订单处理[x * 2 + ix] = 订单处理[x + ix] - 订单处理['累加和']\n 订单处理.loc[订单处理[x * 2 + ix] >= 0, x * 3 + 1] = 订单处理['采购数量1']\n 订单处理.loc[(订单处理[x * 2 + ix] < 0) & (订单处理.采购数量1 + 订单处理[x * 2 + ix]\n > 0), x * 3 + 1] = 订单处理.采购数量1 + 订单处理[x * 2 + ix]\n 订单处理.loc[(订单处理[x * 2 + ix] < 0) & (订单处理.采购数量1 +\n 订单处理[x * 2 + ix] <= 0), x * 3 + 1] = 0\n 订单处理1 = 订单处理[订单处理[x * 3 + 1] >= 0]\n 订单处理1.loc[订单处理1[x * 3 + 1] > 0, '调拨条码'] = 订单处理1[ix]\n 订单处理h = 订单处理h.append(订单处理1)\n 订单 = 订单处理[(订单处理[x * 3 + 1] == 0) | (订单处理[x * 3 + 1] - 订单处理.采购数量1 < 0)]\n 订单.采购数量1 = 订单.采购数量1 - 订单[x * 3 + 1]\n ix = ix + 1\n# 20190521修改\nif len(订单处理h) > 0:\n 订单处理_最终 = pd.merge(\n left=订单处理h,\n right=库存处理,\n left_on='调拨条码',\n right_on='辅助列1',\n how='left')\n 订单处理_最终 = 订单处理_最终.reindex(\n columns=[\n '订单号',\n '分配机构',\n '仓库',\n '详细地址',\n '联系人',\n '联系方式',\n '商品编码_x',\n '条码',\n '商品名称_x',\n '采购数量',\n '采购金额',\n '库位',\n '生产批号',\n '商品代码',\n x * 3 + 1,\n '到期日期',\n '发货箱规',\n '箱数',\n '保质期',\n '体积',\n '重量'])\n 订单处理_最终 = 订单处理_最终.rename(\n columns={\n '商品编码_x': '商品编码',\n '商品名称_x': '商品名称',\n x * 3 + 1: '仓库实发数量'})\n 标卡1 = 库存.loc[库存.箱柜属性 == '整箱', ['商品编码', '发货箱规']].drop_duplicates()\n print('注意!!!,整箱发货对应此次发货京东码对应的箱规有个SKU箱规有多个的sku:')\n 标卡1.商品编码.value_counts().reset_index(\n ).loc[标卡1.商品编码.value_counts().reset_index().商品编码 > 1]\n # 0429\n 标卡1 = 标卡1.pivot_table(index='商品编码', values='发货箱规', aggfunc='min')\n 订单_new2 = 订单处理_最终.loc[订单处理_最终.仓库实发数量 == 0]\n 订单_new2 = 订单_new2.drop('发货箱规', axis=1)\n 订单_new2 = pd.merge(left=订单_new2, right=标卡1, on='商品编码', how='left')\n 订单_new2 = 订单_new2.loc[订单_new2.发货箱规 > 订单_new2.采购数量]\n 订单_new2 = 订单_new2.drop_duplicates()\n print('未满一箱数据准备完毕!')\n 订单处理_最终1 = 订单处理_最终.loc[订单处理_最终.仓库实发数量 != 0]\n 订单处理_1 = 订单处理_最终.loc[订单处理_最终.仓库实发数量 != 0].pivot_table(\n index=['订单号', '商品编码'], values=['仓库实发数量'], aggfunc='sum').reset_index()\n print('散货分货完毕!')\n 订单处理_2 = 订单_new2.loc[:, ['订单号', '商品编码', '采购数量']]\n 订单处理_2.drop_duplicates(keep='first', inplace=True)\n 订单处理_2 = 订单处理_2.rename(columns={'采购数量': '仓库实发数量'})\n 订单处理_3 = 订单处理_1.append(订单处理_2)\n 订单处理_3 = 订单处理_3.pivot_table(\n index=[\n '订单号',\n '商品编码'],\n values='仓库实发数量',\n aggfunc='sum').reset_index()\n 订单_new = pd.merge(\n left=订单_bak, right=订单处理_3, left_on=[\n '商品编码', '订单号'], right_on=[\n '商品编码', '订单号'], how='left')\n # 20190523修改\n if len(订单处理_3) == 0:\n 订单_new['采购数量2'] = 订单_new.采购数量1 - 0\n else:\n 订单_new['仓库实发数量'] = 订单_new.仓库实发数量.fillna(0)\n 订单_new['采购数量2'] = 订单_new.采购数量1 - 订单_new.仓库实发数量\n# 0429 前面用的散货的箱规 这里必须重新更新箱规\n # 标记 订单_new=订单_new.drop('箱规',axis=1)\n 订单_new = pd.merge(left=订单_new, right=标卡1, on='商品编码', how='left')\n 订单 = 订单_new\n# 再来一次\n 库存处理 = pd.DataFrame()\n 订单.商品编码 = 订单.商品编码.astype(str)\n for i in 库存.商品编码.unique():\n 库存1 = 库存[(库存.商品编码 == i) & (库存.箱柜属性 == '整箱')]\n # 需要修改\n 库存1 = 库存1.sort_values(['箱柜属性', '到期日期'], ascending=[True, True])\n 库存1['辅助列'] = np.arange(1, len(库存1) + 1)\n 库存处理 = 库存处理.append(库存1)\n 库存处理.辅助列 = 库存处理.辅助列.astype(int).astype(str)\n 库存处理.商品编码 = 库存处理.商品编码.astype(str)\n 库存处理['辅助列1'] = 库存处理.商品编码 + 库存处理.辅助列\n x = 库存处理.辅助列.astype(int).max()\n y = pd.DataFrame()\n for i3 in np.arange(1, x + 1):\n 订单[i3] = i3\n 订单[i3] = 订单[i3].astype(str)\n 订单[i3] = 订单['商品编码'] + 订单[i3]\n z = 1\n while z <= x:\n 订单 = pd.merge(left=订单,\n right=库存处理.loc[:,\n ['辅助列1',\n '可用数量']],\n left_on=z,\n right_on='辅助列1',\n 
how='left').fillna(0)\n 订单 = 订单.drop('辅助列1', axis=1)\n 订单.可用数量 = 订单.可用数量.astype(int)\n 订单 = 订单.rename(columns={'可用数量': x + z})\n z = z + 1\n 订单.采购数量1 = 订单.采购数量2\n 订单.loc[订单.发货箱规 == 0, '发货箱规'] = 1\n 订单['采购数量1'] = (订单['采购数量1'] / 订单.发货箱规.fillna(1)\n ).astype(int) * 订单.发货箱规.fillna(1)\n 订单 = 订单.drop('发货箱规', axis=1)\n ix = 1\n 订单处理h = pd.DataFrame()\n while ix <= x and len(订单) > 0:\n 订单处理 = pd.DataFrame()\n for i2 in 订单.商品编码.unique():\n 订单1 = 订单[订单.商品编码 == i2].sort_values('采购数量1')\n 订单1['累加和'] = 订单1.采购数量1.cumsum()\n 订单处理 = 订单处理.append(订单1)\n 订单处理[x * 2 + ix] = 订单处理[x + ix] - 订单处理['累加和']\n 订单处理.loc[订单处理[x * 2 + ix] >= 0, x * 3 + 1] = 订单处理['采购数量1']\n 订单处理.loc[(订单处理[x * 2 + ix] < 0) & (订单处理.采购数量1 + 订单处理[x * 2 + ix]\n > 0), x * 3 + 1] = 订单处理.采购数量1 + 订单处理[x * 2 + ix]\n 订单处理.loc[(订单处理[x * 2 + ix] < 0) & (订单处理.采购数量1 + \\\n 订单处理[x * 2 + ix] <= 0), x * 3 + 1] = 0\n 订单处理1 = 订单处理[订单处理[x * 3 + 1] >= 0]\n 订单处理1.loc[订单处理1[x * 3 + 1] > 0, '调拨条码'] = 订单处理1[ix]\n 订单处理h = 订单处理h.append(订单处理1)\n 订单 = 订单处理[(订单处理[x * 3 + 1] == 0) | (订单处理[x * 3 + 1] - 订单处理.采购数量1 < 0)]\n 订单.采购数量1 = 订单.采购数量1 - 订单[x * 3 + 1]\n ix = ix + 1\n 订单处理_最终 = pd.merge(\n left=订单处理h,\n right=库存处理,\n left_on='调拨条码',\n right_on='辅助列1',\n how='left')\n 订单处理_最终 = 订单处理_最终.reindex(\n columns=[\n '订单号',\n '分配机构',\n '仓库',\n '详细地址',\n '联系人',\n '联系方式',\n '商品编码_x',\n '条码',\n '商品名称_x',\n '采购数量',\n '采购金额',\n '库位',\n '生产批号',\n '商品代码',\n x * 3 + 1,\n '到期日期',\n '发货箱规',\n '箱数',\n '保质期',\n '体积',\n '重量'])\n 订单处理_最终 = 订单处理_最终.rename(\n columns={\n '商品编码_x': '商品编码',\n '商品名称_x': '商品名称',\n x * 3 + 1: '仓库实发数量'})\n 订单处理_最终2 = 订单处理_最终.drop_duplicates()\n print('整箱分货完毕')\n订单处理_最终 = 订单处理_最终1.append(订单处理_最终2).append(订单处理_整箱) # .append(订单_new2)\n订单处理_最终 = 订单处理_最终.drop_duplicates()\n订单处理_最终.订单号 = 订单处理_最终.订单号.astype(str)\n订单处理_最终['数据'] = 订单处理_最终.订单号 + 订单处理_最终.商品编码\n订单处理_最终 = pd.merge(\n left=订单处理_最终,\n right=订单处理_最终.数据.value_counts().reset_index(),\n left_on='数据',\n right_on='index',\n how='left')\n订单处理_最终 = 订单处理_最终.loc[(订单处理_最终.仓库实发数量 != 0) | (\n (订单处理_最终.数据_y == 1) & (订单处理_最终.仓库实发数量 == 0))]\n订单处理_最终 = pd.merge(left=订单处理_最终,\n right=标卡.loc[:,\n ['公司货号',\n '茂浦采购价未税',\n '箱规']].drop_duplicates(),\n left_on='商品代码',\n right_on='公司货号',\n how='left')\n订单处理_最终.loc[订单处理_最终.箱规 == 0, '箱规'] = 1\n订单处理_最终.loc[订单处理_最终.仓库实发数量 != 0, '箱数'] = 订单处理_最终.loc[订单处理_最终.仓库实发数量 !=\n 0, '仓库实发数量'] / (订单处理_最终.loc[订单处理_最终.仓库实发数量 != 0, '箱规'].fillna(1))\n订单处理_最终.loc[订单处理_最终.仓库实发数量 != 0,\n '箱数'] = 订单处理_最终.loc[订单处理_最终.仓库实发数量 != 0,\n '箱数'].map(lambda x: math.ceil(x))\n订单处理_最终['总体积'] = 订单处理_最终.体积 * 订单处理_最终.箱数\n订单处理_最终['总重量'] = 订单处理_最终.重量 * 订单处理_最终.箱数\n订单处理_最终['货品总额'] = 订单处理_最终.采购金额 / 0.63 / 订单处理_最终.采购数量 * 0.56 * 订单处理_最终.仓库实发数量\n订单处理_订单汇总 = 订单处理_最终.pivot_table(\n index=[\n '订单号',\n '分配机构',\n '仓库',\n '详细地址',\n '联系方式'],\n values=[\n '仓库实发数量',\n '箱数',\n '总体积',\n '货品总额',\n '总重量'],\n aggfunc='sum').reset_index()\n订单处理_订单汇总['填开日期'] = datetime.today().strftime('%Y-%m-%d')\n订单处理_订单汇总['客户名称'] = '上海茂浦电子商务有限公司'\n订单处理_订单汇总['始发地'] = '上海'\n订单处理_订单汇总['电商名称'] = '京东商城'\n订单处理_订单汇总 = 订单处理_订单汇总.reindex(\n columns=[\n '填开日期',\n '客户名称',\n '始发地',\n '分配机构',\n '电商名称',\n '订单号',\n '仓库实发数量',\n '箱数',\n '货品总额',\n '总重量',\n '总体积',\n '预约状态',\n '预约日期',\n '送货时间段',\n '预约号',\n '仓库',\n '详细地址',\n '联系方式',\n '备注'])\n订单处理_订单汇总 = 订单处理_订单汇总.sort_values(['分配机构', '仓库', '订单号'])\n订单处理_发货明细 = 订单处理_最终.sort_values(['分配机构', '仓库', '订单号'])\n订单处理_发货明细.loc[订单处理_发货明细.到期日期.notnull(),\n '有效期'] = 订单处理_发货明细.loc[订单处理_发货明细.到期日期.notnull(),\n '到期日期'].map(lambda x: x.strftime('%Y-%m-%d'))\n订单处理_发货明细 = 订单处理_发货明细.drop('到期日期', axis=1)\n# 标记\n订单处理_发货明细1 = 订单处理_发货明细.reindex(\n 
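# sheet layout note: this first detail reindex keeps unit price (茂浦采购价未税)\n # and the per-box 体积/重量 columns; the second reindex below drops them\n 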
columns=[\n '订单号',\n '分配机构',\n '仓库',\n '详细地址',\n '联系人',\n '联系方式',\n '商品编码',\n '条码',\n '茂浦采购价未税',\n '商品名称',\n '采购数量',\n '采购金额',\n '库位',\n '生产批号',\n '商品代码',\n '仓库实发数量',\n '有效期',\n '箱数',\n '箱规',\n '保质期',\n '体积',\n '重量',\n '总体积',\n '总重量'])\n订单处理_发货明细 = 订单处理_发货明细.reindex(\n columns=[\n '订单号',\n '分配机构',\n '仓库',\n '详细地址',\n '联系人',\n '联系方式',\n '商品编码',\n '条码',\n '商品名称',\n '采购数量',\n '采购金额',\n '库位',\n '生产批号',\n '商品代码',\n '仓库实发数量',\n '有效期',\n '箱数',\n '箱规',\n '保质期',\n '总体积',\n '总重量'])\n订单处理_发货明细.条码 = 订单处理_发货明细.条码.astype(str)\n订单处理_发货明细 = 订单处理_发货明细.sort_values(['分配机构', '仓库', '订单号', '商品编码'])\n订单箱数 = 订单处理_发货明细.pivot_table(\n index='订单号',\n values='箱数',\n aggfunc='sum').reset_index()\n订单箱数 = 订单箱数.rename(columns={'箱数': '订单箱数'})\n订单处理_发货明细 = pd.merge(left=订单处理_发货明细, right=订单箱数, on='订单号', how='left')\n订单处理_发货明细1 = pd.merge(left=订单处理_发货明细1, right=订单箱数, on='订单号', how='left')\n# 添加订单箱数\n订单处理_发货明细 = 订单处理_发货明细.set_index(['订单号',\n '分配机构',\n '仓库',\n '详细地址',\n '联系人',\n '联系方式',\n '订单箱数',\n '商品编码',\n '商品名称',\n '采购数量',\n '采购金额',\n '条码',\n '库位',\n '生产批号',\n '商品代码'])\n定义 = 订单处理_最终.保质期.fillna(0).map(lambda x: timedelta(x))\n订单处理_最终['生产日期'] = (订单处理_最终.到期日期 - 定义 + 定义 / 3).map(lambda x: x.date())\n订单处理_最终['是否超保'] = '否'\n订单处理_最终.loc[订单处理_最终.生产日期 < datetime.now().date() + timedelta(5), '是否超保'] = '超保'\n导出路径 = pd.ExcelWriter('D://work//曼秀雷敦//出库单发仓库出库明细@' + path_库存 + '(发仓库).xlsx')\n订单处理_订单汇总.to_excel(导出路径, '订单汇总', index=None)\n订单处理_发货明细.to_excel(导出路径, '发货明细')\n订单处理_最终['生产日期'] = (订单处理_最终.到期日期 - 定义).map(lambda x: x.date())\n订单处理_超保 = 订单处理_最终[(订单处理_最终.是否超保 == '超保') & (\n 订单处理_最终.仓库实发数量 != 0) & (订单处理_最终.仓库实发数量.notnull())]\n订单处理_最终.loc[订单处理_最终.是否超保 == '超保']\n订单处理_超保['采销部门'] = '个护'\n订单处理_超保明细 = 订单处理_超保.reindex(columns=['分配机构',\n '仓库',\n '采销部门',\n '订单号',\n '商品编码',\n '商品名称',\n '仓库实发数量',\n '保质期',\n '生产日期',\n '到期日期']).sort_values(['订单号',\n '分配机构',\n '仓库'])\n订单处理_超保明细['到期日期'] = 订单处理_超保明细.到期日期.map(lambda x: x.strftime('%Y-%m-%d'))\nprint('第二次分配完毕,正在正在进行导出,请稍后...')\n订单处理_超保明细.to_excel(导出路径, '超保明细', index=None)\ndata_out = 订单处理_发货明细1.reset_index()\ndata_out['标卡'] = data_out.订单号 + data_out.商品编码\ndata_out1 = data_out.groupby(['订单号', '商品编码', '商品名称', '订单箱数', '茂浦采购价未税', '箱规', '体积', '重量'])[\n '仓库实发数量', '箱数', '总体积', '总重量'].sum().reset_index()\ndata_out.仓库实发数量 = data_out.仓库实发数量.astype(np.int64).astype(str)\ndata_out.箱数 = data_out.箱数.fillna(0).astype(np.int64).astype(str)\ndata_out.生产批号 = data_out.生产批号.fillna(0).astype(str)\ndata_all = pd.DataFrame()\nfor i in data_out.标卡.unique():\n data1 = data_out.loc[data_out.标卡 == i]\n data1['生产批号cat'] = data1.生产批号.str.cat(sep=';')\n data1['库位cat'] = data1.库位.astype(str).str.cat(sep=';')\n data1['商品代码cat'] = data1.商品代码.str.cat(sep=';')\n data1['仓库实发数量cat'] = data1.仓库实发数量.str.cat(sep=';')\n data1['有效期cat'] = data1.有效期.str.cat(sep=';')\n data1['箱数cat'] = data1.箱数.str.cat(sep=';')\n data1 = data1.drop(['订单箱数',\n '商品名称',\n '生产批号',\n '库位',\n '商品代码',\n '仓库实发数量',\n '有效期',\n '箱数',\n '体积',\n '重量',\n '标卡',\n '总体积',\n '总重量',\n '茂浦采购价未税',\n '箱规'],\n axis=1)\n data_all = data_all.append(data1)\ndata_out2 = data_all.drop('index', axis=1).drop_duplicates()\ndata_out3 = pd.merge(data_out2, data_out1, on=['订单号', '商品编码'], how='left')\ndata_out3['始发仓库'] = '华夏龙'\ndata_out3['下单日期'] = datetime.today()\ndata_out3.下单日期 = data_out3.下单日期.map(lambda x: x.strftime('%Y-%m-%d'))\n#data_out3.茂浦采购价未税 = data_out3.茂浦采购价未税 * 1.13\ndata_out3['出库折扣'] = 0.63\ndata_out3 = data_out3.reindex(\n columns=[\n '订单号',\n '商品代码cat',\n '商品编码',\n '条码',\n '茂浦采购价未税',\n '商品名称',\n '箱规',\n '始发仓库',\n '分配机构',\n '下单日期',\n '',\n '库位cat',\n '生产批号cat',\n '采购数量',\n '',\n 
'',\n '',\n '',\n '仓库实发数量cat',\n '仓库实发数量',\n '',\n '',\n '',\n '有效期cat',\n '箱数cat',\n '箱数',\n '订单箱数',\n '',\n '',\n '',\n '',\n '',\n '',\n '',\n '出库折扣',\n '',\n '',\n '',\n '体积',\n '重量',\n '总体积',\n '总重量',\n '保质期'])\ndata_out3.to_excel(导出路径, '出库明细', index=None)\n导出路径.save()\nprint('已完成并导出')\n\n#%%\n\n\n\n\n\n","sub_path":"出库单/出库代码上海仓.py","file_name":"出库代码上海仓.py","file_ext":"py","file_size_in_byte":32110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"508642545","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/7/23 16:09\n# @Author : lixiaofeng\n# @Site : \n# @File : urls.py\n# @Software: PyCharm\n\n# coding=utf-8\nfrom django.conf.urls import url\nfrom . import views_api, views_api_sec, views\n\nfrom django.contrib.auth.models import User\nfrom base.models import Event, Guest\nfrom rest_framework import routers, serializers, viewsets\nfrom django.conf.urls import include\n\nfrom rest_framework.schemas import get_schema_view\nfrom rest_framework_swagger.renderers import SwaggerUIRenderer, OpenAPIRenderer\n\n\n# Serializers define the API representation.\n# class UserSerializer(serializers.HyperlinkedModelSerializer):\n# class Meta:\n# model = User\n# fields = ('url', 'username', 'email', 'is_staff')\n\n\n# ViewSets define the view behavior.\n# class UserViewSet(viewsets.ModelViewSet):\n# '''\n# retrieve:\n# Return a user instance.\n#\n# list:\n# Return all users,ordered by most recent joined.\n#\n# create:\n# Create a new user.\n#\n# delete:\n# Remove a existing user.\n#\n# partial_update:\n# Update one or more fields on a existing user.\n#\n# update:\n# Update a user.\n# '''\n# queryset = User.objects.all()\n# serializer_class = UserSerializer\n\n\n# class EventSerializer(serializers.ModelSerializer):\n# # guests = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n#\n# class Meta:\n# model = Event\n# fields = '__all__'\n#\n#\n# # ViewSets define the view behavior.\n# class EventViewSet(viewsets.ModelViewSet):\n# queryset = Event.objects.all()\n# serializer_class = EventSerializer\n#\n#\n# class GuestSerializer(serializers.ModelSerializer):\n# event = EventSerializer(many=False, read_only=True)\n#\n# # event = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n#\n# class Meta:\n# model = Guest\n# fields = '__all__'\n# exclude = []\n#\n#\n# # ViewSets define the view behavior.\n# class GuestViewSet(viewsets.ModelViewSet):\n# queryset = Guest.objects.all()\n# serializer_class = GuestSerializer\n#\n#\n# # Routers provide an easy way of automatically determining the URL conf.\n# router = routers.DefaultRouter()\n# # router.register(r'users', UserViewSet)\n# router.register(r'events', EventViewSet)\n# router.register(r'guests', GuestViewSet)\n#\n# schema_view = get_schema_view(title='EasyTest 测试接口', renderer_classes=[OpenAPIRenderer, SwaggerUIRenderer], url='/')\n\napp_name = \"guest\"\nurlpatterns = [\n # web\n url(r'^index_guest/$', view=views.index, name='index_guest'), # 登录页面\n url(r'^login_action_guest/$', view=views.login_action, name='login_action_guest'), # 登录出错跳转\n url(r'^event_manage/$', view=views.event_manage, name='event_manage'), # 发布会管理\n url(r'^accounts/login/$', view=views.index), # 验证用户是否登录,未登录跳转到登录页\n url(r'^search_name/$', view=views.search_name, name='search_name'), # 发布会搜索\n url(r'^guest_manage/$', view=views.guest_manage, name='guest_manage'), # 嘉宾管理\n url(r'^search_guest/$', view=views.search_guest, name='search_guest'), # 嘉宾搜索\n url(r'^sign_index/(?P\\d+)/$', 
view=views.sign_index, name='sign_index'), # 签到 eid 作为参数传给视图\n url(r'^sign_index_action/(?P\\d+)/$', view=views.sign_index_action, name='sign_index_action'), # 处理签到操作\n url(r'^logout_guest/$', view=views.logout, name='logout_guest'), # 退出\n url(r'^delete_all/$', view=views.delete_all, name='delete_all'), # 清空数据\n\n # api\n url(r'^add_event/$', view=views_api.add_event, name='add_event'), # 添加发布会接口\n url(r'^update_event/$', view=views_api.update_event, name='update_event'), # 修改发布会接口\n url(r'^get_event_list/$', view=views_api.get_event_list, name='get_event_list'), # 查询发布会接口\n url(r'^add_guest/$', view=views_api.add_guest, name='add_guest'), # 添加嘉宾接口\n url(r'^get_guest_list/$', view=views_api.get_guest_list, name='get_guest_list'), # 查询嘉宾接口\n url(r'^user_sign/$', view=views_api.user_sign, name='user_sign'), # 嘉宾签到接口\n\n # 加密api\n url(r'^sec_add_event/', view=views_api_sec.sec_add_event, name='sec_add_event'), # 添加发布会加密接口\n url(r'^sec_get_event_list/', view=views_api_sec.sec_get_event_list, name='sec_get_event_list'), # 查询发布会加密接口\n url(r'^sec_get_guest_list/', view=views_api_sec.sec_get_guest_list, name='sec_get_guest_list'), # 查询嘉宾接口\n\n # url(r'^docs/$', schema_view, name='docs'),\n # url(r'^', include(router.urls)),\n # url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n]\n","sub_path":"guest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"349572577","text":"from framework.analysis.Export import Export\n\n\nclass PurchasesHtml(Export):\n\n def to_file(self, output_path, result_json):\n with open('framework/analysis/purchases/html/template.html', 'r') as template:\n template_string = template.read()\n export_string = template_string.replace(\"''\", result_json)\n with open(output_path, 'w') as output_file:\n output_file.write(export_string)","sub_path":"framework/analysis/purchases/html/PurchasesHtml.py","file_name":"PurchasesHtml.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"499199122","text":"#python 3\nimport numpy as np\nimport os,argparse\nimport MDAnalysis as mda\nfrom MDAnalysis.topology.LAMMPSParser import DATAParser\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-d\", required=True,help=\"lammps data file\")\nparser.add_argument(\"-t\",required=True, help=\".dcd trajectory\")\nparser.add_argument(\"-n\",required=True,type=int, help = \"number of chains\")\nargs = parser.parse_args()\n\nLammpsData = args.d\nLammpsTrj = args.t\nNMol = args.n\n\nu = mda.Universe(LammpsData,LammpsTrj)\n\n#u = mda.Universe.empty()\n#u.load_new()\n\n#d = DATAParser(LammpsData)\n#d.writePDB(\"lammps.pdb\")\n\nprint (u.atoms)\nlist(u.atoms)\n\nAtoms_Molecule = []\nfor i in range(NMol):\n molID = i+1\n Atoms_Molecule.append(u.select_atoms(\"resid {}\".format(molID)))\n#print (Atoms_Molecule)\nRg_List = []\nRg_all = [] #radius of gyration of all chain at every frame\nfor ts in u.trajectory:\n\tRg_temp = 0\t\n\tRg_perframe = []\n\tfor molecule in Atoms_Molecule:\n\t\tRg_temp += molecule.radius_of_gyration()\n\t\tRg_perframe.append(molecule.radius_of_gyration())\n\tRg_all.append(Rg_perframe)\n\tRg_List.append((u.trajectory.time, (Rg_temp/NMol)))\nRg_List = np.array(Rg_List) \nRg_all = np.array(Rg_all)\nRg_avg = np.mean(Rg_all,axis = 0)#average Rg of each chain over all frame\nprint ('average Rg of each chain over all 
frame:')\nprint(Rg_avg)\nimport matplotlib.pyplot as plt\nax = plt.subplot(111)\nax.plot(Rg_List[:,0], Rg_List[:,1], 'r--', lw=2, label=r\"$R_G$\")\nax.set_xlabel(\"time (ps)\")\nax.set_ylabel(r\"radius of gyration $R_G$ ($\\AA$)\")\nax.figure.savefig(\"Rgyr.pdf\")\nplt.draw()\n\nprint ('Average Rg:')\nprint('average Rg over all chains, then average over frames {} +/- {}'.format(np.mean(Rg_List[:,1]),np.std(Rg_List[:,1]))) #average Rg over all chains, then average over frames\nprint('average Rg simultaneously over all chains and frames {} +/- {}'.format(np.mean(Rg_all),np.std(Rg_all))) #average Rg simultaneously over all chains and frames\nif NMol >1:\n\tprint('average Rg of each chain over all frames, then average over all chains {} +/- {}'.format(np.mean(Rg_avg),np.std(Rg_avg))) #average Rg of each chain over all frames, then average over all chains\n\nwith open('rg_avg.txt','w') as avg:\n\tavg.write('average Rg over all chains, then average over frames {} +/- {}'.format(np.mean(Rg_List[:,1]),np.std(Rg_List[:,1])))\n\tavg.write('\\naverage Rg simultaneously over all chains and frames {} +/- {}'.format(np.mean(Rg_all),np.std(Rg_all)))\n\tif NMol>1:\n\t\tavg.write('\\naverage Rg of each chain over all frames, then average over all chains {} +/- {}'.format(np.mean(Rg_avg),np.std(Rg_avg)))\n\tavg.write('\\naverage Rg of each chain over all frames:')\n\tfor i in range(NMol):\n\t\tavg.write('\\n {} +/- {}'.format(Rg_avg[i],np.std(Rg_all,axis = 0)[i])) \nwith open('rg.txt','w') as f:\n\tf.write('# frame ')\n\tfor i in range(NMol):\n\t\tf.write('rg{} '.format(i+1))\n\tf.write('\\n')\n\tfor i in range(len(Rg_all)):\n\t\trg_str = [str(x) for x in Rg_all[i]]\n\t\tf.write('{} {}\\n'.format(i+1,' '.join(rg_str)))\n","sub_path":"MDAnalysis_PY.py","file_name":"MDAnalysis_PY.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"107321824","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by jeffw on 2018/5/23\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# disable copy warning in pandas\npd.options.mode.chained_assignment = None # default='warn'\n# disable sklearn deprecation warning\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\n\n\n### data clean\npath='/Users/jeffw/workspace/jeffwang/github/machine-learning/project/titanic/'\ndf = pd.read_csv(path + 'train.csv').set_index('PassengerId')\n\n# feature Pclass Sex\n\n# feature Embarked\ndf.Embarked.fillna(value='S', inplace=True)\n\ndf['family_size'] = df.SibSp + df.Parch\ndf['family_size_level'] = pd.cut(df.family_size, bins=[-1,0, 3.5, 12], labels=['alone', 'middle', 'large'])\n\n# feature title_level\ndf['title'] = df.Name.apply(lambda x: x.split(', ')[1].split('. 
')[0])\ndf['title'].replace(['Mme', 'Ms', 'Mlle'], ['Mrs', 'Miss', 'Miss'], inplace = True)\ndf['title_level'] = df.title.apply(lambda x: 'rare' if x not in ['Mr', 'Miss', 'Mrs', 'Master'] else x)\ndf.title[(df.title_level == 'Miss') & (df.Age < 18)] = 'Mister'\ndf.title_level[(df.title_level == 'Miss') & (df.Age < 18)] = 'Mister'\n\n# feature age_level\ndf['age_level'] = pd.cut(df.Age, bins=[0, 15, 60, 100], labels=['child', 'middle', 'older'])\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\n\n#train set\ndf_age_train = df[df.Age.notnull()]\ndf_age = df_age_train[['Pclass', 'Sex', 'family_size', 'title_level', 'age_level']]\nX_age_dummied = pd.get_dummies(df_age.drop(columns='age_level'), columns=['Pclass', 'Sex', 'title_level'])\nY_age = df_age['age_level']\n\n#train\nclf = RandomForestClassifier(random_state=0)\nparams = {'n_estimators': range(6, 14), 'max_features': [2, 3, 4] }\ngscv = GridSearchCV(estimator=clf, param_grid=params, scoring='f1_micro', n_jobs=1, cv=5)\ngscv.fit(X_age_dummied, Y_age)\ngscv.best_score_, gscv.best_params_, gscv.best_estimator_.feature_importances_\n\ndf_age_test = df[df.Age.isnull()]\nab = df_age_test[['Pclass', 'Sex', 'family_size', 'title_level', 'age_level']]\nX_age_dummied_test = pd.get_dummies(ab.drop(columns='age_level'), columns=['Pclass', 'Sex', 'title_level'])\nX_age_dummied_test['title_level_Mister'] = np.zeros(len(X_age_dummied_test))\nX_age_dummied.shape, X_age_dummied_test.shape\nX_age_dummied.columns, X_age_dummied_test.columns\n\ndf_age_test.age_level = gscv.predict(X_age_dummied_test)\ndf_final = pd.concat([df_age_test, df_age_train]).sort_index()\n\ndf_final['MPSE'] = np.ones(len(df_final))\ndf_final.MPSE[(df_final.title_level == 'Mr') & (df_final.Pclass == 3) & (df_final.Sex == 'male') & (df_final.Embarked == 'S') & (df_final.family_size_level.isin(['alone', 'middle']))] = 4\ndf_final.MPSE[(df_final.title_level == 'Mr') & (df_final.Pclass == 2) & (df_final.Sex == 'male') & (df_final.Embarked == 'S') & (df_final.family_size_level.isin(['middle', 'alone']))] = 3\ndf_final.MPSE[(df_final.title_level == 'Mr') & (df_final.Pclass == 1) & (df_final.Sex == 'male') & (df_final.Embarked == 'S') & (df_final.family_size_level.isin(['middle', 'alone']))] = 2\n\nfrom sklearn.model_selection import cross_val_score\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\n\nlr = LogisticRegression(C=8, random_state=0)\nknn = KNeighborsClassifier(n_neighbors=9)\nsvc = SVC(C=12, gamma=0.01, random_state=0)\nmlp = MLPClassifier(alpha=0.001, hidden_layer_sizes=(50, 80), solver='lbfgs', random_state=0)\nrf = RandomForestClassifier(max_depth=5, max_features=5, n_estimators=35, random_state=0)\ngbdt = GradientBoostingClassifier(learning_rate=0.1, max_depth=2, max_features=3, n_estimators=200, random_state=0)\nbagging = BaggingClassifier(lr, n_estimators=90, random_state=0)\nabc = AdaBoostClassifier(learning_rate=0.7, n_estimators=160, random_state=0)\n\nnames = ['LR', 'KNN', 'SVC', 'MLP', 'RF', 'GBDT', 'Bagging', 'AdaB']\nmodels = [lr, knn, svc, mlp, rf, gbdt, bagging, 
abc]\n\n\nselection = ['Pclass', 'Sex', 'Embarked', 'age_level', 'title_level', 'family_size_level', 'MPSE']\nX = df_final[selection]\nY = df_final['Survived']\nX_dummied = pd.get_dummies(X, columns=selection)\n\nresult_scores = []\nfor name, model in zip(names, models):\n scores = cross_val_score(model, X_dummied, Y, cv=5, scoring='roc_auc')\n result_scores.append(scores.mean())\n print('{} has a mean score {:.4f} based on {}'.format(name, scores.mean(), scores))\n\nfrom sklearn.ensemble import VotingClassifier\nnames = ['LR', 'KNN', 'SVC', 'MLP', 'RF', 'GBDT', 'Bagging', 'AdaB']\n\nlr = LogisticRegression(C=8, random_state=0)\nsvc = SVC(C=12, gamma=0.01, random_state=0, probability=True)\nrf = RandomForestClassifier(max_depth=5, max_features=5, n_estimators=35, random_state=0)\ngbdt = GradientBoostingClassifier(learning_rate=0.1, max_depth=2, max_features=3, n_estimators=200, random_state=0)\nbagging = BaggingClassifier(lr, n_estimators=90, random_state=0)\nabc = AdaBoostClassifier(learning_rate=0.7, n_estimators=160, random_state=0)\n\n# 直接投票,票数多的获胜。\nvc_hard = VotingClassifier(estimators=[('LR', lr), ('SVC', svc), ('GBDT', gbdt), ('Bagging', bagging), ('AdaB', abc)], voting='hard')\n# 参数里说,soft更加适用于已经调制好的base learners,基于每个learner输出的概率。知乎文章里讲,Soft一般表现的更好。\nvc_soft = VotingClassifier(estimators=[('LR', lr), ('SVC', svc), ('RF', rf), ('GBDT', gbdt), ('Bagging', bagging), ('AdaB', abc)], voting='soft')\n\n# 'vc hard:', cross_val_score(vc_hard, X_dummied, Y, cv=5, scoring='roc_auc').mean(),\\\n'vc soft:', cross_val_score(vc_soft, X_dummied, Y, cv=5, scoring='roc_auc').mean()\n\n#test\ndf_test = pd.read_csv(path + 'test.csv').set_index('PassengerId')\ndf_test.info()\n\ndf_test[df_test.Fare.isna()]\n# 由于是三等舱,这里简单的填入三等舱的平均值好了。\ndf_test.Fare[df_test.Fare.isna()] = df_test.Fare[df_test.Pclass == 3].mean()\n\ndf_test['title'] = df_test.Name.apply(lambda x: x.split(', ')[1].split('. 
')[0])\n# Major, 少校;Lady,贵妇;Sir,子爵; Capt, 上尉;the Countess,伯爵夫人;Col,上校。Dr,医生?\ndf_test['title'].replace(['Mme', 'Ms', 'Mlle', 'Dona'], ['Mrs', 'Miss', 'Miss', 'Don'], inplace = True)\ndf_test['title_level'] = df_test.title.apply(lambda x: 'rare' if x not in ['Mr', 'Miss', 'Mrs', 'Master'] else x)\ndf_test.title_level[(df_test.title_level == 'Miss') & (df_test.Age < 18)] = 'Mister'\ndf_test.title[(df_test.title_level == 'Miss') & (df_test.Age < 18)] = 'Mister'\n\ndf_test['title_level'].value_counts()\n\ndf_test['family_size'] = df_test.SibSp + df_test.Parch\ndf_test['family_size_level'] = pd.cut(df_test.family_size, bins=[-1,0, 3.5, 12], labels=['alone', 'middle', 'large'])\n\n\ndf_test['age_level'] = pd.cut(df_test.Age, bins=[0, 15, 60, 100], labels=['child', 'middle', 'older'])\ndf_test.age_level.value_counts()\n\ndf_age_X_na = df_test[df_test.Age.isna()][['Pclass', 'Sex', 'family_size', 'title_level']].copy()\ndf_age_X_na_dummied = pd.get_dummies(df_age_X_na, columns=['Pclass', 'Sex', 'title_level'])\n\ndf_age_X_na_dummied['title_level_Mister'] = np.zeros(len(df_age_X_na_dummied))\ndf_age_X_na_dummied['title_level_rare'] = np.zeros(len(df_age_X_na_dummied))\n\nage_level_pred = gscv.predict(df_age_X_na_dummied)\ndf_test.age_level.fillna(pd.Series(age_level_pred, index=df_age_X_na.index), inplace=True)\ndf_test.info()\n\ndf_test['MPSE'] = np.ones(len(df_test))\ndf_test.MPSE[(df_test.title_level == 'Mr') & (df_test.Pclass == 3) & (df_test.Sex == 'male') \\\n & (df_test.Embarked == 'S') & (df_test.family_size_level.isin(['alone', 'middle']))] = 4\ndf_test.MPSE[(df_test.title_level == 'Mr') & (df_test.Pclass == 2) & (df_test.Sex == 'male') \\\n & (df_test.Embarked == 'S') & (df_test.family_size_level.isin(['middle', 'alone']))] = 3\ndf_test.MPSE[(df_test.title_level == 'Mr') & (df_test.Pclass == 1) & (df_test.Sex == 'male') \\\n & (df_test.Embarked == 'S') & (df_test.family_size_level.isin(['middle', 'alone']))] = 2\n\nselection = ['Pclass', 'Sex', 'Embarked', 'age_level', 'title_level', 'family_size_level', 'MPSE']\ndf_test_selected = df_test[selection]\ndf_test_dummied = pd.get_dummies(df_test_selected, columns=selection)\ndf_test_dummied.shape, X_dummied.shape\n\n\n## stacking\nfrom sklearn.model_selection import StratifiedKFold\nn_train=X_dummied.shape[0]\nn_test=df_test_dummied.shape[0]\nkf=StratifiedKFold(n_splits=5,random_state=1,shuffle=True)\n\n\ndef get_oof(clf, X, y, test_X):\n oof_train = np.zeros((n_train,))\n oof_test_mean = np.zeros((n_test,))\n # 5 is kf.split\n oof_test_single = np.empty((kf.get_n_splits(), n_test))\n for i, (train_index, val_index) in enumerate(kf.split(X, y)):\n kf_X_train = X.iloc[train_index]\n kf_y_train = y.iloc[train_index]\n kf_X_val = X.iloc[val_index]\n\n clf.fit(kf_X_train, kf_y_train)\n\n oof_train[val_index] = clf.predict(kf_X_val)\n oof_test_single[i, :] = clf.predict(test_X)\n # oof_test_single, 将生成一个5行*n_test列的predict value。那么mean(axis=0), 将对5行,每列的值进行求mean。然后reshape返回\n oof_test_mean = oof_test_single.mean(axis=0)\n return oof_train.reshape(-1, 1), oof_test_mean.reshape(-1, 1)\n\n\nlr = LogisticRegression(C=8, random_state=0)\nsvc = SVC(C=12, gamma=0.01, random_state=0, probability=True)\nrf = RandomForestClassifier(max_depth=5, max_features=5, n_estimators=35, random_state=0)\ngbdt = GradientBoostingClassifier(learning_rate=0.1, max_depth=2, max_features=3, n_estimators=200, random_state=0)\nbagging = BaggingClassifier(lr, n_estimators=90, random_state=0)\nabc = AdaBoostClassifier(learning_rate=0.7, n_estimators=160, 
random_state=0)\n\nlr_train, lr_test = get_oof(lr, X_dummied, Y, df_test_dummied)\nsvc_train, svc_test = get_oof(svc, X_dummied, Y, df_test_dummied)\nrf_train, rf_test = get_oof(rf, X_dummied, Y, df_test_dummied)\ngbdt_train, gbdt_test=get_oof(gbdt, X_dummied, Y, df_test_dummied)\nbagging_train, bagging_test = get_oof(bagging, X_dummied, Y, df_test_dummied)\nabc_train, abc_test = get_oof(abc, X_dummied, Y, df_test_dummied)\n\ny_train_pred_stack = np.concatenate([lr_train, svc_train, rf_train, gbdt_train, bagging_train, abc_train], axis=1)\ny_train_stack = Y\ny_test_pred_stack = np.concatenate([lr_test, svc_test, rf_test, gbdt_test, bagging_test, abc_test], axis=1)\n\ny_train_pred_stack.shape, y_train_stack.shape, y_test_pred_stack.shape\n\n\nscores = cross_val_score(RandomForestClassifier(random_state=0, n_estimators=50), y_train_pred_stack, y_train_stack, cv=5, scoring='roc_auc')\nscores.mean(), scores\n\n\ny_pred = RandomForestClassifier(random_state=0, n_estimators=100).fit(y_train_pred_stack, y_train_stack).predict(y_test_pred_stack)\n\n\nresult_df = pd.DataFrame({'PassengerId': df_test_dummied.index, 'Survived':y_pred}).set_index('PassengerId')\nresult_df.to_csv('predicted_survived.csv')","sub_path":"kaggle/titanic/da/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":11495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"275116828","text":"import numpy as np\nimport cv2 as cv\ncap = cv.VideoCapture('/Users/macboiiii/Documents/LearnProjects/OpenCV/PicVid/slow.flv')\n# take first frame of the video\nret,frame = cap.read()\n\n# setup initial location of window\n# r,h,c,w = 250,90,400,125 # simply hardcoded the values\n# R is the Y location of the box\nc = 300\nr = 200\nw = 100\nh = 30\ntrack_window = (c,r,w,h)\n\n# set up the ROI for tracking (Region of Interest)\nroi = frame[r:r+h, c:c+w]\nhsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)\n# threshold the HSV patch to keep well-saturated, mid-brightness pixels\nmask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))\n# hue histogram of the masked HSV patch\nroi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180])\ncv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)\n# Setup the termination criteria, either 10 iterations or move by at least 1 pt\nterm_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )\nwhile(1):\n ret, frame = cap.read()\n \n if ret == True:\n hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)\n dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1)\n \n # apply CamShift to get the new location\n ret, track_window = cv.CamShift(dst, track_window, term_crit)\n \n # Draw the rotated tracking box on the frame\n pts = cv.boxPoints(ret)\n pts = np.int0(pts)\n img2 = cv.polylines(frame,[pts],True, 255,2)\n cv.imshow('frame',frame)\n cv.imshow('mask',mask)\n cv.imshow('HSV',hsv_roi)\n cv.imshow('ROI',roi)\n k = cv.waitKey(60) & 0xff\n \n if k == 27:\n break\n else:\n cv.imwrite(chr(k)+\".jpg\",img2)\n else:\n break\ncv.destroyAllWindows()\ncap.release()\n","sub_path":"OpenCV/MEAN and CAM shift/CamShift.py","file_name":"CamShift.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"556743841","text":"import math\nwhile True:\n try:\n cases = int(input())\n for i in range(cases):\n grid = list(map(int, input().split()))\n m, n = grid[0], grid[1]\n s = math.ceil((m-2)/3)*math.ceil((n-2)/3)\n print(s)\n except(EOFError):\n 
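# EOF on stdin ends the loop. Each sprinkler appears to cover a 3x3 block of\n # the (m-2)x(n-2) inner grid, hence the ceil((m-2)/3)*ceil((n-2)/3) product above.\n 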
break\n","sub_path":"11044.py","file_name":"11044.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"490461153","text":"# -*- coding:utf8 -*-\nimport sys\nsys.path.append('..')\n\nimport json\nimport uuid\nimport datetime\nimport time\nimport threading\n\nimport config\n\nimport const_var\nimport exception.HTTPException as HTTPException\nfrom magic.magic import red\n\ndef generate_session_id():\n for times in range(5):\n session_id = _generate_session_id()\n if not red.exists(session_id):\n return session_id\n raise HTTPException.InternalServerError(const_var.SYSTEM_ERROR)\n\ndef _generate_session_id():\n dtime = datetime.datetime.now()\n t = time.mktime(dtime.timetuple())\n return ''.join([\n str(uuid.uuid4()),\n str(t), \n str(threading.current_thread().ident)\n ])\n\ndef set_session(red, sid, **kv):\n if 'expires' not in kv:\n expires = config.SESSION_EXPIRES_TIME\n else:\n expires = kv['expires']\n for k, v in kv.items():\n red.hset(sid, k, v)\n red.expire(sid, expires)\n\ndef get_session(red, sid, *args):\n session = {}\n if red.exists(sid):\n for k in args:\n session[k] = red.hget(sid, k)\n return session\n","sub_path":"main/utils/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"536705382","text":"\"\"\" Solution with higher complexity. Kindly assess the Assesments3.py \"\"\"\r\nsortedlist=[]\r\npriority={}\r\ndef Sort(unsorted_list,priority):\r\n\r\n for entry in unsorted_list:\r\n print(entry)\r\n if len(sortedlist)==0:\r\n sortedlist.append(entry)\r\n else:\r\n #try to find the correct position\r\n r=0\r\n for i in range(len(sortedlist)):\r\n for j in range(len(sortedlist[i])):\r\n if priority[sortedlist[i][j]]>priority[entry[j]]: #lower number indicates higher priority\r\n sortedlist.insert(i,entry)\r\n\r\n elif priority[sortedlist[i][j]] %s\" % (in_file, out_file)\r\n run_cmd(cmd, throw_error=True)\r\n return out_file\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--model_dir', type=str)\r\n parser.add_argument('--gpu', type=int, default=-1)\r\n args = parser.parse_args()\r\n ckpts = glob.glob('%s/*step_*.pt' % args.model_dir)\r\n ckpts.sort(key=extract_step)\r\n ape_subwords = [inference(ckpt, override=False, gpu=args.gpu) for ckpt in ckpts]\r\n ape_txts = [recover_tokens(x, override=True) for x in ape_subwords]\r\n bleus = [compute_BLEU(x) for x in ape_txts]\r\n ters = [compute_ter_with_shift(x) for x in ape_txts]\r\n max_bleu_index = int(np.argmax(bleus))\r\n min_ter_index = int(np.argmin(ters))\r\n for ape_txt, bleu, ter in zip(ape_txts, bleus, ters):\r\n print('TER: %f, BLEU: %f, file: %s' % (ter, bleu, ape_txt))\r\n print('\\n min TER: %f, file: %s' % (ters[min_ter_index], ape_txts[min_ter_index]))\r\n print('max BLEU: %f, file: %s' % (bleus[max_bleu_index], ape_txts[max_bleu_index]))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"wmt20/lib/find_best_model.py","file_name":"find_best_model.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"437293994","text":"\"\"\"\nPylint plugin: checks that feature toggles are properly annotated.\n\"\"\"\n\nimport re\n\nfrom pylint.checkers import BaseChecker, utils\nfrom pylint.interfaces import IAstroidChecker\n\nfrom .common import BASE_ID, 
check_visitors\n\n\ndef register_checkers(linter):\n \"\"\"\n Register checkers.\n \"\"\"\n linter.register_checker(FeatureToggleChecker(linter))\n\n\nclass AnnotationLines:\n \"\"\"\n AnnotationLines provides utility methods to work with a string in terms of\n lines. As an example, it can convert a Call node into a list of its contents\n separated by line breaks.\n \"\"\"\n\n # Regex searches for annotations like: # .. toggle or # .. documented_elsewhere\n _ANNOTATION_REGEX = re.compile(r\"[\\s]*#[\\s]*\\.\\.[\\s]*(toggle|documented_elsewhere)\")\n\n def __init__(self, module_node):\n \"\"\"\n Arguments:\n module_node: The visited module node.\n \"\"\"\n module_as_binary = module_node.stream().read()\n\n file_encoding = module_node.file_encoding\n if file_encoding is None:\n file_encoding = \"UTF-8\"\n\n module_as_string = module_as_binary.decode(file_encoding)\n self._list_of_string_lines = module_as_string.split(\"\\n\")\n\n def is_line_annotated(self, line_number):\n \"\"\"\n Checks if the provided line number is annotated.\n \"\"\"\n if line_number < 1 or self._line_count() < line_number:\n return False\n\n return bool(self._ANNOTATION_REGEX.match(self._get_line_contents(line_number)))\n\n def _line_count(self):\n \"\"\"\n Gets the number of lines in the string.\n \"\"\"\n return len(self._list_of_string_lines)\n\n def _get_line_contents(self, line_number):\n \"\"\"\n Gets the line of text designated by the provided line number.\n \"\"\"\n return self._list_of_string_lines[line_number - 1]\n\n\n@check_visitors\nclass FeatureToggleChecker(BaseChecker):\n \"\"\"\n Checks that feature toggles are properly annotated and best practices\n are followed.\n \"\"\"\n\n __implements__ = (IAstroidChecker,)\n\n name = \"feature-toggle-checker\"\n\n TOGGLE_NOT_ANNOTATED_MESSAGE_ID = \"feature-toggle-needs-doc\"\n ILLEGAL_WAFFLE_MESSAGE_ID = \"illegal-waffle-usage\"\n\n _CHECK_CAPITAL_REGEX = re.compile(r\"[A-Z]\")\n _WAFFLE_TOGGLE_CLASSES = (\"WaffleFlag\", \"WaffleSwitch\", \"CourseWaffleFlag\")\n _ILLEGAL_WAFFLE_FUNCTIONS = [\"flag_is_active\", \"switch_is_active\"]\n\n msgs = {\n (\"E%d40\" % BASE_ID): (\n u\"feature toggle (%s) is missing annotation\",\n TOGGLE_NOT_ANNOTATED_MESSAGE_ID,\n \"feature toggle is missing annotation\",\n ),\n (\"E%d41\" % BASE_ID): (\n u\"illegal waffle usage with (%s): use utility classes {}.\".format(\n \", \".join(_WAFFLE_TOGGLE_CLASSES)\n ),\n ILLEGAL_WAFFLE_MESSAGE_ID,\n u\"illegal waffle usage: use utility classes {}.\".format(\n \", \".join(_WAFFLE_TOGGLE_CLASSES)\n ),\n ),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._lines = None\n\n def visit_module(self, node):\n \"\"\"Parses the module code to provide access to comments.\"\"\"\n self._lines = AnnotationLines(node)\n\n def check_waffle_class_annotated(self, node):\n \"\"\"\n Check Call node for waffle class instantiation with missing annotations.\n \"\"\"\n if not hasattr(node.func, \"name\"):\n return\n\n # Looking for class instantiation, so should start with a capital letter\n starts_with_capital = self._CHECK_CAPITAL_REGEX.match(node.func.name)\n if not starts_with_capital:\n return\n\n # Search for toggle classes that require an annotation\n if not node.func.name.endswith(self._WAFFLE_TOGGLE_CLASSES):\n return\n\n if not self._lines.is_line_annotated(node.lineno - 1):\n feature_toggle_name = \"UNKNOWN\"\n\n if node.keywords is not None:\n for node_key in node.keywords:\n if node_key.arg == \"flag_name\":\n feature_toggle_name = node_key.value.value\n\n if 
feature_toggle_name == \"UNKNOWN\":\n if len(node.args) >= 2:\n feature_toggle_name = node.args[1].as_string()\n\n self.add_message(\n self.TOGGLE_NOT_ANNOTATED_MESSAGE_ID,\n args=(feature_toggle_name,),\n node=node,\n )\n\n def check_configuration_model_annotated(self, node):\n \"\"\"\n Checks class definitions to see if they subclass ConfigurationModel.\n If they do, they should be correctly annotated.\n \"\"\"\n if \"ConfigurationModel\" not in node.basenames:\n return\n if not self._lines.is_line_annotated(node.lineno - 1):\n config_model_subclass_name = node.name\n\n self.add_message(\n self.TOGGLE_NOT_ANNOTATED_MESSAGE_ID,\n args=(config_model_subclass_name,),\n node=node,\n )\n\n def check_django_feature_flag_annotated(self, node):\n \"\"\"\n Checks dictionary definitions to see if the django feature flags\n dict FEATURES is being set. If it is, entries should be\n correctly annotated.\n \"\"\"\n try:\n parent_target_name = node.parent.targets[0].name\n except AttributeError:\n return\n\n if parent_target_name == \"FEATURES\":\n for key, _ in node.items:\n if not self._lines.is_line_annotated(key.lineno - 1):\n django_feature_toggle_name = key.value\n\n self.add_message(\n self.TOGGLE_NOT_ANNOTATED_MESSAGE_ID,\n args=(django_feature_toggle_name,),\n node=node,\n )\n\n def check_illegal_waffle_usage(self, node):\n \"\"\"\n Check Call node for illegal waffle calls.\n \"\"\"\n if not hasattr(node.func, \"name\"):\n return\n\n if node.func.name in self._ILLEGAL_WAFFLE_FUNCTIONS:\n feature_toggle_name = \"UNKNOWN\"\n if len(node.args) >= 1:\n feature_toggle_name = node.args[0].as_string()\n\n self.add_message(\n self.ILLEGAL_WAFFLE_MESSAGE_ID, args=(feature_toggle_name,), node=node\n )\n\n @utils.check_messages(TOGGLE_NOT_ANNOTATED_MESSAGE_ID, ILLEGAL_WAFFLE_MESSAGE_ID)\n def visit_call(self, node):\n \"\"\"\n Performs various checks on Call nodes.\n \"\"\"\n self.check_waffle_class_annotated(node)\n self.check_illegal_waffle_usage(node)\n\n @utils.check_messages(TOGGLE_NOT_ANNOTATED_MESSAGE_ID)\n def visit_classdef(self, node):\n \"\"\"\n Checks class definitions for potential ConfigurationModel\n implementations.\n \"\"\"\n self.check_configuration_model_annotated(node)\n\n @utils.check_messages(TOGGLE_NOT_ANNOTATED_MESSAGE_ID)\n def visit_dict(self, node):\n \"\"\"\n Checks Dict nodes in case a Django FEATURES dictionary is being\n initialized.\n \"\"\"\n self.check_django_feature_flag_annotated(node)\n","sub_path":"edx_lint/pylint/feature_toggle_check.py","file_name":"feature_toggle_check.py","file_ext":"py","file_size_in_byte":7205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"595540953","text":"import pymysql\r\nimport os,json\r\n\r\nfile = os.path.abspath('D:\\yelp-dataset\\yelp_academic_dataset_user.json')\r\njson_obj = []\r\nwith open(file, encoding='utf-8') as f:\r\n for line in f:\r\n json_obj.append(json.loads(line))\r\n\r\n\r\ndef val(value):\r\n if value is not None:\r\n if type(value) is int:\r\n return int(value)\r\n else:\r\n return value\r\n\r\n\r\ncon = pymysql.connect(host='localhost', user='root', passwd='Skywalker@1993', db='yelpDB')\r\ncursor = con.cursor()\r\n\r\nfor i, item in enumerate(json_obj):\r\n user_id = val(item.get(\"user_id\", None))\r\n user_name = val(item.get(\"name\", None))\r\n user_since = val(item.get(\"yelping_since\", None))\r\n\r\n cursor.execute('INSERT INTO Users(user_id,user_name,user_since) VALUES (%s, %s, %s)',\r\n (user_id, user_name, user_since))\r\n print(\"Done\", 
i)\r\ncon.commit()\r\ncon.close()","sub_path":"Yelp_Project/Users.py","file_name":"Users.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"578727922","text":"import h5py\nimport numpy as np\nimport sys\nsys.path.append(\"utils/\")\nsys.path.append(\"../\")\nfrom color_histograms_utils import feat_extract\nfrom scipy.stats import mode\n\nfrom project.metrics import *\n\nimport matplotlib.pyplot as plt\n\nfrom PIL import Image\n\nwith h5py.File(\"./data/assorted_images/trucks_and_planes.h5\",\"r\") as hf:\n X_cifar = hf[\"data\"][...] / 255.\n y_cifar = hf[\"labels\"][...]\n\nwith h5py.File(\"./data/assorted_images/satellite_images.h5\",\"r\") as hf:\n X_satel = hf[\"data\"][...] / 255.\n y_satel = hf[\"labels\"][...]\n\nT = np.load(\"./data/features/transfer_features.npy\")\nuga_mean = T.mean(axis=0)\nuga_std = T.std(axis=0)\n\n# Train / Test split\nnp.random.seed(0)\nassign = np.random.permutation(len(X_satel))\nsatel_cutoff = int(0.8 * len(X_satel))\nsatel_train_assign = assign[:satel_cutoff]\nsatel_test_assign = assign[satel_cutoff:]\n\nnp.random.seed(0)\nassign = np.random.permutation(len(X_cifar))\ncifar_cutoff = int(0.7 * len(X_cifar))\ncifar_train_assign = assign[:cifar_cutoff]\ncifar_test_assign = assign[cifar_cutoff:]\n\ndef load_trucksplanes_labels():\n return y_cifar[cifar_train_assign][...,None]\n\ndef load_satellite_labels():\n return y_satel[satel_train_assign][...,None]\n\ndef load_satellite_labels_test():\n return y_satel[satel_test_assign][...,None]\n\ndef shuffle_data(H, y):\n np.random.seed(0)\n p = np.random.permutation(len(H))\n return H[p], y[p]\n\ndef extract_trucksplanes_histograms(bins, use_hsv):\n X = X_cifar[cifar_train_assign]\n\n H = []\n for i, img in enumerate(X):\n if (i + 1) % 500 == 0:\n print(\"Extracted {} of {} histograms\".format(i+1,len(X)))\n h = feat_extract(img, bins=bins, use_hsv=use_hsv)\n H.append( h )\n\n print(\"Extracted {} of {} histograms\".format(len(X),len(X)))\n print(\"Done!\")\n\n H = np.row_stack(H)\n H = np.column_stack([H,np.ones(len(H))])\n return H\n\ndef extract_satellite_histograms(bins, use_hsv):\n X = X_satel[cifar_train_assign]\n\n H = []\n for i, img in enumerate(X):\n if (i + 1) % (len(X)/10) == 0:\n print(\"Extracted {} of {} histograms\".format(i+1,len(X)))\n h = feat_extract(img, bins=bins, use_hsv=use_hsv)\n H.append( h )\n\n print(\"Extracted {} of {} histograms\".format(len(X),len(X)))\n print(\"Done!\")\n\n H = np.row_stack(H)\n H = np.column_stack([H,np.ones(len(H))])\n return H\n\n#def extract_imagenet_features():\n# H = np.load(\"./data/features/imagenet_features.npy\")\n# return H[satel_train_assign]\n#\n#def extract_nightlights_features():\n# H = np.load(\"./data/features/nightlights_features.npy\")\n# return H[satel_train_assign]\n#\n#def extract_survey_features():\n# H = np.load(\"./data/features/survey_features.npy\")\n# return H[satel_train_assign]\n\ndef extract_uganda_features():\n H = np.load(\"./data/features/transfer_features.npy\")\n H -= uga_mean\n H /= (uga_mean + 1e-8)\n H = np.column_stack([H,np.ones(len(H))])\n return H[satel_train_assign]\n\ndef extract_uganda_features_test():\n H = np.load(\"./data/features/transfer_features.npy\")\n H -= uga_mean\n H /= (uga_mean + 1e-8)\n H = np.column_stack([H,np.ones(len(H))])\n return H[satel_test_assign]\n\ndef check_training_progress(weights, epoch, epochs):\n if epoch in np.linspace(10,epochs + 10,10).astype(\"int32\"):\n print(\"Epoch: 
{}/{}\".format(epoch,epochs))\n exploded = np.isnan(np.sum(weights))\n if exploded:\n s = \"Your model exploded! \"\n s += \"Try a smaller learning or regularization rate\"\n print(s)\n return exploded\n\ndef compute_final_results(hyperparameters, models, data=None, labels=None, use_satellite= False):\n if use_satellite:\n X_t = extract_uganda_features()\n y_t = load_satellite_labels()\n X_v = extract_uganda_features_test()\n y_v = load_satellite_labels_test()\n else:\n assert (data is not None) and (labels is not None)\n cutoff = int(0.7 * len(data))\n X_t = data[:cutoff]\n y_t = labels[:cutoff]\n X_v = data[cutoff:]\n y_v = labels[cutoff:]\n\n num_features = X_t.shape[-1]\n print(\"Retraining model on ALL training data\")\n trained_models = []\n for model_class in models:\n model = model_class(\n num_features,\n hyperparameters[\"learning_rate\"],\n hyperparameters[\"regularization_rate\"],\n hyperparameters[\"batch_size\"],\n hyperparameters[\"epochs\"]\n )\n model.train(X_t,y_t)\n acc_train = compute_accuracy(model, X_t, y_t)\n acc_test = compute_accuracy(model, X_v, y_v)\n\n print(\"TRAINING RESULTS: \")\n compute_all_scores(model, X_t, y_t)\n\n print(\"TESTING RESULTS: \")\n compute_all_scores(model, X_v, y_v)\n\n trained_models.append(model)\n\n return trained_models\n\ndef get_locs():\n with open(\"./data/survey_data/uga_2011_locs.txt\",\"r\") as f:\n lines = f.readlines()\n lines = [list(map(float,(line.strip()).split(' '))) for line in lines]\n return np.array(lines)\n\ndef rotate(x, t):\n theta = np.radians(t)\n c, s = np.cos(theta), np.sin(theta)\n R = np.array(np.matrix('{} {}; {} {}'.format(c, -s, s, c)))\n return np.dot(x, R)\n\ndef change_range(x, newMin, newMax):\n #oldMax = x.max(); oldMin = x.min()\n oldMin = np.array([x[:,0].min(),x[:,1].min()])\n oldMax = np.array([x[:,0].max(),x[:,1].max()])\n\n oldRange = (oldMax - oldMin)\n newRange = (newMax - newMin)\n y = (((x - oldMin) * newRange) / oldRange) + newMin\n\n #y = newMax - y\n return y\n\ndef uganda_map(trained_models):\n k = 25\n f, axs = plt.subplots(1,len(trained_models)+1, figsize=(k, k/4))\n img = Image.open(\"./sample_images/ugout.gif\")\n r, c = np.array(img).shape\n LOCS = get_locs()\n LOCS = rotate(LOCS, 95)\n LOCS = change_range(LOCS, np.array([60.,60.]), np.array([500.,500.]))\n\n X_t = extract_uganda_features()\n X_v = extract_uganda_features_test()\n\n y_t = load_satellite_labels()\n y_v = load_satellite_labels_test()\n X = np.row_stack([X_t, X_v])\n y = np.concatenate([y_t, y_v])\n for ax in axs:\n ax.imshow(img)\n\n for i, model in enumerate(trained_models):\n i += 1\n pred = np.squeeze(model.predict(X))\n C = np.column_stack([pred, np.zeros_like(pred), np.zeros_like(pred)])\n axs[i].scatter(LOCS[:,0], LOCS[:,1], c= C)\n\n axs[i].set_title(model.name)\n axs[i].set_xticklabels([])\n axs[i].set_yticklabels([])\n\n C = np.column_stack([y, np.zeros_like(y), np.zeros_like(y)])\n axs[0].scatter(LOCS[:,0],LOCS[:,1],c= C)\n axs[0].set_title(\"Labels\")\n axs[0].set_xticklabels([])\n axs[0].set_yticklabels([])\n plt.show()\n\nif __name__ == \"__main__\":\n uganda_map([None, None, None])\n\n","sub_path":"solutions/utils/logistic_regression_utils.py","file_name":"logistic_regression_utils.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"476628401","text":"import requests\nimport pandas as pd\nimport openpyxl\n\n\nyear = list(range(2002, 2016, 2))\nsheets = ['%s US House & Senate Results'] % (year) # add 
corresponding sheet names\ndfs = []\nfor (year,sheet) in zip(years,sheets):\n    xls = 'https://transition.fec.gov/pubrec/fe' + str(year) + '/federalelections' + str(year) + '.xls'\n    resp = requests.get(xls)\n    filename = 'file' + str(year) + '.xls'\n    output = open(filename,'wb')\n    output.write(resp.content)\n    output.close()\n\n    df = pd.read_excel(filename,sheetname=sheet)\n    df['YEAR'] = len(df) * [year]\n    newfilename = 'new' + str(year) + '.xlsx'\n    df.to_excel(newfilename,index=False)\n\n    # need to rename some columns for each year: use dictionary format\n    # so when you concat the dataframes they match up correctly\n    # currently columns from different years that have the same context are not\n    # concatenating as the same column\n    dfs.append(df)\n\n\nmaster = pd.concat(dfs)\nmaster.to_excel('master_file.xlsx',index=False)\n","sub_path":"Ingestion/congressional_results.py","file_name":"congressional_results.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"216235352","text":"import numpy as np\nfrom sklearn.utils.validation import check_random_state\n\nfrom cartesian.algorithm import oneplus\nfrom cartesian.cgp import *\n\nrng = check_random_state(1337)\nprimitives = [\n    Primitive(\"add\", np.add, 2),\n    Primitive(\"mul\", np.multiply, 2),\n    Symbol(\"x_0\"),\n    Symbol(\"x_1\"),\n    Ephemeral(\"erc\", rng.normal),\n]\npset = PrimitiveSet.create(primitives)\nx = rng.normal(size=(100, 2))\ny = x[:, 1] * x[:, 0] + 0.3\ny += 0.05 * rng.normal(size=y.shape)\n\n\ndef func(individual):\n    f = compile(individual)\n    yhat = f(*x.T)\n    return np.sqrt(np.mean((y - yhat) ** 2)) / (y.max() - y.min())\n\n\nMyCartesian = Cartesian(\"MyCartesian\", pset, n_rows=2, n_columns=3, n_out=1, n_back=1)\nres = oneplus(func, cls=MyCartesian, f_tol=0.01, random_state=rng, maxfev=50000, n_jobs=1)\nprint(res)\n","sub_path":"examples/ephemeral_constant.py","file_name":"ephemeral_constant.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"438153611","text":"from graphics import *\nfrom dieview2 import DieView\nfrom button import Button\n\n\nclass GraphicsInterface:\n    def __init__(self):\n        self.win = GraphWin (\"Craps Game\", 600, 400)\n        self.win.setBackground('green3')\n\n        banner = Text(Point(300,30), \"Craps Table\")\n        banner.setSize(24)\n        banner.setFill(\"yellow2\")\n        banner.setStyle(\"bold\")\n        banner.draw(self.win)\n\n        self.createDice(Point(300, 100), 75)\n\n        self.msg = Text(Point(300,275), \"Welcome to the Craps Table.\")\n        self.msg.setSize(18)\n        self.msg.draw(self.win)\n        self.msg1 = Text(Point(240,275), \"\")\n        self.msg1.setSize(18)\n        self.msg1.draw(self.win)\n        moneyMsg = Text(Point(280, 375), \"You have:\")\n        moneyMsg.setSize(18)\n        moneyMsg.draw(self.win)\n        self.money = Text(Point(360, 375), \"$100\")\n        self.money.setSize(18)\n        self.money.draw(self.win)\n        betText = Text(Point(320, 100), 'Place a Bet: $')\n        betText.setSize(16)\n        betText.draw(self.win)\n        self.input = Entry(Point(460, 100), 20)\n        self.input.draw(self.win)\n\n        self.rollDiceButton = Button(self.win, Point(125, 200), 200, 40, \"Roll Dice\")\n        self.rollDiceButton.activate()\n        self.quitButton = Button(self.win, Point(570, 375), 40, 30, \"Quit\")\n        self.quitButton.activate()\n        self.helpButton = Button(self.win, Point(35, 375), 40, 30, \"Help\")\n        self.helpButton.activate()\n\n    def createDice(self, center, size):\n        center.move(-3*size, 0)\n        self.dice = []\n        for i in range(2):\n            view = 
DieView(self.win, center, size)\n self.dice.append(view)\n center.move(1.5*size, 0)\n\n def setDice(self, values):\n for i in range(2):\n self.dice[i].setValue(values[i])\n\n def getBetAmount(self):\n return self.input.getText()\n\n def showResult(self, result):\n if result == 'win':\n text = 'You won!'\n else:\n text = 'Sorry, you lost.'\n self.msg.setText(text)\n\n def wantToPlay(self):\n ans = self.choose()\n self.msg.setText(\"\")\n self.msg1.setText(\"\")\n return ans\n\n def setMoney(self, amt):\n self.money.setText(\"${0}\".format(amt))\n\n def choose(self):\n while True:\n p = self.win.getMouse()\n if self.rollDiceButton.clicked(p):\n return self.rollDiceButton.getLabel()\n elif self.helpButton.clicked(p):\n return self.helpButton.getLabel()\n else:\n return self.quitButton.getLabel()\n\n def help(self):\n helpWin = GraphWin(\"Help\", 600, 400)\n helpWin.setBackground(\"blue1\")\n\n banner = Text(Point(275, 75), \"The Game of Craps:\")\n banner.setSize(24)\n banner.setStyle(\"bold\")\n banner.draw(helpWin)\n\n explanation = [Text(Point(305, 140), \"Both dice are always rolled at the same time.\"),\n Text(Point(305, 180), \"A player places a bet, and then rolls the dice.\"),\n Text(Point(300, 220), \"A 7 or 11 on the first roll is a win, while a 2, 3, or 12 is a loss.\"),\n Text(Point(305, 260), \"Otherwise, the player keeps rolling until:\"),\n Text(Point(305, 300), \"They re-roll their initial roll (this is a win)\"),\n Text(Point(300, 340), \"Or\"),\n Text(Point(300, 380), \"They roll a 7 (this is a loss)\")]\n for sentence in explanation:\n sentence.draw(helpWin)\n sentence.setSize(16)\n \n closeButton = Button(helpWin, Point(65, 30), 110, 40, \"Close Window\")\n closeButton.activate()\n p = helpWin.getMouse()\n if closeButton.clicked(p):\n helpWin.close()\n\n def close(self):\n self.win.close()\n\n def getMouse(self):\n return self.win.getMouse()\n","sub_path":"CH12/HW/barry/graphicsInterface.py","file_name":"graphicsInterface.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"635151520","text":"from django.urls import path, include\n\nfrom . import views\n\n\nurlpatterns = [\n\tpath('', views.BookingPageView.as_view(number=False), name = 'success'),\t\n\tpath('check_in', views.check_in),\n\tpath('clientsList', views.clientsView.as_view()),\n\tpath('clientsList_all', views.clients_allView.as_view()),\n\tpath('success_client', views.thanks),\n]","sub_path":"booking/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"256837541","text":"from kazoo.client import KazooClient, KazooState\n\nimport sys\nimport operator\nimport time\nimport random\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\n\nfrom config.config import *\nfrom . import mymysql\nfrom . 
import fenci\nimport logging\nsetup_logging()\nlogger = logging.getLogger(\"root\")\n\nclass TrainCBox(object):\n def __init__(self, type, name):\n self._Type = type\n self._Name = \"%s%s\" % (type, str(name))\n self._VName = HOST['name']\n self._Path = \"/%s/%s/%s\" % (self._Type, self._VName, self._Name)\n self._ZKHost = ZKSERVERS['hosts']\n self._IP = HOST['ip']\n self._Port = HOST['port']\n self._ID = int(name)\n self._ZK = None\n self._Chatbot = None\n self._Current_mina_master = ''\n self._Master_path = '/MinAMaster/%s' % self._VName\n self._MonitorRunning = False\n self._MinARunning = False\n self._Conn = None\n logger.debug('create a %s box named %s in VM %s.' % (self._Type, self._Name, self._VName))\n\n def connectZK(self):\n self._ZK = KazooClient(hosts=self._ZKHost)\n logger.info('%s is connecting ZK server.' % self._Path)\n\n def getType(self):\n return self._Type\n\n def getName(self):\n return self._Name\n\n def getVName(self):\n return self._VName\n\n def setZK(self,zk):\n self._ZK = zk\n\n def getZK(self):\n return self._ZK\n\n def startZK(self):\n self._ZK.start()\n logger.debug('start one connection with ZK server by a %s box named %s in VM %s' % (self._Type, self._Name, self._VName))\n\n def stopZK(self):\n self._ZK.stop()\n logger.debug('stop connection with ZK server by a %s box named %s in VM %s' % (self._Type, self._Name, self._VName))\n\n def addZKListener(self):\n def my_listenser(state):\n if state == KazooState.LOST:\n self.startZK()\n elif state == KazooState.SUSPENDED:\n self.connectZK()\n self.startZK()\n else:\n self.startMonitor()\n self._ZK.add_listener(my_listenser)\n\n def startMonitor(self):\n #self._MonitorRunning = True\n # scribe Node Data Changes\n @self._ZK.DataWatch(self._Path)\n def watch_node_data_change(data, stat, path):\n #parse data\n #update the corresponding training status of train-task-sheet in MySQL database\n #you can invoke other class to complete this task above\n #then you also continue to instance a training chatterbot object for doing the key train task really\n\n if data:\n temp_str = eval(data.decode(\"utf-8\"))['Target']\n if self._Conn is None:\n self._Conn = mymysql.myconnect(KBDATABASES)\n param = (temp_str.split(\"_\")[0], temp_str.split(\"_\")[-1], 0)\n temp_status = mymysql.myselectstatus(self._Conn, param)\n # logger.error(temp_status)\n if operator.ne(temp_str, 'Null') and temp_status[0] == 3:\n logger.info('Watch one node %s with data %s is not Null' %(self._Path, temp_str))\n param = (1, temp_str.split(\"_\")[0], temp_str.split(\"_\")[-1], 3)\n paramkg = (1, temp_str.split(\"_\")[0], temp_str.split(\"_\")[-1], 0)\n logger.debug('untrained kb %s is ready to train in mysql' % temp_str.split(\"_\")[0])\n if mymysql.myupdate(self._Conn, param):\n logger.info('untrained kb %s in mysql turns from status 3 to 1.' % temp_str.split(\"_\")[0])\n mymysql.myupdatekg(self._Conn, paramkg)\n if self.trainkb(temp_str):\n param = (2, temp_str.split(\"_\")[0], temp_str.split(\"_\")[-1], 1)\n if mymysql.myupdate(self._Conn, param):\n logger.info('success: trained kb %s in mysql turns from status 1 to 2.' % temp_str.split(\"_\")[0])\n mymysql.myupdatekg(self._Conn, param)\n else:\n param = (0, temp_str.split(\"_\")[0], temp_str.split(\"_\")[-1], 1)\n if mymysql.myupdate(self._Conn, param):\n logger.info('failure: untrained kb %s in mysql turns from status 1 to 0.' 
% temp_str.split(\"_\")[0])\n mymysql.myupdatekg(self._Conn, param)\n\n self.deleteZKnodedata()\n mymysql.myclose(self._Conn)\n self._Conn = None\n\n def addtraintaskMonitor(self):\n # monitor Node Data Changes\n if self._ZK.exists(self._Path):\n data, _ = self._ZK.get(self._Path)\n logger.debug('Monitor one A Box node with data: %s, path: %s' % (data.decode(\"utf-8\"), self._Path))\n if data:\n temp_str = eval(data.decode(\"utf-8\"))['Target']\n if self._Conn is None:\n self._Conn = mymysql.myconnect(KBDATABASES)\n param = (temp_str.split(\"_\")[0], temp_str.split(\"_\")[-1], 0)\n temp_status = mymysql.myselectstatus(self._Conn, param)\n if operator.ne(temp_str, 'Null') and temp_status[0] == 3:\n logger.debug('Watch one node %s with data %s is not Null' % (self._Path, temp_str))\n param = (1, temp_str.split(\"_\")[0], temp_str.split(\"_\")[-1], 3)\n paramkg = (1, temp_str.split(\"_\")[0], temp_str.split(\"_\")[-1], 0)\n logger.info('untrained kb %s is ready to train in mysql' % temp_str.split(\"_\")[0])\n if mymysql.myupdate(self._Conn, param):\n logger.info('untrained kb %s in mysql turns from status 3 to 1.' % temp_str.split(\"_\")[0])\n mymysql.myupdatekg(self._Conn, paramkg)\n if self.trainkb(temp_str):\n param = (2, temp_str.split(\"_\")[0], temp_str.split(\"_\")[-1], 1)\n if mymysql.myupdate(self._Conn, param):\n logger.info('success: from status 1 to 2.' )\n mymysql.myupdatekg(self._Conn, param)\n else:\n param = (0, temp_str.split(\"_\")[0], temp_str.split(\"_\")[-1], 1)\n if mymysql.myupdate(self._Conn, param):\n logger.info('failure: from status 1 to 0.')\n mymysql.myupdatekg(self._Conn, param)\n self.deleteZKnodedata()\n mymysql.myclose(self._Conn)\n self._Conn = None\n else:\n self.InitialABOXNode()\n\n def addMinAMasterMonitor(self):\n #self._MonitorRunning = True\n # scribe Master Node Data Removed\n if self._ZK.exists(self._Master_path) is None:\n self._ZK.create(self._Master_path, None, None, ephemeral=False, sequence=False, makepath=True)\n else:\n self._ZK.set(self._Master_path, None)\n @self._ZK.DataWatch(self._Master_path)\n def watch_node_data_removed(data, stat, path):\n if data is None:# show this is remove event\n self._Current_mina_master = ''\n self.searchIdleAwithId()\n\n def searchIdleAwithId(self):\n # List all children of A type BOX under the condition of zk connection\n children = self._ZK.get_children(\"/%s/%s\" % (self._Type, self._VName))\n if children is not None:\n temp_mina = sys.maxsize\n logger.debug(\"search idle A box %d with names %s\" % (len(children), children))\n random.shuffle(children)\n for child in children:\n child_path = \"/%s/%s/%s\" % (self._Type, self._VName, child)\n data, stat = self._ZK.get(child_path)\n if eval(data.decode(\"utf-8\"))['Target'] == 'Null':\n temp = child.split(\"A\")[-1]\n if temp_mina > int(temp):\n temp_mina = int(temp)\n logger.debug(\"mina=%d, id = %d\"%(temp_mina, self._ID))\n if temp_mina == self._ID:\n if self.takeMinAMaster():\n logger.info(\"MinA = %d, Node = %s\" % (temp_mina, self._Name))\n self.processTrainTaskAssign()\n\n def searchIdleAboxassign(self):\n box_list = self._ZK.get_children(\"/%s/%s\" % (self._Type, self._VName))\n idle_box = []\n random.shuffle(box_list)\n for box in box_list:\n node = \"/%s/%s/%s\" % (self._Type, self._VName, box)\n data, _ = self._ZK.get(node)\n if eval(data.decode(\"utf-8\"))['Target'] == 'Null':\n idle_box.append(box)\n logger.debug('idle A box is %s.' 
% box)\n        logger.info(idle_box)\n        if idle_box:\n            self.processtraintask(idle_box)\n\n    def processtraintask(self, box):\n        while True:\n            datas = self.startMonitorMySQL()\n            if len(datas):\n                for aboxdata in datas:\n                    oneabox = box[0]\n                    param = (3, aboxdata.split(\"_\")[0], aboxdata.split(\"_\")[-1], 0)\n                    logger.debug('untrained kb %s is ready for assigned in a A idle box called %s' % (aboxdata.split(\"_\")[0], oneabox))\n                    if mymysql.myupdate(self._Conn, param):\n                        label = self.assignOneTrainTasktoABox(oneabox, aboxdata)\n                        if not label:\n                            param = (0, aboxdata.split(\"_\")[0], aboxdata.split(\"_\")[-1], 3)\n                            mymysql.myupdate(self._Conn, param)\n                            logger.info('failure for update training mission %s. so from status 3 to 0.' % aboxdata.split(\"_\")[0])\n                        else:\n                            box.pop(0)\n                            logger.info('success for update training mission %s from status 0 to 3.' % aboxdata.split(\"_\")[0])\n\n            if not box:\n                mymysql.myclose(self._Conn)\n                self._Conn = None\n                logger.debug('delete ZK node %s' % self._Master_path)\n                return True\n            time.sleep(10)\n\n    def takeMinAMaster(self):\n        if self._Type == 'A':\n            address = \"{\\\"Name\\\":\\\"%s\\\"}\" % self._Name\n            address = address.encode('utf-8')\n            # NodeExistsError is defined in kazoo.exceptions, not on the client object\n            from kazoo.exceptions import NodeExistsError\n            try:\n                self._ZK.create(self._Master_path, address, None, ephemeral=False, sequence=False, makepath=True)\n            except NodeExistsError:\n                data, stat = self._ZK.get(self._Master_path)\n                if data:\n                    self._Current_mina_master = eval(data.decode(\"utf-8\"))['Name']\n                    logger.debug('current mina is %s' % self._Current_mina_master)\n                return False\n            else:\n                # claim mastership only when create() succeeded; the previous finally\n                # block returned True unconditionally and swallowed the False above\n                self._Current_mina_master = self._Name\n                logger.debug('current mina is itself i.e %s.' % self._Name)\n                return True\n\n    def processTrainTaskAssign(self):\n        while True:\n            datas = self.startMonitorMySQL()\n            if len(datas):\n                for aboxdata in datas:\n                    oneabox = self.findOneIdleABox()\n                    if oneabox == '':\n                        param = (3, aboxdata.split(\"_\")[0], aboxdata.split(\"_\")[-1], 0)\n                        logger.info('untrained kb %s is ready for assigned in min A box called %s' % (aboxdata.split(\"_\")[0], self._Name))\n                        if mymysql.myupdate(self._Conn, param):\n                            logger.info('success for update training mission %s from status 0 to 3.' % aboxdata.split(\"_\")[0])\n                            if self.assignOneTrainTasktoABox(self._Name, aboxdata):\n                                mymysql.myclose(self._Conn)\n                                self._Conn = None\n                                self._ZK.delete(self._Master_path, recursive=True)\n                                logger.debug('delete ZK node %s' % self._Master_path)\n                                return True\n                            else:\n                                param = (0, aboxdata.split(\"_\")[0], aboxdata.split(\"_\")[-1], 3)\n                                mymysql.myupdate(self._Conn, param)\n                                logger.info('failure for update training mission %s. so from status 3 to 0.' % aboxdata.split(\"_\")[0])\n                    else:\n                        param = (3, aboxdata.split(\"_\")[0], aboxdata.split(\"_\")[-1], 0)\n                        logger.info('untrained kb %s is ready for assigned in a A idle box called %s' % (aboxdata.split(\"_\")[0], oneabox))\n                        if mymysql.myupdate(self._Conn, param):\n                            logger.info('success for update training mission %s from status 0 to 3.' % aboxdata.split(\"_\")[0])\n                            label = self.assignOneTrainTasktoABox(oneabox, aboxdata)\n                            if not label:\n                                param = (0, aboxdata.split(\"_\")[0], aboxdata.split(\"_\")[-1], 3)\n                                mymysql.myupdate(self._Conn, param)\n                                logger.info('failure for update training mission %s. so from status 3 to 0.' 
% aboxdata.split(\"_\")[0])\n time.sleep(10)\n\n def findOneIdleABox(self):\n oneidleabox = ''\n children = self._ZK.get_children(\"/%s/%s\" % (self._Type, self._VName))\n logger.debug(\"find one idle A box There are %s children with names %s\" % (len(children), children))\n random.shuffle(children)\n for child in children:\n child_path = \"/%s/%s/%s\" % (self._Type, self._VName, child)\n data, stat = self._ZK.get(child_path)\n if eval(data.decode(\"utf-8\"))['Target'] == 'Null':\n if operator.ne(child, self._Name):\n oneidleabox = child\n logger.debug('one idle A box is %s.' % oneidleabox)\n break\n return oneidleabox\n\n def assignOneTrainTasktoABox(self, oneabox, aboxdata):\n oneabox_path = \"/%s/%s/%s\" % (self._Type, self._VName, oneabox)\n oneabox_address = \"{\\\"Target\\\":\\\"%s\\\"}\" % aboxdata\n oneabox_address = oneabox_address.encode('utf-8')\n data, stat = self._ZK.get(oneabox_path)\n if eval(data.decode(\"utf-8\"))['Target'] == 'Null':\n self._ZK.set(oneabox_path, oneabox_address)\n logger.info('assign kb %s to A box %s' % (aboxdata, oneabox))\n return True\n else:\n logger.error('Error:A box %s is busy and can not be assigned.' % oneabox)\n return False\n\n def InitialMinANode(self):\n if self._ZK.exists(self._Master_path):\n self._ZK.delete(self._Master_path, recursive=True)\n logger.info('ZK node %s is deleted.' % self._Master_path)\n\n def InitialABOXNode(self):\n address = \"{\\\"Target\\\":\\\"Null\\\"}\"\n address = address.encode('utf-8')\n if self._ZK.exists(self._Path):\n self._ZK.delete(self._Path, recursive=True)\n self._ZK.create(self._Path, address, None, ephemeral=False, sequence=False, makepath=True)\n logger.info('create a A box node: %s, data: %s' % (self._Path, address.decode(\"utf-8\")))\n\n def InitialBBOXNode(self):\n address = \"{\\\"Target\\\":\\\"Null\\\",\\\"Add\\\":\\\"%s:%s/%s\\\",\\\"status\\\":\\\"0\\\",\\\"update_time\\\":\\\"%f\\\"}\" % (self._IP, self._Port,self._ID,time.time())\n address = address.encode('utf-8')\n if self._ZK.exists(self._Path):\n self._ZK.delete(self._Path, recursive=True)\n self._ZK.create(self._Path, address, None, ephemeral=False, sequence=False, makepath=True)\n logger.info('create a B box node: %s, data: %s' % (self._Path, address.decode(\"utf-8\")))\n vmknode = \"/%s/%s\" % (HOST['name'], CBOX['Bk'])\n if self._ZK.exists(vmknode):\n self._ZK.delete(vmknode, recursive=True)\n self._ZK.create(vmknode, None, None, ephemeral=False, sequence=False, makepath=True)\n logger.info('create a VM/k node: %s, not data. ' % vmknode)\n\n def startMonitorMySQL(self):\n self._Conn = mymysql.myconnect(KBDATABASES)\n train_tasks = []\n param = (0, 0)\n selectresult = mymysql.myselect(self._Conn, param)\n for row in selectresult:\n if str(row[1]).isdigit():\n train_tasks.append(\"%s_%s\" % (row[0], row[1]))\n logger.info('current train tasks is %s from mysql.' % train_tasks)\n return train_tasks\n\n def updateselfZKBBox(self, status):\n oneabox_path = self._Path\n data, _ = self._ZK.get(oneabox_path)\n oneabox_address = \"{\\\"Target\\\":\\\"%s\\\",\\\"Add\\\":\\\"%s\\\",\\\"status\\\":\\\"%s\\\",\\\"update_time\\\":\\\"%f\\\"}\" % (eval(data.decode(\"utf-8\"))['Target'], eval(data.decode(\"utf-8\"))['Add'],str(status),time.time())\n oneabox_address = oneabox_address.encode('utf-8')\n self._ZK.set(oneabox_path, oneabox_address)\n logger.info('success update B Box node %s with data %s.' 
% (oneabox_path, oneabox_address))\n\n def updateselfZKBBoxTarget(self, target, status):\n # updating 'oneabox' B tpye node data in ZK Server\n oneabox_path = self._Path\n data, _ = self._ZK.get(oneabox_path)\n oneabox_address = \"{\\\"Target\\\":\\\"%s\\\",\\\"Add\\\":\\\"%s\\\",\\\"status\\\":\\\"%s\\\",\\\"update_time\\\":\\\"%f\\\"}\" % (target, eval(data.decode(\"utf-8\"))['Add'],str(status),time.time())\n oneabox_address = oneabox_address.encode('utf-8')\n self._ZK.set(oneabox_path, oneabox_address)\n logger.info('success update B')\n\n def stop(self):\n self._MonitorRunning = False\n self._MinARunning = False\n self.stopZK()\n\n def initcbot(self, kbname, onlyread=False):\n try:\n self._Chatbot = ChatBot(self._Name,\n storage_adapter=CHATTERBOT['storage_adapter'],\n filters=['chatterbot.filters.RepetitiveResponseFilter'],\n database_uri=KGDATABASES['database_uri'],\n database='ai_%s' % kbname,\n read_only=onlyread,)\n # logger.info(self._Chatbot)\n except Exception as msg:\n logger.info('Failure to initialize Chatterbot.', exc_info=True)\n logger.error(msg)\n\n def preprocess(self, sentence, companyid=None):\n if ISFENCI:\n if ISSYMS:\n return fenci.symp_sentence(sentence, companyid)\n else:\n return sentence\n else:\n return sentence\n\n def trainkb(self, kbname):\n self.initcbot(kbname)\n try:\n logger.info(\"start set trainer\")\n self._Chatbot.set_trainer(ListTrainer)\n except Exception as msg:\n logger.error(msg)\n logger.info(\"start set trainer\")\n a = 0\n param = (kbname.split(\"_\")[0])\n selectresult = mymysql.myselectqas(self._Conn, param)\n company_id = mymysql.myselectcpid(self._Conn, param)\n logger.debug('start training the knowdata: %s and the companyid: %s.' % (kbname, company_id[0]))\n b = len(selectresult)\n for row in selectresult:\n answer = \"%s@%s\" % (row[2], row[0])\n question = self.preprocess(row[1], company_id[0])\n self._Chatbot.train([question,answer])\n logger.debug('Train: %d, %s --> %s.' % (a,question,answer))\n a = a + 1\n\n if a >= b:\n logger.info('success training.')\n return True\n else:\n logger.info('failure training.')\n return False\n\n def deleteZKnodedata(self):\n #oneabox_path = \"/%s/%s/%s\" % (self._Type, self._VName, self._Name)\n oneabox_path = self._Path\n oneabox_address = \"{\\\"Target\\\":\\\"Null\\\"}\"\n oneabox_address = oneabox_address.encode('utf-8')\n self._ZK.set(oneabox_path, oneabox_address)\n logger.info('A box %s turns busy into idle.' % self._Path)\n\n def startBZKmonitor(self):\n children = self._ZK.get_children(\"/%s/%s\" % (self._Type, self._VName))\n #list(map(lambda child: self._ZK.get(child_path), children))\n for child in children:\n child_path = \"/%s/%s/%s\" % (self._Type, self._VName, child)\n data, stat = self._ZK.get(child_path)\n # if eval(data.decode(\"utf-8\"))['status'] == str(0):\n kbid = eval(data.decode(\"utf-8\"))['Target']\n if operator.ne(kbid,'Null'):\n if time.time() > float(eval(data.decode(\"utf-8\"))['update_time']) + TIMERHOURS:\n onebbox_address = \"{\\\"Target\\\":\\\"Null\\\",\\\"Add\\\":\\\"%s\\\",\\\"status\\\":\\\"0\\\",\\\"update_time\\\":\\\"%f\\\"}\" % (eval(data.decode(\"utf-8\"))['Add'], time.time())\n onebbox_address = onebbox_address.encode('utf-8')\n self._ZK.set(child_path, onebbox_address)\n logger.info('set B Box node %s is null and status 0 because of timeout a half hour.' 
% child_path)\n                    tmp_node = \"/%s/%s/%s/%s\" % (self._VName, CBOX['Bk'], kbid, child)\n                    if self._ZK.exists(tmp_node):\n                        transaction = self._ZK.transaction()\n                        transaction.delete(tmp_node)\n                        transaction.commit()\n                        logger.info('delete a VM/k/kb/Box node %s.' % tmp_node)","sub_path":"common/traincbox.py","file_name":"traincbox.py","file_ext":"py","file_size_in_byte":21440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"141412594","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n# from PyQt5.QtGui import\n\nPADDING = QtCore.QMargins(10,10,110,20)\nBACK_PADDING = QtCore.QMargins(0,0,100,10)\nFONT=QtGui.QFont('Arial',10)\nUSER_TEXT=QtGui.QColor('white')\nUSER_BACK=QtGui.QColor('#217eff')\nBOT_TEXT=QtGui.QColor('black')\nBOT_BACK=QtGui.QColor('#c8d0e3')\n\nclass MessageDelegate(QtWidgets.QStyledItemDelegate): \n    \"\"\"\n    -----------------------------------------------------------------------------------\n    MessageDelegate class:\n    \n    - Inherits from QStyledItemDelegate, see: https://doc.qt.io/qt-5/qstyleditemdelegate.html \n    - QStyledItemDelegate is an abstract class provided by the PyQt5 library for rendering items of a data model.\n      It must be reimplemented concretely to suit the intended use.\n    -----------------------------------------------------------------------------------\n    \"\"\"\n\n    def paint(self, painter, option, index):\n        sender_name, Text=index.model().data(index, QtCore.Qt.DisplayRole)\n        painter.setFont(QtGui.QFont(FONT))\n        painter.setPen(QtCore.Qt.NoPen)\n        if sender_name=='user':\n            option.displayAlignment = QtCore.Qt.AlignRight\n        if sender_name=='user':\n            painter.setBrush(USER_BACK)\n        elif sender_name=='bot':\n            painter.setBrush(BOT_BACK)\n        textrect=option.rect.marginsRemoved(PADDING)\n        backrect=option.rect.marginsRemoved(BACK_PADDING)\n        painter.drawRect(backrect)\n        if sender_name=='user':\n            painter.setPen(USER_TEXT)\n        elif sender_name=='bot':\n            painter.setPen(BOT_TEXT)\n        painter.drawText(textrect,QtCore.Qt.TextWordWrap,Text,)\n    def sizeHint(self, option, index):\n        _, text = index.model().data(index, QtCore.Qt.DisplayRole)\n        metrics = QtGui.QFontMetrics(FONT)\n        rect = option.rect\n        rect = metrics.boundingRect(rect, QtCore.Qt.TextWordWrap, text)\n        rect = rect.marginsAdded(PADDING)\n        return rect.size()\n\n\n##################################################################\n\nclass MessageModel(QtCore.QAbstractListModel):\n    \"\"\"\n    -----------------------------------------------------------------------------------\n    MessageModel class:\n    \n    - Inherits from QAbstractListModel, see: https://doc.qt.io/qt-5/qabstractlistmodel.html\n    - QAbstractListModel is an abstract class provided by the PyQt5 library for storing and handling the view's data.\n      It must be reimplemented concretely to suit the intended use.\n    -----------------------------------------------------------------------------------\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(MessageModel, self).__init__(*args, **kwargs)\n        self.Messages=[]\n    def data(self, index, role):\n        if (role == QtCore.Qt.DisplayRole):\n            return self.Messages[index.row()]\n    def rowCount(self, index):\n        return len(self.Messages)\n    def addMessage(self, sender_name, message):\n        if message:\n            self.Messages.append((sender_name, message))\n            self.layoutChanged.emit()\n    def count(self):\n        return len(self.Messages)\n    
","sub_path":"Interface/MessageBubble.py","file_name":"MessageBubble.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"130490677","text":"import logging\nimport sys\n\nfrom database import get_collections, insert_document, get_documents\nimport util\n\n# get a new logging utility for current module\n_logger = logging.getLogger(__name__)\n\n# main index logic\n# this function will be executed when user type \"$ carrers\" without any arguments\n\n\ndef run():\n print(\"principal functionality\")\n\n# executed when the given action does not exist\n\n\ndef not_found(options):\n print(\"La acción ingresada no existe. Por favor revisa la ayuda (-h --help) para ver las acciones disponibles.\")\n\n# View Information\n# this function will list documents depending of the option value\n# e.g. Input: $ consultor view courses\n# output: A list of every courses\ndef view(options):\n _logger.debug(\".view.start\")\n\n # reference to local actions (functions)\n local_actions = sys.modules[__name__]\n\n _logger.debug(\"local_actions: {}\".format(local_actions))\n\n if options.option is None:\n # execute view default function\n return view_default(options)\n\n view_action_name = \"view_{}\".format(options.option)\n\n _logger.debug(\"view_action_name = {}\".format(view_action_name))\n\n view_action = getattr(local_actions, view_action_name, view_default)\n\n _logger.debug(\"view_action {}\".format(view_action))\n\n # execute View Option Action function\n return view_action(options)\n\n# view default action\ndef view_default(options):\n print(\"default view option action\")\n\n# view colleges\ndef view_colleges(options):\n _logger.debug(\".view_colleges.start\")\n\n colleges = get_colleges()\n\n display_colleges(colleges)\n\n _logger.debug(\".view_colleges.end\")\n\n# view courses\ndef view_courses(options):\n _logger.debug(\".view_colleges.start\")\n\n courses = get_courses()\n\n _logger.debug(\"courses:\\n {}\".format(courses))\n\n display_courses(courses)\n\n _logger.debug(\".view_colleges.end\")\n\n# view careers\ndef view_careers(options):\n _logger.debug(\".view_consultor.start\")\n\n careers = get_careers()\n\n display_careers(careers)\n\n _logger.debug(\".view_consultor.end\")\n\ndef get_careers(filters = None):\n return get_documents(\"ctr_careers\", filters)\n\ndef get_colleges(filters = None):\n _logger.debug(\".search.colleges filters: {}\".format(filters))\n return get_documents(\"ctr_colleges\", filters)\n\ndef get_courses(filters = None):\n return get_documents(\"ctr_courses\", filters)\n\n# display careers in the shell\ndef display_careers(careers):\n # configuring table\n table_headers = [\"ID\", \"Nombre\", \"Descripción\"]\n table_rows = list(\n map(lambda course: [course[\"id_career\"],\n course[\"name\"], course[\"description\"]], careers)\n )\n\n # show info\n util.show_title(\"Resultado de Carreras\")\n util.show_table(table_rows, table_headers)\n\n# display courses in the shell\ndef display_courses(courses):\n # configuring table\n table_headers = [\"ID\", \"Nombre\", \"Descripción\"]\n table_rows = list(\n map(lambda course: [course[\"id_course\"],\n course[\"name\"], course[\"description\"]], courses)\n )\n\n # show info\n util.show_title(\"Resultado de Cursos\")\n util.show_table(table_rows, table_headers)\n\n# display colleges in the shell\ndef display_colleges(colleges):\n # configure table\n table_headers = [\"ID\", \"Nombre\", \"Descripción\"]\n table_rows = list(\n map(lambda 
college: [college[\"id_college\"], college[\"name\"], college[\"description\"]], colleges))\n\n # show info\n util.show_title(\"Resultado de Universidades\")\n util.show_table(table_rows, table_headers)\n\n# search in all collections\ndef search(options):\n _logger.debug(\".search.start\")\n\n # reference to local actions (functions)\n local_actions = sys.modules[__name__]\n\n _logger.debug(\"local_actions: {}\".format(local_actions))\n\n if options.option is None:\n # execute view default function\n print('Por favor ingresa una opción para realizar la busqueda. ejemplo: $ consultor buscar \"matematica\"')\n return\n\n _logger.debug(\".search.filter: {}\".format(options.option))\n\n filters = {\n \"$or\": [\n {\n \"name\": {\n \"$regex\": options.option,\n \"$options\": \"i\"\n },\n },\n {\n \"description\": {\n \"$regex\": options.option,\n \"$options\": \"i\"\n }\n }\n ]\n }\n\n colleges = get_colleges(filters)\n _logger.debug('.search.colleges')\n\n courses = get_courses(filters)\n _logger.debug('.search.courses')\n\n careers = get_careers(filters)\n _logger.debug('.search.careers')\n\n\n # display results\n display_colleges(colleges)\n display_careers(careers)\n display_courses(courses)\n","sub_path":"src/actions/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"516534600","text":"from django.contrib import admin\n\nfrom .models import ActivityStream\n\n\n@admin.register(ActivityStream)\nclass ActivityStreamAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'actor',\n 'actor_id',\n 'verb',\n 'object',\n 'object_id',\n 'target',\n 'target_id',\n 'deleted',\n 'exchange',\n 'routing_key',\n 'ack',\n 'created_at',\n 'updated_at',\n )\n list_filter = ('created_at', 'updated_at')\n date_hierarchy = 'created_at'\n","sub_path":"apps/activitystream/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"434917098","text":"from cmd import Cmd\nfrom hetio_graph import HetioGraph\nfrom hetio_mongo import HetioMongo\n\n'''\n@class Terminal\n\nDriver class for the Hetio Database. 
If you want to use different files to run\ndifferent test cases change them in the __init__ method.\n'''\nclass Terminal(Cmd):\n    \n    def __init__(self):\n        super().__init__()\n        self.prompt = 'HetioDB> '\n        self.is_initialized = False\n        self.mongo_db = HetioMongo('nodes.tsv', 'edges.tsv')\n        self.graph_db = HetioGraph(password='Temp1234$')\n        print('Welcome to the Hetio Database, type \\'init\\' to begin')\n\n    def do_init(self, args):\n        if self.is_initialized:\n            print('Hetio is already initialized')\n        else: \n            self.mongo_db.initialize_mongo()\n            self.graph_db.initialize_graph()\n            print('The Hetio Database is initialized')\n            self.is_initialized = True\n            self.help()\n        return\n    \n    def do_discover(self, args):\n        if self.is_initialized:\n            self.graph_db.discover_new_treatments() \n        else:\n            print('The HetioDB is not initialized') \n\n    def do_id(self, args):\n        if self.is_initialized:\n            self.mongo_db.find_disease('id', args)\n        else: \n            print('The HetioDB is not initialized')\n\n    def do_name(self, args):\n        if self.is_initialized:\n            self.mongo_db.find_disease('name', args)\n        else: \n            print('The HetioDB is not initialized')\n\n    def default(self, args):\n        if args != 'init' and not self.is_initialized:\n            print('The HetioDB is not initialized')\n            print('Enter \\'init\\' ')\n        else:\n            print('Enter \\'discover\\' to find all the hidden treatments')\n            print('Enter \\'id\\' to find all the information about a disease')\n            print('Enter \\'name\\' to find all the information about a disease')\n    \n    def do_exit(self, exit): \n        print('Goodbye!')\n        return True\n\n    def emptyline(self):\n        return \n\n    def help(self):\n        print('Enter \\'discover\\' to find all the hidden treatments')\n        print('Enter \\'id\\' to find all the information about a disease')\n        print('Enter \\'name\\' to find all the information about a disease')\n\nterm = Terminal()\nterm.cmdloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"50102012","text":"#http://norvig.com/spell-correct.html\nimport re, collections\nfrom lib import tweet\n\nclass Corrector:\n    def __init__(self,words):\n        if isinstance(words,str):\n            self.file = open(words,'r')\n        else:\n            self.file = words\n        self.NWORDS = self._train(self._words(self.file.read()))\n\n    def _words(self,text): \n        workingWord = text.split(\"/\")[0]\n        return re.findall('[a-z]+', workingWord.lower()) \n\n    def _train(self,features):\n        model = collections.defaultdict(lambda: 1)\n        for f in features:\n            model[f] += 1\n        return model\n\n    def _edits1(self,word):\n        alphabet = 'abcdefghijklmnopqrstuvwxyz'\n        splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\n        deletes = [a + b[1:] for a, b in splits if b]\n        transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]\n        replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]\n        inserts = [a + c + b for a, b in splits for c in alphabet]\n        return set(deletes + transposes + replaces + inserts)\n\n    def _known_edits2(self,word):\n        return set(e2 for e1 in self._edits1(word) for e2 in self._edits1(e1) if e2 in self.NWORDS)\n\n    def _known(self,words): return set(w for w in words if w in self.NWORDS)\n\n    def correct(self,inword):\n        word = str(inword)\n        candidates = self._known([word]) or self._known(self._edits1(word)) or self._known_edits2(word) or [word]\n        returnval = max(candidates, key=self.NWORDS.get)\n        if isinstance(inword,tweet.Word):\n            returnval = tweet.Word(returnval)\n            
inword.copy_attributes(returnval)\n        return returnval","sub_path":"cikm - twitter sentiment analysis/Python2-FullCleaner-7-22-11/lib/spell.py","file_name":"spell.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"366748157","text":"# Implement a class to hold room information. This should have name and\n# description attributes.\n\nclass Room:\n\n    def __init__(self, name, description):\n        self.name = name\n        self.description = description\n        self.n_to = None\n        self.s_to = None\n        self.e_to = None\n        self.w_to = None\n        self.item = []\n\n    def get_direction(self, direction):\n        \"\"\"This function points the player to the inputted direction \"\"\"\n        if direction in [\"n\", \"N\"]:\n            return self.n_to\n        elif direction in [\"s\", \"S\"]:\n            return self.s_to\n        elif direction in [\"w\", \"W\"]:\n            return self.w_to\n        elif direction in [\"e\", \"E\"]:\n            return self.e_to\n        else:\n            return None\n","sub_path":"src/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"247133862","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/7/25 15:27\n# @Author : lyx\n# @File : sh_lyx_0731_python_logging_handle_class.py\n# @Project : Django_Project\n\n# Encapsulates operations on the log file\nimport logging\nimport os\n\nfrom scripts.handle_config import conf_operate\nfrom scripts.contants import LOG_PATH\n\n# Log file\nLOG_FILE_PATH = os.path.join(LOG_PATH, \"lyx_logging_file.log\")\n\n\nclass HandleLogging:\n\n    def __init__(self):\n        logger_name = conf_operate.get_config_data(\"log file\", \"logger_name\")  # logger name\n        # log_file = conf_operate.get_config_data(\"log file\", \"log_file_path\")  # log file name\n        log_format = conf_operate.get_config_data(\"log file\", \"log_format\")  # log output format\n\n        # 1. Define the logger\n        self.my_logger = logging.getLogger(logger_name)\n\n        # 2. Set the logger's level\n        # Available levels: NOTSET(0), DEBUG(10), INFO(20), WARNING(30), ERROR(40), CRITICAL(50)\n        self.my_logger.setLevel(logging.DEBUG)\n        # Alternatively, specify the level as a string\n        # self.my_logger.setLevel(\"DEBUG\")\n\n        # 3. Define the output handlers\n        # console handler\n        console_handle = logging.StreamHandler()\n        # file handler\n        file_handle = logging.FileHandler(LOG_FILE_PATH, mode=\"a\", encoding=\"utf-8\")\n\n        # 4. Set the log level for each handler\n        console_handle.setLevel(logging.ERROR)\n        file_handle.setLevel(logging.DEBUG)\n\n        # 5. Define the log output format\n        # %(asctime)s is the time; %(levelname)s the log level; %(module)s the current module name; %(lineno)d the source line number; %(message)s the log message\n        # log_formatter = logging.Formatter(\"%(asctime)s : %(levelname)s %(module)s :%(lineno)d - %(message)s\")\n        log_formatter = logging.Formatter(log_format)\n\n        console_handle.setFormatter(log_formatter)\n        file_handle.setFormatter(log_formatter)\n        \n        # 6. 
Attach the handlers to the logger\n        self.my_logger.addHandler(console_handle)\n        self.my_logger.addHandler(file_handle)\n\n    def get_logger(self):\n        \"\"\"\n        Return the logger.\n        :return:\n        \"\"\"\n        return self.my_logger\n\n\nlog_operate = HandleLogging().get_logger()\n\nif __name__ == '__main__':\n    first_log = HandleLogging()\n    my_logger = first_log.get_logger()\n    my_logger.debug(\"This is my test debug-level log message\")\n    my_logger.info(\"This is my test info-level log message\")\n    my_logger.warning(\"This is my test warning-level log message\")\n    my_logger.error(\"This is my test error-level log message\")\n    my_logger.critical(\"This is my test critical-level log message\")\n\n\n","sub_path":"QianChengDai_API_Project/scripts/handle_logging.py","file_name":"handle_logging.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"60234451","text":"\"\"\"\nThis file demonstrates writing tests using the unittest module. These will pass\nwhen you run \"manage.py test\".\n\nReplace this with more appropriate tests for your application.\n\"\"\"\n\nfrom django.test import TestCase, SimpleTestCase\nfrom contact.forms import ContactView\nfrom contact.models import ContactForm\nfrom datetime import datetime, timedelta\n# from django.test import RequestFactory\n# from django.core.urlresolvers import resolve\n# from contact.views import contact\n# from django.shortcuts import render_to_response\n\nclass SimpleTest(TestCase):\n    def test_basic_addition(self):\n        \"\"\"\n        Tests that 1 + 1 always equals 2.\n        \"\"\"\n        self.assertEqual(1 + 1, 2)\n\nclass UserModelTest(TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        super(UserModelTest,cls).setUpClass()\n        ContactForm(email=\"a@b.com\", name=\"Ab\").save()\n        ContactForm(email='test@model.com', name='Test').save()\n        cls.firstUser=ContactForm(\n            name='One',\n            email='one@b.com',\n            timestamp=datetime.today()+timedelta(days=3)\n        )\n        cls.firstUser.save()\n\n    def test_contactform_str_returns_email(self):\n        self.assertEquals('one@b.com',str(self.firstUser))\n\n    def test_ordering(self):\n        contacts=ContactForm.objects.all()\n        self.assertEquals(self.firstUser,contacts[0])\n\nclass ContactViewTests(SimpleTestCase):\n    def test_displayed_fields(self):\n        expected_fields=['name','email','topic','message']\n        self.assertEquals(ContactView.Meta.fields,expected_fields)\n\n\n# class ContactPageTests(TestCase):\n#\n#     # SetUp\n#     #*************#\n#\n#     @classmethod\n#     def setUpClass(cls):\n#         request_factory=RequestFactory()\n#         cls.request=request_factory.get('/contact/')\n#         cls.request.session={}\n#\n#     #Testing routes\n#     #***************#\n#     def test_root_resolvers_to_contact_view(self):\n#         contact_page=resolve('/contact/')\n#         self.assertEqual(contact_page.func, contact)\n#\n#     def test_returns_appropriate_contact_html_response_code(self):\n#         resp=contact(self.request)\n#         self.assertEquals(resp.status_code,200)\n","sub_path":"chp9/tests/contact/testContactModels.py","file_name":"testContactModels.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"228561980","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport imageio\nimport sklearn\nimport os\nimport cv2\n\n# More tools for debugging and image processing\nfrom utils import crop_image,normal_image,process_image\nfrom debug import ipsh\n\n# Run on CPU or comment out for GPU\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\n# Global Image Keeper\n\nimage_orgi = 0\nimageshift = 
0\n# Set your Hyperparameters\nEPOCHS = 1\nbatch_size = 128 # Higher Batchsize is not possible with 2GB RAM\n#learning_rate = 0.0001\ncropx_top = 50\ncropx_bottom = 140\nx_new = cropx_bottom - cropx_top\ny_new = 320\narg_div = 18 # Increasing the argumentation div will lower the probability for image argumentation\n\n# Get your Samples\nsamples = []\nwith open('./data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n# Datashuffle and train split\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\nipsh() # Embedded ipython console\ndef generator(samples,batch_size=128):\n num_samples = len(samples)\n while 1:\n # Shuffle the trainings data\n shuffle(samples)\n # Split the data in batches\n for offset in range(0,num_samples,batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n steerings = []\n # Process every batch\n for batch_sample in batch_samples:\n # Get the front camera image\n filename1 = batch_sample[0].split('/')[-1]\n \"\"\"\n # Load images from car side cameras\n filename2 = batch_sample[1].split('/')[-1]\n filename3 = batch_sample[2].split('/')[-1]\n \"\"\"\n # Get images from fixed path that is not the same as csv record\n current_path = './data/IMG/'\n steering_center = float(batch_sample[3])\n \"\"\"\n # Not needed when training data is generated with mouse\n if abs(steering_center) < 0.01:\n print(\"Jump over straight data\")\n continue\n \"\"\"\n #Load the Images\n image = cv2.imread(current_path + filename1)\n # DNN shows good results in the YUV colorspace\n image = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)\n # Just do the cropping for all images since normalization\n image = crop_image(image)\n\n # Flip Image and steering\n if np.random.randint(arg_div) == 1:\n steering_center = -steering_center\n image = np.fliplr(image)\n\n # Change the brightness of the images\n if np.random.randint(arg_div) == 1:\n image = image + np.random.random_integers(-20,20)\n\n # Shift the images and the steerings\n if np.random.randint(arg_div) == 1:\n dx=15\n dy=15\n # Randomly change the shifting\n sx = dx * (np.random.rand() - 0.5)\n sy = dy * (np.random.rand() - 0.5)\n # Steering angle shifter\n steering_center += sx * 0.002\n mask = np.float32([[1, 0, sx], [0, 1, sy]])\n height, width = image.shape[:2]\n image = cv2.warpAffine(image, mask, (width, height))\n\n # Append training data\n steerings.append(steering_center)\n images.append(image)\n\n # Keras needs np array datatype\n X_train = np.array(images)\n y_train = np.array(steerings)\n\n # Shuffle the training data before return to the generator\n yield sklearn.utils.shuffle(X_train,y_train)\n\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense,Lambda,Cropping2D,Conv2D,Dropout\n\nmodel = Sequential()\n\n # 5 Conv2D Layers with weight regularizer\n # It draws samples from a truncated normal distribution centered on 0 with\n # Activation Function elu\n # Low Droprelu\nmodel.add(Lambda(lambda x: x / 255.0 -0.5,input_shape=(x_new, y_new, 3)))\nmodel.add(Conv2D(24, 5, 5, activation='relu',subsample=(2, 2)))\nmodel.add(Conv2D(36, 5, 5, activation='relu',subsample=(2, 2)))\nmodel.add(Conv2D(48, 5, 5, activation='relu',subsample=(2, 2)))\nmodel.add(Conv2D(64, 3, 3, 
activation='relu',subsample=(1, 1)))\nmodel.add(Conv2D(64, 3, 3, activation='relu',subsample=(1, 1)))\nmodel.add(Flatten())\n\n    # 5 Fully Connected Layers\n    # Dropout with drop probability of .5 and .25\n\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dropout(.5))\nmodel.add(Dense(50, activation='relu'))\nmodel.add(Dropout(.5))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dropout(.25)) # Output\nmodel.add(Dense(1, activation='linear'))\n    # One single steering output as a linear function\n\n\nmodel.summary()\nmodel.compile(loss='mse', optimizer='Adam')\nhistory = model.fit_generator(train_generator, samples_per_epoch= len(train_samples), validation_data=validation_generator,\n                    nb_val_samples=len(validation_samples), nb_epoch=EPOCHS,verbose=1)\n\nmodel.save('model.h5')\n\nipsh()\n# Saving the figure does not work from the docker container\nfig = plt.figure()\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model mean squared error loss')  # use pyplot here; a Figure object has no title()/ylabel()/xlabel()\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nfig.savefig('loss_history.png')\nplt.close(fig)\n\n#history_object = model.fit_generator(train_generator, samples_per_epoch =\n#    len(train_samples), validation_data =\n#    validation_generator,\n#    nb_val_samples = len(validation_samples),\n#    nb_epoch=5, verbose=1)\n#\n#### print the keys contained in the history object\n#print(history_object.history.keys())\n#\n#### plot the training and validation loss for each epoch\n#plt.plot(history_object.history['loss'])\n#plt.plot(history_object.history['val_loss'])\n#plt.title('model mean squared error loss')\n#plt.ylabel('mean squared error loss')\n#plt.xlabel('epoch')\n#plt.legend(['training set', 'validation set'], loc='upper right')\n#plt.show()\n","sub_path":"clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":6808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"237814580","text":"###\n### Author: Sornali Rahman\n### Class: CSc 110\n### Description: This program displays a landscape in which a\n###              'vanishing point' may be seen while dragging the\n###              landscape up and down.\n###\n\nfrom graphics import graphics\n\ndef draw_sky_and_land(gui):\n    '''\n    Draws the sky and the grass.\n    gui should be a graphics object.\n    '''\n    gui.rectangle(0, 0, 600, 600, 'sky blue')\n\n    gui.ellipse(400, 90, 90, 90, 'yellow')\n\n    #cloud1\n    gui.ellipse(70, 70, 110, 60, 'white')\n    gui.ellipse(50, 90, 110, 60, 'white')\n\n    #cloud2\n    gui.ellipse(285, 80, 140, 80, 'white')\n    gui.ellipse(310, 50, 110, 60, 'white')\n\n    #cloud 3\n    gui.ellipse(445, 160, 140, 80, 'white')\n    gui.ellipse(470, 140, 110, 60, 'white')\n\n\n\ndef draw_mountain(gui):\n    '''\n    This function should draw a mountain.\n    gui should be a graphics object.\n    '''\n    y = gui.mouse_y / 3\n\n    shape_y1 = 0 + y\n\n\n    # Use this to shift the mountain as it shrinks/grows\n\n    gui.triangle(250, shape_y1, 0, 500, 500, 500, 'brown')\n\n    #this draws the horizon\n    gui.rectangle(0, 350, 500, 500, 'green')\n\ndef draw_grass(gui):\n    '''\n    This function draws the blades of grass\n    gui should be a graphics object\n    '''\n    i = 0\n    while i < 500:\n        offset = i * 15\n        if i % 2 == 0:\n            gui.line(offset, 350, offset, 250, 'dark green', 25)\n        i += 1\n\n\ndef draw_trees_and_pond(gui):\n\n    y = gui.mouse_y /3\n    shift = gui.mouse_y / 2\n\n    #this draws the trees and allows the shift\n    gui.rectangle(100,380, 30, 70, 'brown')\n    gui.rectangle(370,380, 30, 70, 'brown')\n    
gui.ellipse(115,350,80,110,'dark green')\n gui.ellipse(385,350,80,110,'dark green')\n\n #this draws the pond and allows the shift\n gui.ellipse(250,390,180-y,20,'blue')\n\n\ndef main():\n gui = graphics(500, 500, 'vanishing point')\n while True:\n gui.clear()\n draw_sky_and_land(gui)\n draw_mountain(gui)\n draw_grass(gui)\n draw_trees_and_pond(gui)\n gui.update_frame(60)\n\nmain()","sub_path":"Vanishing Point/vanishing_point.py","file_name":"vanishing_point.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"600689880","text":"from selenium import webdriver\nfrom selenium.webdriver import ActionChains\n\ndriver = webdriver.Chrome()\ndriver.maximize_window()\ndriver.get(\"http://www.actimind.com\")\ndriver.implicitly_wait(30)\n\nareaOf = driver.find_element_by_xpath(\"//a[@href='areas-expertise.html ']\")\nloc = areaOf.location\nxOffset = loc.get(\"x\")\nyOffset = loc.get(\"y\")\n\naction = ActionChains(driver)\naction.move_by_offset(xOffset,yOffset)\naction.perform()\n\ndriver.close()","sub_path":"SelenProjects2/MouseActionPackage/SelenMouse2.py","file_name":"SelenMouse2.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"194015369","text":"from .comparison import Comparison\nfrom .state_base import StateBase\n\nclass OrChoiceRule(object):\n\t\"\"\"\n\tTests a list of Comparisons in the order of the comparison list provided\n\n\tReturns True if any Comparisons return True, otherwise False\n\t\"\"\"\n\n\tdef __init__(self, ComparisonList=[], NextState=None):\n\t\tself._comparison_list = None\n\t\tself._next_state = None\n\t\tself.set_comparison_list(ComparisonList)\n\t\tself.set_next_state(NextState)\n\n\tdef get_comparison_list(self):\n\t\treturn self._comparison_list\n\n\tdef set_comparison_list(self, ComparisonList=[]):\n\t\tif not ComparisonList:\n\t\t\traise Exception(\"ComparisonList must not be None for a OR ChoiceRule\")\n\t\tif not isinstance(ComparisonList, list):\n\t\t\traise Exception(\"ComparisonList must be a List of Comparison objects\")\n\t\tif len(ComparisonList) == 0:\n\t\t\traise Exception(\"ComparisonList must be a non-empty List of Comparison objects\")\n\t\tfor o in ComparisonList:\n\t\t\tif not isinstance(o, Comparison):\n\t\t\t\traise Exception(\"ComparisonList must only contain Comparison objects\")\n\t\tself._comparison_list = ComparisonList\n\n\tdef get_next_state(self):\n\t\treturn self._next_state\n\n\tdef set_next_state(self, NextState=None):\n\t\tif NextState and not isinstance(NextState, StateBase):\n\t\t\traise Exception(\"Invalid NextState for ChoiceRule, which must be subclass of StateBase\")\n\t\tself._next_state = NextState\n\n\tdef validate(self):\n\t\tif not self.get_next_state():\n\t\t\traise Exception(\"Invalid ChoiceRule - must declare NextState\")\n\t\tfor comparison in self.get_comparison_list():\n\t\t\tcomparison.validate()\n\n\tdef to_json(self):\n\t\tl = []\n\t\tfor comparison in self.get_comparison_list():\n\t\t\tl.append(comparison.to_json())\n\t\treturn {\n\t\t\t\"Or\" : l,\n\t\t\t\"Next\" : self.get_next_state().get_name()\n\t\t}\n\n\tdef clone(self, NameFormatString=\"{}\"):\n\t\t\"\"\"\n\t\tReturns a clone of this instance.\n\n\t\tThe NameFormatString will be used to clone the next state that this Choice Rule will initiate if triggered.\n\n\t\t:param NameFormatString: [Required] The naming template to be applied to generate the name of the next 
state.\n\t\t:type NameFormatString: str\n\n\t\t:returns: ``OrChoiceRule`` -- A new instance of this instance and any other instances in its branch.\n\t\t\"\"\"\n\t\tif not NameFormatString:\n\t\t\traise Exception(\"NameFormatString must not be None (step '{}')\".format(self.get_name()))\n\t\tif not isinstance(NameFormatString, str):\n\t\t\traise Exception(\"NameFormatString must be a str (step '{}')\".format(self.get_name()))\n\n\t\tc = OrChoiceRule()\n\n\t\tif self.get_comparison_list():\n\t\t\tc.set_comparison_list(ComparisonList=[ c.clone() for c in self.get_comparison_list() ])\n\n\t\tif self.get_next_state():\n\t\t\tc.set_next_state(NextState=self.get_next_state().clone(NameFormatString))\n\n\t\treturn c\n","sub_path":"awssl/or_choice_rule.py","file_name":"or_choice_rule.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"559572677","text":"import logging\nimport os\nimport sys\nimport time\nimport urllib\nfrom tesserocr import PyTessBaseAPI\n\nimport prawcore\nimport wget\nfrom praw import Reddit\n\nfrom tor import config\nfrom tor.core.initialize import configure_logging\nfrom tor.core.initialize import configure_redis\nfrom tor.core.initialize import configure_tor\nfrom tor.helpers.misc import _\nfrom tor.helpers.misc import explode_gracefully\nfrom tor.helpers.reddit_ids import clean_id\nfrom tor.strings.ocr import base_comment\n\n\"\"\"\nGeneral notes for implementation.\n\nProcess:\n\nu/transcribersofreddit identifies an image\n config.redis.rpush('ocr_ids', 'ocr::{}'.format(post.fullname))\n config.redis.set('ocr::{}'.format(post.fullname), result.fullname)\n \n...where result.fullname is the post that u/transcribersofreddit makes about\nthe image.\n\nBot:\n every interval (variable):\n thingy = config.redis.lpop('ocr_ids')\n u_tor_post_id = config.redis.get(thingy)\n \n get image from thingy\n download it\n ...OCR magic on thingy...\n save magic\n delete image\n \n u_tor_post_id.reply(ocr_magic)\n\"\"\"\n\nconfig.ocr_delay = 10\n\n\ndef process_image(local_file):\n with PyTessBaseAPI() as api:\n api.SetImageFile(local_file)\n text = api.GetUTF8Text()\n\n confidences = api.AllWordConfidences()\n if not confidences or len(confidences) == 0:\n # we have an image, but it *really* couldn't find anything, not\n # even false positives.\n return None\n\n logging.debug('Average of confidences: {}'.format(\n sum(confidences) / len(confidences))\n )\n\n # If you feed it a regular image with no text, more often than not\n # you'll get newlines and spaces back. We strip those out to see if\n # we actually got anything of substance.\n if text.strip() != '':\n return text\n else:\n return None\n\n\ndef chunks(s, n):\n \"\"\"\n Produce n-character chunks from s.\n :param s: incoming string.\n :param n: number of characters to cut the chunk at.\n \"\"\"\n for start in range(0, len(s), n):\n yield s[start:start+n]\n\n\ndef main(config):\n while True:\n try:\n time.sleep(config.ocr_delay)\n new_post = config.redis.lpop('ocr_ids')\n if new_post is None:\n logging.debug('No post found. Sleeping.')\n # nothing new in the queue. 
Wait and try again.\n continue\n\n # We got something!\n new_post = new_post.decode('utf-8')\n logging.info(\n 'Found a new post, ID {}'.format(new_post)\n )\n image_post = r.submission(id=clean_id(new_post))\n\n # download image for processing\n # noinspection PyUnresolvedReferences\n try:\n filename = wget.download(image_post.url)\n except urllib.error.HTTPError:\n # what if the post has been deleted? Ignore it and continue.\n continue\n\n try:\n result = process_image(filename)\n except RuntimeError:\n logging.warning(\n 'Either we hit an imgur album or no text was returned.'\n )\n os.remove(filename)\n continue\n\n logging.debug('result: {}'.format(result))\n\n # delete the image; we don't want to clutter up the hdd\n os.remove(filename)\n\n if not result:\n logging.info('Result was none! Skipping!')\n # we don't want orphan entries\n config.redis.delete(new_post)\n continue\n\n tor_post_id = config.redis.get(new_post).decode('utf-8')\n\n logging.info(\n 'posting transcription attempt for {} on {}'.format(\n new_post, tor_post_id\n )\n )\n\n tor_post = r.submission(id=clean_id(tor_post_id))\n\n thing_to_reply_to = tor_post.reply(_(base_comment))\n for chunk in chunks(result, 9000):\n # end goal: if something is over 9000 characters long, we\n # should post a top level comment, then keep replying to\n # the comments we make until we run out of chunks.\n thing_to_reply_to = thing_to_reply_to.reply(_(chunk))\n\n config.redis.delete(new_post)\n\n except (\n prawcore.exceptions.RequestException,\n prawcore.exceptions.ServerError\n ) as e:\n logging.warning(\n '{} - Issue communicating with Reddit. Sleeping for 60s!'\n ''.format(e)\n )\n time.sleep(60)\n\n\nif __name__ == '__main__':\n r = Reddit('bot_ocr') # loaded from local praw.ini config file\n configure_logging(config)\n logging.basicConfig(\n filename='ocr.log'\n )\n\n config.redis = configure_redis()\n\n # the subreddit object shortcut for TranscribersOfReddit\n tor = configure_tor(r, config)\n\n try:\n main(config)\n\n except KeyboardInterrupt:\n logging.info('Received keyboard interrupt! Shutting down!')\n sys.exit(0)\n\n except Exception as e:\n explode_gracefully('u/transcribot', e, tor)\n","sub_path":"tor/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"571121766","text":"\"\"\"\nMain script of overall visualization\n\nSupported Keywords in SWD file\n MASS = 'logarith of Lagrangean mass from surface lgm'\n R = 'logarith of radius cm lgr'\n V = 'velocity in 1e8 cm/s v8'\n T = 'lg T'\n TRAD = 'lg Trad 7 when nonzero'\n RHO = 'lg rho in 1e-6 gcc'\n P = 'lg P'\n QV = 'lg qv artificial viscosity'\n ENG = 'lg eng12 in 1e12'\n LUM = 'luminosity L_r lum40 in units 1e40 erg/s'\n KAPPA_ROSSELAND = 'kappa_Rosseland cap'\n\nSupported Keywords in ABN file\n H = 'hydrogen',\n He = 'helium',\n C = 'Carbon',\n N = 'nitrogen',\n O = 'Oxygen',\n Ne = 'Neon',\n Mg = 'magnesium',\n Si = 'Silicon',\n S = 'Sulfur',\n Ar = 'Argon',\n Ca = 'calcium',\n Fe = 'iron',\n Ni = 'Nickel'\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport argparse\nimport matplotlib as mpl\n\nfrom matplotlib import pyplot as plt\n\nfrom stella.core.plot import get_plotter\n\nfrom stella.utils.config import SWD\nfrom stella.utils.config import ABN\n\n\ndef main(args):\n # write plot configuration\n \"\"\"\n Possible Configurations\n 1. 
log_time (bool): log scale of time in contour if True\n 2. log_mass (bool): log (M_tot - M_r) if True\n 3. photosphere (bool): plot photosphere if True\n 4. magnitude (bool): plot Mbol, Mu... magnitude if True\n 5. log_time_mag (bool): log scale of time in magnitude plot if True\n 6. transparency (0.0 - 1.0): transparency of contour (default 0.6)\n 0.0 is the most transparent\n 7. title: title of plot\n if empty, it automatically parse from target key\n if you use arithmetic operation like SWD.V * 2,\n you must type title (cannot auto-inference) in latex format\n \"\"\"\n\n # this is example configuration\n # actually, you do not have to type all configurations\n # (can skip some keywords or leave just empty dictionary)\n configuration = {\n 'log_time': False,\n 'log_mass': False,\n 'photosphere': True,\n 'magnitude': True,\n 'log_time_mag': False,\n 'transparency': 0.6,\n 'title': r'$0.5 V^{2}$',\n # do not modify below configurations\n 'save': args.save,\n }\n\n if not args.save:\n plt.ion()\n\n if args.path is None:\n raise ValueError('Must pass path')\n elif args.path[-1] == '/':\n args.path = args.path[:-1]\n\n if args.prefix is None:\n args.prefix = args.path.split('/')[-1]\n\n plotter = get_plotter(root=args.path, prefix=args.prefix)\n plotter.plot(0.5 * SWD.V**2, **configuration)\n plotter.plot_abn_data(ABN.H, threshold=0.2, **configuration)\n plotter.plot_abn_data((ABN.C, ABN.O), threshold=0.1, **configuration)\n plotter.plot_abn_data((ABN.Ni, ABN.O, ABN.C), threshold=0.7, **configuration)\n\n if not args.save:\n input('Press Enter to exit ')\n else:\n figname = args.figname\n if figname is None:\n figname = args.path.split('/')[-1] + '.png'\n\n plotter.save(figname)\n\n\nif __name__ == \"__main__\":\n # command line argument parser\n parser = argparse.ArgumentParser(\n prog='python -m stella',\n description='stella simulation data visualizer',\n )\n parser.add_argument(\n '--path',\n help='root directory path of stella data',\n )\n parser.add_argument(\n '--prefix',\n help='name prefix of stella data files (default set to name of path)',\n )\n parser.add_argument(\n '--save',\n help='save figure rather than show figure',\n action='store_true',\n )\n parser.add_argument(\n '--figname',\n help='name of figure (valid if save is on)',\n )\n\n # global matplotlib configuration goes here\n mpl.rcParams['figure.figsize'] = [16.0, 9.0]\n mpl.rcParams['figure.dpi'] = 80\n mpl.rcParams['figure.titlesize'] = 'medium'\n\n mpl.rcParams['savefig.dpi'] = 100\n\n mpl.rcParams['font.size'] = 16\n\n mpl.rcParams['legend.fontsize'] = 12\n\n # color map\n mpl.rcParams['image.cmap'] = 'GnBu'\n main(parser.parse_args())\n","sub_path":"stella/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"409821748","text":"import csv\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\n\n\nclass Command(BaseCommand):\n help = \"Formats the CSV to be easier to import.\"\n\n def handle(self, *args, **options):\n try:\n with open(settings.RAW_CSV, newline=\"\") as r, open(\n settings.MODIFIED_CSV, \"w\", newline=\"\"\n ) as w:\n reader = csv.reader(r, delimiter=\"\\t\")\n writer = csv.writer(w, delimiter=\";\")\n\n # Reformat header\n for row in reader:\n header = [\n name.replace(\"-\", \"_\").lstrip(\"_\") for name in row\n ]\n break\n\n # Skip header before writing\n next(reader, None)\n\n writer.writerow(header)\n\n rows_count = 0\n 
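# The header rewrite above maps raw export column names to identifier-friendly
# ones; a standalone illustration with made-up sample names (not taken from the
# real export). Note that the `for row in reader: ... break` loop has already
# consumed the header row, so the later next(reader, None) drops the first
# *data* row rather than the header, which looks unintended.
raw_header = ["_id", "product-name", "nutriscore-grade", "nova-group"]
clean_header = [name.replace("-", "_").lstrip("_") for name in raw_header]
assert clean_header == ["id", "product_name", "nutriscore_grade", "nova_group"]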
for row in reader:\n # Check if there's a nutriscore_grade and nova_group\n if row[44] and row[45]:\n rows_count += 1\n writer.writerow(row)\n\n except Exception as e:\n raise CommandError(f\"Something went wrong.\\n{e}\")\n\n self.stdout.write(\n self.style.SUCCESS(\n f\"Successfully formatted CSV file. {rows_count} products kept.\"\n )\n )\n","sub_path":"www/management/commands/format_csv.py","file_name":"format_csv.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"520399986","text":"# ----------------------------------------------------------------------------\n# Copyright (c) 2015--, micronota development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nfrom os.path import join, basename, splitext\nfrom logging import getLogger\nimport re\n\nimport pandas as pd\n\nfrom dumpling import (\n check_choice, Dumpling, OptionParam, Parameters)\n\n\nblast_params = [\n OptionParam('--threads', 'cpus', help='number of cpu threads.'),\n\n OptionParam('--gapopen', help='Gap open penalty.'),\n OptionParam('--gapextend', help='Gap extension penalty.'),\n OptionParam('--matrix', help='Scoring matrix.'),\n OptionParam('--seg', help='Enable SEG masking.'),\n OptionParam('--max-target-seqs', '-k',\n help='The maximum number of hits per query to keep alignments for.'),\n OptionParam('--top',\n help='Keep alignments within the given percentage range of the top alignment'),\n OptionParam('--evalue', '-e', help='Maximum expected value to keep an alignment.'),\n OptionParam('--min-score',\n help=('Minimum bit score to keep an alignment. Setting this option'\n 'will override the --evalue parameter.')),\n OptionParam('--query-cover',\n help='Report only alignments above the given percentage of query cover.'),\n OptionParam('--salltitles',\n help='Print full length subject titles in output.'),\n\n OptionParam('--band', help=''),\n OptionParam('--index-chunks', '-c',\n help='The number of chunks for processing the seed index. Default is 4. '\n 'You can set this to 1 to increase speed (but also increase memory usage.'),\n\n OptionParam('--sensitive',\n help=('Trigger the sensitive alignment mode with a 16x9 seed'\n 'shape config.')),\n OptionParam('--tmpdir', '-t',\n help='Directory to be used for temporary storage. Default is /dev/shm for fast IO.'),\n\n OptionParam('--db', '-d', help='Path to DIAMOND database file'),\n OptionParam('--daa', '-a',\n help='Path to DAA file.'),\n OptionParam('--query', '-q',\n help=('Path to query input file in FASTA or FASTQ format '\n '(may be gzip compressed).'))]\n\n\nmakedb_params = [\n OptionParam('--threads', '-p', name='cpus'),\n OptionParam('--in', name='fasta',\n help='protein reference database file in FASTA format (may be gzip compressed)'),\n OptionParam('--db', '-d',\n help='DIAMOND database file.'),\n OptionParam('--block-size',\n help='Block size in billions of sequence letters to be processed at a time.')]\n\n\nview_params = [\n OptionParam('--outfmt', name='fmt', action=check_choice(('tab', 'sam')),\n help=('Format of output file. 
(tab = BLAST tabular format;'\n 'sam = SAM format)')),\n OptionParam('--daa', '-a',\n help='Path to DAA file.'),\n OptionParam('--out', '-o',\n help='Path to output file.'),\n OptionParam('--compress', action=check_choice((0, 1)),\n help='Compression for output file (0=none, 1=gzip).')]\n\n\ndef run_blast(query, daa, aligner='blastp', **kwargs):\n '''Search query sequences against the database.\n\n Parameters\n ----------\n query : str\n The file path of the query seq\n daa : str\n The file path of the output daa file\n aligner : str\n The aligner. blastp or blastx\n kwargs : dict\n keyword arguments. Command line parameters for diamond blastp\n or blastx.\n Returns\n -------\n str\n The file path of the blast result.\n '''\n logger = getLogger(__name__)\n\n blast = Dumpling(['diamond', aligner],\n params=Parameters(*blast_params))\n blast.update(query=query, daa=daa, **kwargs)\n logger.info('Running {}'.format(blast.command))\n blast()\n return blast\n\n\ndef run_view(daa, out, fmt='sam', **kwargs):\n '''Convert Diamond daa file to a human readable output.\n\n Parameters\n ----------\n daa : str\n Input file resulting from diamond blast.\n out : str\n Output file.\n '''\n logger = getLogger(__name__)\n view = Dumpling(['diamond', 'view'],\n params=Parameters(*view_params))\n view.update(daa=daa, out=out, fmt=fmt, **kwargs)\n logger.info('Running {}'.format(view.command))\n view()\n return view\n\n\ndef run_makedb(fasta, db=None, **kwargs):\n '''Format database from a fasta file.\n\n This is similar to running ``diamond makedb --in db.faa --db db``.\n\n Parameters\n ----------\n fasta : str\n Input path for the fasta file.\n db : str or None (default)\n Output path for the formatted database file. It will be named\n after input file in the same directory by default.\n kwargs : dict\n keyword arguments. 
Other command line parameters for diamond makedb.\n\n Returns\n -------\n `Dumpling`\n '''\n logger = getLogger(__name__)\n if db is None:\n db = splitext(fasta)[0]\n makedb = Dumpling(['diamond', 'makedb'], params=Parameters(*makedb_params),\n version='0.7.12', url='https://github.com/bbuchfink/diamond')\n makedb.update(fasta=fasta, db=db, **kwargs)\n logger.info('Running {}'.format(makedb.command))\n makedb()\n return makedb\n\n\ndef run(out_dir, query, db, query_cover=0, fmt='sam', aligner='blastp', **kwargs):\n '''\n Run Diamond search.\n\n Parameters\n ----------\n out_dir : str\n output dir\n query : str\n file path to query sequence\n db : str\n file path the db\n query_cover : `Numeric`\n report hits above the given percentage of query cover.\n fmt : str ('sam' or 'tab')\n output file format\n aligner : str ('blastp' or 'blastx'\n which aligning mode to use\n kwargs : dict\n keyword arguments passing to `run_blast`\n\n Returns\n -------\n None\n '''\n logger = getLogger(__name__)\n prefix = splitext(basename(db))[0]\n daa = join(out_dir, '{}.daa'.format(prefix))\n out = join(out_dir, '{0}.{1}'.format(prefix, fmt))\n logger.info('Running Diamond search ...')\n run_blast(daa=daa, db=db, query=query, aligner=aligner, query_cover=query_cover, **kwargs)\n run_view(daa=daa, out=out, fmt=fmt)\n\n\ndef parse_tabular(res, column='bitscore'):\n '''Parse the tabular output of diamond blastp/blastx.\n\n Parameters\n ----------\n res : str\n file path to Diamond tabular output\n column : str\n The column used to pick the best hits.\n\n Returns\n -------\n pandas.DataFrame\n The hit records for each query sequence.\n '''\n columns = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch',\n 'gapopen', 'qstart', 'qend', 'sstart', 'send',\n 'evalue', 'bitscore']\n df = pd.read_table(res, names=columns)\n return df\n\n\ndef parse_sam(res):\n '''Parse the SAM output of diamond blastp/blastx.\n\n Parameters\n ----------\n res : str\n file path to Diamond SAM output\n\n Returns\n -------\n pandas.DataFrame\n The hit records for each query sequence.\n '''\n columns = [\n 'qseqid', # Query template NAME.\n 'FLAG', # Combination of bitwise FLAGs\n 'sseqid', # Reference sequence NAME of the alignment\n 'POS', # 1-based leftmost mapping position of the first base\n 'MAPQ', # Mapping quality. 
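# A hedged end-to-end sketch of the wrappers in this module, assuming it is
# importable as micronota.bfillings.diamond and that the diamond binary is on
# PATH; the paths ('proteins.faa', 'query.faa', 'out/') are hypothetical.
from micronota.bfillings.diamond import run_makedb, run, parse_sam, filter_best

run_makedb('proteins.faa')             # db is named after the fasta ('proteins')
run('out/', 'query.faa', 'proteins.dmnd', fmt='sam', aligner='blastp')
hits = parse_sam('out/proteins.sam')   # run() names outputs after the db prefix
best = filter_best(hits, column='evalue')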
-10log10(P_err).\n 'CIGAR', # CIGAR string\n 'RNEXT', # Reference sequence name of the primary alignment of NEXT\n 'PNEXT', # Position of the primary alignment of the NEXT read\n 'TLEN', # signed observed template length\n 'SEQ', # segment sequence\n 'QUAL'] # ASCII of base quality\n optional = [\n 'bitscore',\n 'NM', # Edit distance to the reference\n 'slen', # subject seq length\n 'rawscore',\n 'evalue',\n 'pident',\n 'frame',\n 'qstart', # start position of alignment\n 'MD']\n\n df = pd.read_table(res, names=columns + optional, comment='@')\n\n for col in optional:\n df[col] = df[col].apply(_convert)\n\n return df\n\n\ndef filter_best(df, column='evalue'):\n '''Filter out the best hits by their e-value or bitscore.'''\n # pick the rows that have highest bitscore for each qseqid\n # df_max = df.groupby('qseqid').apply(\n # lambda r: r[r[column] == r[column].max()])\n if column == 'evalue':\n idx = df.groupby('qseqid')[column].idxmin()\n elif column == 'bitscore':\n idx = df.groupby('qseqid')[column].idxmax()\n df_best = df.loc[idx]\n # df_best.set_index('qseqid', drop=True, inplace=True)\n return df_best\n\n\ndef filter_ident_overlap(df, pident=90, overlap=80):\n '''Filter away the hits using the same UniRef clustering standards.\n\n Parameters\n ----------\n df : `pandas.DataFrame`\n parsed from `parse_sam`\n pident : `Numeric`\n minimal percentage of identity\n overlap : `Numeric`\n minimal percentage of overlap for subject sequences.\n\n Returns\n -------\n `pandas.DataFrame`\n The data frame only containing hits that pass the thresholds.\n '''\n select_id = df.pident >= pident\n overlap_length = df.CIGAR.apply(_compute_aligned_length)\n select_overlap = overlap_length * 100 / df.slen >= overlap\n # if qlen * 100 / len(row.sequence) >= 80:\n df_filtered = df[select_id & select_overlap]\n # df_filtered.set_index('qseqid', drop=True, inplace=True)\n return df_filtered\n\n\ndef _compute_aligned_length(cigar):\n '''Compute the length of ungapped region in the alignment.\n\n It includes both matched and mismatched regions.\n\n Examples\n --------\n >>> [_compute_aligned_length(i) for i in (\n ... '',\n ... '45D',\n ... 
'18M2D19M')]\n [0, 0, 37]\n\n '''\n aligned = re.findall('([0-9]+)M', cigar)\n return sum(int(i) for i in aligned)\n\n\ndef _convert(s):\n field, t, v = s.split(':')\n if t == 'i':\n v = int(v)\n elif t == 'f':\n v = float(v)\n return v\n","sub_path":"micronota/bfillings/diamond.py","file_name":"diamond.py","file_ext":"py","file_size_in_byte":10292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"437773513","text":"#!/usr/bin/python3\nimport sys\n\ncount=0\nfor lines in sys.stdin:\n\trows = lines.strip(\"\\n\").split(\",\")\n\tcol = rows[0]\n\tcount=count+1\n\tif(count>4):\n\t\tprint(col,col,sep=',')\n","sub_path":"sqlEngine/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"318180686","text":"from odoo import api, models, fields\n\n\nclass StudentCoursesWizard(models.TransientModel):\n _name = \"school.student.courses.wizard\"\n\n course_ids = fields.One2many(\n 'school.student.courses.wizard.line', 'w_id', string='Course Lines')\n course_line_ids = fields.Many2many(\n 'courses.detail', 'student_course_rel_wizard', 'student_course_wiz_id', 'course_id', string='Courses')\n #\n\n def update_courses(self):\n print(\"\\n\\n\\n courses updated by wizard \\n\\n\\n\")\n # self.course_line_ids.course_id\n print(f\"\\n\\n\\n {self.course_line_ids.ids} \\n\\n\\n\")\n\n courses_list_ids = self.course_line_ids.ids\n\n # courses = []\n # for i in range(len(courses_list_ids)):\n # dt = self.env['courses.detail'].browse(courses_list_ids[i])\n # print(f\"\\n\\n\\n {dt} \\n\\n\\n\")\n # courses.append(dt)\n\n for i in range(len(courses_list_ids)):\n print(f\"\\n\\n\\n {courses_list_ids[i]} \\n\\n\\n\")\n # link id to student one2many fields\n self.env['school.student.detail'].browse(self._context.get(\"active_id\")).write({\n 'course_ids': [\n (0, 0, {\n 'course_id': courses_list_ids[i]\n })\n ]\n })\n\n return True\n # def update_courses(self):\n # print(\"\\n\\n\\n courses updated by wizard \\n\\n\\n\")\n\n # # get list of courses which added by user\n # # print(f\"\\n\\n {self.course_ids.ids} \\n\\n\")\n # # print(f\"\\n\\n\\n {self.browse([self.course_ids.ids[0].id])}\")\n # c_ids = self.env[\"school.student.courses.wizard.line\"].search(\n # [('w_id', '=', self.id)])\n # print(f\"\\n\\n\\n {c_ids.course_id}, {c_ids.course_id.ids}\")\n\n # # get ids from that one2many field\n # # loop for ids\n # f_ids = []\n # for i in range(len(c_ids.course_id.ids)):\n # # self.env['school.student.detail'].browse(self._context.get(\"active_id\")).write({\n # # 'course_ids': [\n # # (4, c_ids.course_id[i])\n # # ]\n # # })\n # f_ids.append(self.env['course.data.lines'].search(\n # [('course_id', '=', c_ids.course_id.ids[i])], limit=1))\n\n # print(f\"\\n\\n\\n {f_ids}\")\n\n # for i in range(len(f_ids)):\n # # link that id to student one2many fields\n # self.env['school.student.detail'].browse(self._context.get(\"active_id\")).write({\n # 'course_ids': [\n # (4, f_ids[i].id)\n # ]\n # })\n\n\nclass StudentCoursesWizardLine(models.TransientModel): # task.lines\n _name = 'school.student.courses.wizard.line'\n\n course_id = fields.Many2one('courses.detail', ondelete=\"set default\")\n short_desc = fields.Char(string=\"Short Desc.\",\n related='course_id.short_desc')\n created_by = fields.Char(\n string=\"Created By\", related='course_id.created_by')\n w_id = fields.Many2one('school.student.courses.wizard')\n\n def name_get(self):\n label = 
[]\n        for rec in self:\n            title = f\"{rec.course_id.name} ({rec.st_id.name})\"\n            label.append((rec.id, title))\n        return label\n","sub_path":"Training_Work/ODOO_C_MODULE_BACKUPS/new_app/wizard/student_course_wizard.py","file_name":"student_course_wizard.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"257079284","text":"import json\n\nimport requests\n\n\ndef get_param(): # returns the request payload (data)\n    data = {\"methodId\": \"04A\", \"dataSourceId\": \"endq\", \"cardInfo\": {\n        \"cardNumber\": \"1282520441\", \"cardBalance\": \"500\"\n    }, \"cardUser\": {\n        \"userId\": \"8751\"\n    }}\n    return data\n\n\ndef cosume_card(): # normal consumption\n    data = get_param()\n    url = \"http://115.28.108.130:8080/gasStation/process?\"\n    res = requests.post(url=url, json=data)\n    # print(res.json())\n    dict = res.json()\n    return dict\n\n\ndef cosume_card01(): # the consumption amount is empty\n    data = get_param()\n    url = \"http://115.28.108.130:8080/gasStation/process?\"\n    data[\"cardInfo\"][\"cardBalance\"] = \"\"\n    res = requests.post(url=url, json=data)\n    # res_dict = res.json()\n    # print(json.dumps(res_dict,indent=2,ensure_ascii=False))\n    # print(res.json())\n    dict = res.json()\n    return dict\n\n\ndef cosume_card02(): # code:5013, no card number found for the given user ID\n    data = get_param()\n    url = \"http://115.28.108.130:8080/gasStation/process?\"\n    data[\"cardUser\"][\"userId\"] = \"123\"\n    res = requests.post(url=url, json=data)\n    print(res.json())\n    dict = res.json()\n    return dict\n\n\ndef cosume_card03(): # code:612, the consumption amount is not an integer\n    data = get_param()\n    url = \"http://115.28.108.130:8080/gasStation/process?\"\n    data[\"cardInfo\"][\"cardBalance\"] = \"-9\"\n    res = requests.post(url=url, json=data)\n    # print(res.json())\n    dict = res.json()\n    return dict\n\n\ndef cosume_card04(): # send the request without submitting any parameters\n    data = {}\n    url = \"http://115.28.108.130:8080/gasStation/process?\"\n    res = requests.post(url=url, json=data)\n    # print(res.json())\n    dict = res.json()\n    return dict\n\n\nif __name__ == \"__main__\":\n    # cosume_card()\n    # cosume_card01()\n    # cosume_card02()\n    # cosume_card03()\n    cosume_card04()","sub_path":"day03_homework/cosume_card_balance.py","file_name":"cosume_card_balance.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"71316205","text":"# use \"Two Pointer\"\n\nclass Solution:\n    def threeSumClosest(self, nums: list, target: int) -> int:\n        if len(nums) < 3:\n            return 0\n        nums.sort()\n        print(nums)\n        a = nums[0]\n        b = nums[1]\n        c = nums[-1]\n        ans = a+b+c\n        print(ans)\n        for i in range(len(nums)-2):\n            a = nums[i]\n            bindex = i+1\n            cindex = -1\n            for j in range(i+1, len(nums)-1):\n                b = nums[bindex]\n                c = nums[cindex]\n                if abs(a+b+c-target) < abs(ans-target):\n                    print(a, b, c)\n                    ans = a+b+c\n                if a+b+c == target:\n                    return a+b+c\n                if a+b+c > target:\n                    cindex -= 1\n                elif a+b+c < target:\n                    bindex += 1\n        print(ans, a, b, c)\n        return ans\n\n\ns = Solution()\nprint(s.threeSumClosest([-111, -111, 3, 6, 7, 16, 17, 18, 19], 13))\n\n\"\"\"\n[1,2,3,4,5,6]\ni = 1\n2~3\n\n\"\"\"\n","sub_path":"Python/16/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"372583830","text":"def longestPalindrome(s):\n    \"\"\"\n    :type s: str\n    :rtype: int\n    \"\"\"\n    '''\n    s = sorted(s)\n    len_s = len(s)\n    output = []\n    center = []\n    temp = -1\n    for i in range(len_s):\n        if i < len_s - 1 and s[i] == s[i+1]:\n            continue\n        
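# The Counter-based branch below calls .itervalues(), a Python 2 method that
# raises AttributeError under the Python 3 this file otherwise targets; a
# Python 3 rendering of the same counting idea, as a self-contained sketch:
import collections

def longest_palindrome_length(s):
    ans = 0
    for v in collections.Counter(s).values():
        ans += v // 2 * 2          # every pair of a character can be mirrored
        if ans % 2 == 0 and v % 2 == 1:
            ans += 1               # at most one odd character sits in the middle
    return ans

assert longest_palindrome_length("abccccdd") == 7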
else:\n c = i - temp\n\n temp = i\n\n if c % 2 == 1:\n center += [s[i]]\n output += [s[i]]* (c-1)\n else:\n output += [s[i]]* c\n\n if len(center):\n return len(output) + 1\n else:\n return len(output)'''\n\n ans = 0\n for v in collections.Counter(s).itervalues():\n ans += v // 2 * 2\n if ans % 2 == 0 and v % 2 == 1:\n ans += 1\n return ans\n\ns = \"abccccdd\"\nprint(longestPalindrome(s))","sub_path":"409 最长回文串.py","file_name":"409 最长回文串.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"248079490","text":"# -*- coding: utf-8 -*-\nfrom luckycommon.sensor.sensor_model import SensorSwitch\nfrom luckycommon.utils.decorator import sql_wrapper\n\n\n@sql_wrapper\ndef sensor_status(platform, build_number):\n sensor_switch = SensorSwitch.query.filter(\n SensorSwitch.platform == platform).filter(\n SensorSwitch.build_number == build_number\n ).first()\n if not sensor_switch:\n return False\n return sensor_switch.sensor_status","sub_path":"luckycommon/sensor/sensor_db.py","file_name":"sensor_db.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"167307382","text":"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nfrom ....proto import onnx_proto\nfrom ...common._apply_operation import apply_add, apply_exp, apply_reshape, apply_sub\nfrom ...common._registration import register_converter\nimport numpy as np\n\n\ndef convert_sklearn_naive_bayes(scope, operator, container):\n # Computational graph:\n #\n # Note: In the following graph, variable names are in lower case characters only\n # and operator names are in upper case characters. We borrow operator names \n # from the official ONNX spec: https://github.com/onnx/onnx/blob/master/docs/Operators.md\n # All variables are followed by their shape in [].\n #\n # Symbols:\n # M: Number of test set instances\n # N: Number of features\n # C: Number of classes\n # input(or x): test set input\n # output(or y): test set output (There are two paths for producing output, one for\n # string labels and the other one for int labels) \n # feature_log_prob: Empirical log probability of features given a class, P(x_i|y)\n # class_log_prior: Smoothed empirical log probability for each class\n #\n # Multinomial NB\n # Equation: \n # y = argmax (class_log_prior + X . feature_log_prob^T)\n #\n # Graph:\n #\n # input[M, N] -> MATMUL <- feature_log_prob.T[N, C]\n # |\n # V\n # matmul_result[M, C] -> CAST <- onnx_proto.TensorProto.FLOAT\n # |\n # V\n # cast_result[M, C] -> SUM <- class_log_prior[1, C]\n # |\n # V \n # sum_result[M, C] -> ARGMAX -> argmax_output[M, 1] \n # | \n # V \n # classes[C] -------> ARRAYFEATUREEXTRACTOR\n # |\n # V (string labels)\n # array_feature_extractor_result[M, 1] ----------------------------\n # (int labels) | | \n # V |\n # output_shape[1] -> RESHAPE <- cast2_result[M, 1] <- CAST(to=onnx_proto.TensorProto.FLOAT) |\n # | |\n # V V\n # reshaped_result[M,] |------------------------------------- RESHAPE\n # | |\n # V V\n # onnx_proto.TensorProto.INT64 -> CAST --------> output[M,]\n #\n # Bernoulli NB\n # Equation:\n # y = argmax (class_log_prior + \\sum neg_prob - X . 
neg_prob)\n # neg_prob = log( 1 - e ^ feature_log_prob)\n #\n # Graph:\n #\n # feature_log_prob.T[N, C] -> EXP -> exp_result[N, C] \n # |\n # V\n # constant -> SUB -> sub_result[N, C] -> LOG -> neg_prob[N, C]\n # |\n # V\n # ----------------- sum_neg_prob[1, C] <------------------------ REDUCE_SUM\n # | |\n # | V\n # | input[M, N] -> MATMUL -> inp_neg_prob_prod[M, C] -> CAST(to=onnx_proto.TensorProto.FLOAT)\n # | |\n # | V\n # --------------------------------------> SUB <- cast_result[M, C]\n # |\n # V\n # class_log_prior[1, C] -> SUM <- difference_matrix[M, C]\n # |\n # V\n # sum_result[M, C] -> ARGMAX -> argmax_output[M, 1] \n # |\n # V\n # classes[C] -------> ARRAYFEATUREEXTRACTOR\n # |\n # V (string labels)\n # array_feature_extractor_result[M, 1] ----------------------------\n # (int labels) | | \n # V |\n # output_shape[1] -> RESHAPE <- cast2_result[M, 1] <- CAST(to=onnx_proto.TensorProto.FLOAT) | \n # | |\n # V |\n # reshaped_result[M,] --------------------------------------RESHAPE\n # | |\n # V |\n # onnx_proto.TensorProto.INT64 -> CAST -> output[M,] <-|\n\n nb = operator.raw_operator\n class_log_prior = nb.class_log_prior_.astype('float32').reshape((1, -1))\n feature_log_prob = nb.feature_log_prob_.T.astype('float32')\n classes = nb.classes_\n output_shape = [-1,]\n\n class_log_prior_name = scope.get_unique_variable_name('class_log_prior')\n feature_log_prob_name = scope.get_unique_variable_name('feature_log_prob')\n sum_result_name = scope.get_unique_variable_name('sum_result')\n cast_result_name = scope.get_unique_variable_name('cast_result')\n argmax_output_name = scope.get_unique_variable_name('argmax_output')\n cast2_result_name = scope.get_unique_variable_name('cast2_result')\n reshaped_result_name = scope.get_unique_variable_name('reshaped_result')\n classes_name = scope.get_unique_variable_name('classes')\n reduce_log_sum_exp_result_name = scope.get_unique_variable_name('reduce_log_sum_exp_result')\n log_prob_name = scope.get_unique_variable_name('log_prob')\n prob_tensor_name = scope.get_unique_variable_name('prob_tensor')\n array_feature_extractor_result_name = scope.get_unique_variable_name('array_feature_extractor_result')\n\n class_type = onnx_proto.TensorProto.STRING\n zipmap_attrs = {'name': scope.get_unique_operator_name('ZipMap')}\n if np.issubdtype(nb.classes_.dtype, np.floating):\n class_type = onnx_proto.TensorProto.INT32\n classes = np.array(list(map(lambda x: int(x), classes)))\n zipmap_attrs['classlabels_int64s'] = classes \n elif np.issubdtype(nb.classes_.dtype, np.signedinteger):\n class_type = onnx_proto.TensorProto.INT32\n zipmap_attrs['classlabels_int64s'] = classes\n else:\n zipmap_attrs['classlabels_strings'] = classes\n classes = np.array([s.encode('utf-8') for s in classes])\n\n container.add_initializer(feature_log_prob_name, onnx_proto.TensorProto.FLOAT,\n feature_log_prob.shape, feature_log_prob.flatten())\n container.add_initializer(classes_name, class_type, classes.shape, classes)\n\n if operator.type == 'SklearnMultinomialNB':\n container.add_initializer(class_log_prior_name, onnx_proto.TensorProto.FLOAT,\n class_log_prior.shape, class_log_prior.flatten())\n matmul_result_name = scope.get_unique_variable_name('matmul_result')\n\n container.add_node('MatMul', [operator.inputs[0].full_name, feature_log_prob_name],\n matmul_result_name, name=scope.get_unique_operator_name('MatMul'))\n # Cast is required here as Sum op doesn't work with Float64\n container.add_node('Cast', matmul_result_name, cast_result_name,\n to=onnx_proto.TensorProto.FLOAT, 
op_version=7)\n \n shape_result_name = scope.get_unique_variable_name('shape_result')\n container.add_node('Shape', class_log_prior_name, shape_result_name)\n reshape_result_name = scope.get_unique_variable_name('reshape_result')\n container.add_node('Reshape', [cast_result_name, shape_result_name], reshape_result_name)\n \n container.add_node('Sum', [reshape_result_name, class_log_prior_name],\n sum_result_name, name=scope.get_unique_operator_name('Sum'))\n else:\n container.add_initializer(class_log_prior_name, onnx_proto.TensorProto.FLOAT,\n class_log_prior.shape, class_log_prior.flatten())\n constant_name = scope.get_unique_variable_name('constant')\n exp_result_name = scope.get_unique_variable_name('exp_result')\n sub_result_name = scope.get_unique_variable_name('sub_result')\n neg_prob_name = scope.get_unique_variable_name('neg_prob')\n sum_neg_prob_name = scope.get_unique_variable_name('sum_neg_prob')\n inp_neg_prob_prod_name = scope.get_unique_variable_name('inp_neg_prob_prod')\n difference_matrix_name = scope.get_unique_variable_name('difference_matrix')\n\n container.add_initializer(constant_name, onnx_proto.TensorProto.FLOAT,\n [], [1.0])\n\n input_name = operator.inputs[0].full_name\n\n if nb.binarize is not None:\n threshold_name = scope.get_unique_variable_name('threshold')\n condition_name = scope.get_unique_variable_name('condition')\n cast_values_name = scope.get_unique_variable_name('cast_values')\n cast_input_name = scope.get_unique_variable_name('cast_input')\n zero_tensor_name = scope.get_unique_variable_name('zero_tensor')\n binarised_input_name = scope.get_unique_variable_name('binarised_input')\n\n container.add_initializer(threshold_name, onnx_proto.TensorProto.FLOAT,\n [1], [nb.binarize])\n \n container.add_node('Cast', operator.inputs[0].full_name,\n cast_input_name, to=onnx_proto.TensorProto.FLOAT, op_version=7)\n container.add_node('Greater', [cast_input_name, threshold_name],\n condition_name, name=scope.get_unique_operator_name('Greater'), op_version=7)\n container.add_node('Cast', condition_name, \n cast_values_name, to=onnx_proto.TensorProto.FLOAT, op_version=7)\n container.add_node('ConstantLike', operator.inputs[0].full_name, zero_tensor_name,\n name=scope.get_unique_operator_name('ConstantLike'),\n dtype=onnx_proto.TensorProto.FLOAT, op_version=9)\n apply_add(scope, [zero_tensor_name, cast_values_name], binarised_input_name, container, broadcast=1)\n input_name = binarised_input_name\n\n apply_exp(scope, feature_log_prob_name, exp_result_name, container)\n apply_sub(scope, [constant_name, exp_result_name], sub_result_name, container, broadcast=1)\n container.add_node('Log', sub_result_name,\n neg_prob_name, name=scope.get_unique_operator_name('Log'))\n container.add_node('ReduceSum', neg_prob_name,\n sum_neg_prob_name, name=scope.get_unique_operator_name('ReduceSum'), axes=[0])\n container.add_node('MatMul', [input_name, neg_prob_name],\n inp_neg_prob_prod_name, name=scope.get_unique_operator_name('MatMul'))\n # Cast is required here as Sub op doesn't work with Float64\n container.add_node('Cast', inp_neg_prob_prod_name, \n cast_result_name, to=onnx_proto.TensorProto.FLOAT, op_version=7)\n apply_sub(scope, [sum_neg_prob_name, cast_result_name], difference_matrix_name, container, broadcast=1)\n container.add_node('Sum', [difference_matrix_name, class_log_prior_name],\n sum_result_name, name=scope.get_unique_operator_name('Sum'))\n\n container.add_node('ArgMax', sum_result_name,\n argmax_output_name, name=scope.get_unique_operator_name('ArgMax'), 
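# The Multinomial NB graph above reduces to one affine map followed by an
# argmax. A numpy rendering of that decision rule, using sklearn's orientation
# (X: [M, N], feature_log_prob: [C, N], class_log_prior: [C]):
import numpy as np

def multinomial_nb_predict(X, class_log_prior, feature_log_prob):
    # scores[m, c] = class_log_prior[c] + sum_i X[m, i] * feature_log_prob[c, i]
    scores = X @ feature_log_prob.T + class_log_prior
    return np.argmax(scores, axis=1)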
axis=1)\n\n # Following four statements are for predicting probabilities\n container.add_node('ReduceLogSumExp', sum_result_name,\n reduce_log_sum_exp_result_name, name=scope.get_unique_operator_name('ReduceLogSumExp'),\n axes=[1], keepdims=0)\n apply_sub(scope, [sum_result_name, reduce_log_sum_exp_result_name], log_prob_name, container, broadcast=1)\n apply_exp(scope, log_prob_name, prob_tensor_name, container)\n container.add_node('ZipMap', prob_tensor_name, operator.outputs[1].full_name,\n op_domain='ai.onnx.ml', **zipmap_attrs)\n\n container.add_node('ArrayFeatureExtractor', [classes_name, argmax_output_name],\n array_feature_extractor_result_name, name=scope.get_unique_operator_name('ArrayFeatureExtractor'), op_domain='ai.onnx.ml')\n # Reshape op does not seem to handle INT64 tensor even though it is listed as one of the\n # supported types in the doc, so Cast was required here.\n if class_type == onnx_proto.TensorProto.INT32: # int labels\n container.add_node('Cast', array_feature_extractor_result_name, \n cast2_result_name, to=onnx_proto.TensorProto.FLOAT, op_version=7)\n apply_reshape(scope, cast2_result_name, reshaped_result_name, container, desired_shape=output_shape)\n container.add_node('Cast', reshaped_result_name, \n operator.outputs[0].full_name, to=onnx_proto.TensorProto.INT64, op_version=7)\n else: # string labels\n apply_reshape(scope, array_feature_extractor_result_name, operator.outputs[0].full_name, container,\n desired_shape=output_shape)\n\nregister_converter('SklearnMultinomialNB', convert_sklearn_naive_bayes)\nregister_converter('SklearnBernoulliNB', convert_sklearn_naive_bayes)\n","sub_path":"onnxmltools/convert/sklearn/operator_converters/NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":14848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"382506684","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport argparse\nimport os\nimport pickle\nimport numpy as np\nimport scipy.io as sio\nimport torch\nimport torch.optim as optim\nimport scipy.sparse as sp\nimport torch.nn as nn\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.model_selection import KFold\nfrom sklearn import svm\nfrom sklearn.metrics import f1_score\nfrom optimizer import loss_function_entropysample\n\nfrom utils import normalize, load_citationANEmatWeight, load_citationANEWeight, sparse_mx_to_torch_sparse_tensor, EdgeSampler\nfrom gcl.model import CONN\nfrom preprocessing import mask_test_edges_gclWeight\n\n# Training settings\nparser = argparse.ArgumentParser()\nparser.add_argument('--cuda', type=str, default='2', help='specify cuda devices')\nparser.add_argument('--dataset', type=str, default=\"cora\",\n help='Dataset to use.')\nparser.add_argument('--model_type', type=str, default=\"conn\",\n help='Dataset to use.')\nparser.add_argument('--mode', type=str, default=\"train\",\n help='Dataset to use.')\nparser.add_argument('--seed', type=int, default=42, help='Random seed.')\nparser.add_argument('--epochs', type=int, default=200,\n help='Number of epochs to train.')\nparser.add_argument('--activate', type=str, default=\"relu\",\n help='relu | prelu')\nparser.add_argument('--batch_size', type=int, default=1024,\n help='Number of epochs to train.')\nparser.add_argument('--lr', type=float, default=0.01,\n help='Initial learning rate.')\nparser.add_argument('--weight_decay', type=float, default=0.0,\n 
help='Weight decay (L2 loss on parameters).')\nparser.add_argument('--trade_weight', type=float, default=0.2,\n help='trade_off parameters).')\nparser.add_argument('--hid1', type=int, default=512,\n help='Number of hidden units.')\nparser.add_argument('--hid2', type=int, default=64,\n help='Number of hidden units.')\nparser.add_argument('--dim', type=int, default=128,\n help='Number of hidden units.')\nparser.add_argument('--use_net', type=int, default=1,\n help='Use attribute or not')\nparser.add_argument('--nlayer', type=int, default=2,\n help='Use attribute or not')\nparser.add_argument('--use_cpu', type=int, default=0,\n help='Use attribute or not')\nparser.add_argument('--loss_type', type=str, default=\"entropy\",\n help='entropy | BPR')\nparser.add_argument('--patience', type=int, default=50,\n help='Use attribute or not')\nparser.add_argument('--dropout', type=float, default=0.6,\n help='Dropout rate (1 - keep probability).')\nparser.add_argument('--drop', type=int, default=1,\n help='Indicate whether drop out or not')\n\n\ndef get_roc_score(net, adj, dataloader_val, device):\n net.eval()\n preds = []\n preds_neg = []\n while True:\n try:\n pos_edge, neg_edge = dataloader_val.next()\n except StopIteration:\n break\n pos_src_, pos_dst_ = zip(*pos_edge)\n neg_src_, neg_dst_ = zip(*neg_edge)\n pos_src = torch.LongTensor(pos_src_).to(device)\n pos_dst = torch.LongTensor(pos_dst_).to(device)\n neg_src = torch.LongTensor(neg_src_).to(device)\n neg_dst = torch.LongTensor(neg_dst_).to(device)\n src_emb, dst_emb, src_neg_emb, dst_neg_emb = net(adj, pos_src, pos_dst, neg_src, neg_dst)\n\n pos_logit, neg_logit = net.pred_logits(src_emb, dst_emb,\n src_neg_emb, dst_neg_emb)\n pos_logit = torch.sigmoid(pos_logit)\n neg_logit = torch.sigmoid(neg_logit)\n pos_logit = pos_logit.data.cpu().numpy().reshape(-1)\n neg_logit = neg_logit.data.cpu().numpy().reshape(-1)\n preds.extend(pos_logit.tolist())\n preds_neg.extend(neg_logit.tolist())\n\n preds_all = np.hstack([preds, preds_neg])\n labels_all = np.hstack([np.ones(len(preds)).tolist(), np.zeros(len(preds_neg)).tolist()])\n roc_score = roc_auc_score(labels_all, preds_all)\n ap_score = average_precision_score(labels_all, preds_all)\n\n return roc_score, ap_score\n\n\ndef output_nodeemb(adj, net, num_node, device):\n net.eval()\n batch_size = 64\n n = int(num_node / batch_size)\n if n * batch_size < num_node:\n n = n + 1\n index_all = list(range(num_node))\n start = 0\n x1 = []\n for i in range(n):\n if i == n-1:\n index_ = np.array(index_all[start:])\n else:\n index_ = np.array(index_all[start:start+batch_size])\n\n node_index = torch.LongTensor(index_).to(device)\n node_emb = net.get_emb(node_index, adj)\n x1.append(node_emb.data.cpu().numpy())\n start += batch_size\n x1 = np.concatenate(x1, axis=0)\n return x1\n\n\ndef train(features, adj, dataloader, dataloader_val, save_path, device, args, pos_weight, norm):\n num_node = features.shape[0]\n num_attri = features.shape[1]\n model = CONN(nfeat=args.dim,\n nnode=num_node,\n nattri=num_attri,\n nlayer=args.nlayer,\n dropout=args.dropout,\n drop=args.drop,\n hid1=args.hid1,\n hid2=args.hid2,\n act=args.activate)\n optimizer = optim.Adam(model.parameters(),\n lr=args.lr, weight_decay=args.weight_decay)\n b_xent = nn.BCEWithLogitsLoss()\n\n model.to(device)\n adj = adj.to(device)\n max_auc = 0.0\n max_ap = 0.0\n best_epoch = 0\n cnt_wait = 0\n for epoch in range(args.epochs):\n steps = 0\n epoch_loss = 0.0\n model.train()\n while True:\n try:\n pos_edge, neg_edge = dataloader.next()\n except 
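# get_roc_score above follows the usual link-prediction recipe: score positive
# and sampled negative edges, stack them, and label them 1/0. The scoring step
# in isolation, with toy numbers in place of real model outputs:
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score

pos_scores = np.array([0.9, 0.8, 0.4])   # sigmoid outputs for true edges (toy)
neg_scores = np.array([0.3, 0.7, 0.1])   # sigmoid outputs for non-edges (toy)
preds_all = np.hstack([pos_scores, neg_scores])
labels_all = np.hstack([np.ones(len(pos_scores)), np.zeros(len(neg_scores))])
print(roc_auc_score(labels_all, preds_all),
      average_precision_score(labels_all, preds_all))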
StopIteration:\n break\n pos_src_, pos_dst_ = zip(*pos_edge)\n neg_src_, neg_dst_ = zip(*neg_edge)\n pos_src = torch.LongTensor(pos_src_).to(device)\n pos_dst = torch.LongTensor(pos_dst_).to(device)\n neg_src = torch.LongTensor(neg_src_).to(device)\n neg_dst = torch.LongTensor(neg_dst_).to(device)\n src_emb, dst_emb, src_neg_emb, dst_neg_emb = model(adj, pos_src, pos_dst, neg_src, neg_dst)\n\n pos_logit, neg_logit = model.pred_logits(src_emb, dst_emb,\n src_neg_emb, dst_neg_emb)\n loss_train = loss_function_entropysample(pos_logit, neg_logit, b_xent, loss_type=args.loss_type)\n optimizer.zero_grad()\n loss_train.backward()\n optimizer.step()\n\n epoch_loss += loss_train.item()\n print('--> Epoch %d Step %5d loss: %.3f' % (epoch + 1, steps + 1, loss_train.item()))\n steps += 1\n\n auc_, ap_ = get_roc_score(model, adj, dataloader_val, device)\n if auc_ > max_auc:\n max_auc = auc_\n max_ap = ap_\n best_epoch = epoch\n cnt_wait = 0\n torch.save(model.state_dict(), save_path)\n else:\n cnt_wait += 1\n\n print('Epoch %d / %d' % (epoch, args.epochs),\n 'current_best_epoch: %d' % best_epoch,\n 'train_loss: %.4f' % (epoch_loss / steps),\n 'valid_acu: %.4f' % auc_,\n 'valid_ap: %.4f' % ap_)\n\n if cnt_wait == args.patience:\n print('Early stopping!')\n break\n\n print('!!! Training finished',\n 'best_epoch: %d' % best_epoch,\n 'best_auc: %.4f' % max_auc,\n 'best_ap: %.4f' % max_ap)\n\n emb_result = []\n model.load_state_dict(torch.load(save_path))\n emb = output_nodeemb(adj, model, num_node, device)\n return emb\n\n\ndef train_save(features, adj, dataloader, dataloader_val, save_path, device, args, pos_weight, norm):\n num_node = features.shape[0]\n num_attri = features.shape[1]\n model = CONN(nfeat=args.dim,\n nnode=num_node,\n nattri=num_attri,\n nlayer=args.nlayer,\n dropout=args.dropout,\n drop=args.drop,\n hid1=args.hid1,\n hid2=args.hid2,\n act=args.activate)\n\n model.to(device)\n adj = adj.to(device)\n\n emb_result = []\n model.load_state_dict(torch.load(save_path))\n emb = output_nodeemb(adj, model, num_node, device)\n return emb\n\n\ndef print_configuration(args):\n print('--> Experiment configuration')\n for key, value in vars(args).items():\n print('{}: {}'.format(key, value))\n\ndef accuracy(preds, labels):\n correct = (preds == labels).astype(float)\n correct = correct.sum()\n return correct / len(labels)\n\ndef test_classify(feature, labels, args):\n shape = len(labels.shape)\n if shape == 2:\n labels = np.argmax(labels, axis=1)\n f1_mac = []\n f1_mic = []\n accs = []\n kf = KFold(n_splits=5, random_state=args.seed, shuffle=True)\n for train_index, test_index in kf.split(feature):\n train_X, train_y = feature[train_index], labels[train_index]\n test_X, test_y = feature[test_index], labels[test_index]\n clf = svm.SVC(kernel='rbf', decision_function_shape='ovo')\n clf.fit(train_X, train_y)\n preds = clf.predict(test_X)\n\n micro = f1_score(test_y, preds, average='micro')\n macro = f1_score(test_y, preds, average='macro')\n acc = accuracy(preds, test_y)\n f1_mac.append(macro)\n f1_mic.append(micro)\n accs.append(acc)\n f1_mic = np.array(f1_mic)\n f1_mac = np.array(f1_mac)\n accs = np.array(accs)\n f1_mic = np.mean(f1_mic)\n f1_mac = np.mean(f1_mac)\n accs = np.mean(accs)\n print('Testing based on svm: ',\n 'f1_micro=%.4f' % f1_mic,\n 'f1_macro=%.4f' % f1_mac,\n 'acc=%.4f' % accs)\n return f1_mic, f1_mac, accs\n\ndef hop1_get(adj, trade_weight, num_node):\n adj = adj.tolil()\n adj_attr = adj[:num_node, num_node:].tocsr()\n adj_net = adj[:num_node, :num_node].tocsr()\n adj_attr = 
normalize(adj_attr)\n adj_net = normalize(adj_net)\n n, d = adj_attr.shape\n adj_train = sp.dok_matrix((n + d, n + d), dtype=np.float32)\n adj_train = adj_train.tolil()\n weight_net = trade_weight\n weight_attr = (1 - trade_weight)\n adj_net = adj_net * weight_net\n adj_net = adj_net.tolil()\n adj_attr = adj_attr * weight_attr\n adj_attr = adj_attr.tolil()\n adj_attri_2 = sp.csr_matrix(np.eye(d, dtype=float)).tolil()\n adj_attri_2 = adj_attri_2 * weight_net\n adj_train[:n, n:] = adj_attr\n adj_train[n:, :n] = adj_attr.T\n adj_train[:n, :n] = adj_net\n adj_train[n:, n:] = adj_attri_2\n adj_train = adj_train.tocsr()\n return adj_train\n\nif __name__ == '__main__':\n args = parser.parse_args()\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.use_cpu:\n device = torch.device(\"cpu\")\n else:\n if torch.cuda.is_available():\n cuda_name = 'cuda:' + args.cuda\n device = torch.device(cuda_name)\n print('--> Use GPU %s' % args.cuda)\n torch.cuda.manual_seed(args.seed)\n else:\n device = torch.device(\"cpu\")\n print(\"--> No GPU\")\n\n print('---> Loading %s dataset...' % args.dataset)\n if args.dataset == 'BlogCatalog' or args.dataset == 'Flickr' or args.dataset == 'ACM':\n adj_ori, features, adj, labels, idx_train, idx_val, idx_test = load_citationANEmatWeight(args.dataset)\n else:\n adj_ori, features, adj, labels, idx_train, idx_val, idx_test = load_citationANEWeight(args.dataset)\n print('--->Generate train/valid links for unsupervised learning...')\n adj_orig = adj\n adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)\n adj_orig.eliminate_zeros()\n num_node = features.shape[0]\n print('---> Prepare training loader...')\n t1 = time.time()\n adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges_gclWeight(adj, features.shape[0])\n adj = adj_train\n print('---> Finish training loader with time: %d' % (time.time() - t1))\n\n idx_train = np.array(idx_train)\n idx_val = np.array(idx_val)\n idx_test = np.array(idx_test)\n\n mat_path = 'emb/' + args.dataset + '_{}'.format(args.trade_weight) + 'gnn.pickle'\n\n if os.path.isfile(mat_path):\n with open(mat_path, 'rb') as f:\n data = pickle.load(f)\n adj_norm = data['adj_norm']\n else:\n print('---> Start adj_norm')\n tt1 = time.time()\n adj_norm = hop1_get(adj, args.trade_weight, num_node)\n tt2 = time.time()\n print('---> finish adj_norm with time: {}'.format(tt2 - tt1))\n save_dict = {'adj_norm': adj_norm}\n with open(mat_path, 'wb') as pfile:\n pickle.dump(save_dict, pfile, pickle.HIGHEST_PROTOCOL)\n\n adj_norm = sparse_mx_to_torch_sparse_tensor(adj_norm).float()\n\n dataloader = EdgeSampler(train_edges[0], train_edges[1], args.batch_size)\n dataloader_val = EdgeSampler(val_edges, np.array(val_edges_false), args.batch_size, remain_delet=False)\n pos_weight = torch.Tensor([float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()])\n norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)\n\n adj_label = adj_train + sp.eye(adj_train.shape[0])\n save_path = \"./weights/%s_\" % args.model_type + args.dataset + '_%d_' % args.dim + '_%d_' % args.hid1\\\n + '_%d_' % args.hid2 + '{}'.format(args.trade_weight) + '.pth'\n print_configuration(args)\n\n if args.mode == 'train':\n print('---> Start training...')\n node_emb = train(features, adj_norm, dataloader, dataloader_val, save_path, device, args,\n pos_weight, norm)\n else:\n print('---> Start save embedding...')\n node_emb = train_save(features, adj_norm, 
dataloader, dataloader_val, save_path, device, args, pos_weight, norm)\n\n print('---> Start testing with shape: {}'.format(node_emb.shape))\n features = sp.csr_matrix(node_emb)\n f1_mic_svm, f1_mac_svm, acc_svm = test_classify(features.toarray(), labels, args)\n\n print('!!! SVM classification results: '\n 'f1_svm_mic: %.4f' % f1_mic_svm,\n 'f1_svm_mac: %.4f' % f1_mac_svm,\n 'f1_svm_acc: %.4f' % acc_svm,\n )\nprint('---> Finish!!!!')","sub_path":"main_classify.py","file_name":"main_classify.py","file_ext":"py","file_size_in_byte":14584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"295676021","text":"from networkx import all_shortest_paths\nfrom Class_wrDCJ_Node import Node\n\nfrom Class_extremities_and_adjacencies import Extremities_and_adjacencies\nfrom Class_Network_wrDCJ import Network\n\nimport time\n\nt0 = time.time()\n\n#genomeA = [[1, 2, 3, 5, 6, 4, 7, -8, 9]]\n#genomeB = [[1, 2,3 ,4,5,6,7, 8, 9]]\n#genomeA = [[1,4, 5, 2, 3,-6,7]]\n#genomeB = [[1, 2,3 ,4], [5, 6, 7]]\n#genomeA = [[1,-3,-2, 4, 5,6,9,7], [8, 10],[ 11, 12]]\n#genomeB = [[1, 2,3 ,4 , 5, 6, 7], [8, 9, 10,11, 12]]\n#genomeA = [[1, 6, 7, 4, 5, 2, 3, -8, 9]]\n#genomeB = [[1, 2,3 ,4,5,6,7, 8, 9]]\n#genomeA = [[1,5,6,7,2,3,4,8]]\n#genomeB = [[1,2,3,4,5,6,7,8]]\n\n#genomeA = [[1,-4,-3, -2,5,6,11], [-7,-10,8,9]]\n#genomeB = [[1,2,3,4], [5,6,7,8,9], [10,11]]\n\n#genomeA = [[1,-4, -3, 6,7, -2, 5, 8, -9, 10]]\n#genomeA = [[1, -4, 6, -3, -2, 5, 7]]\n#genomeA = [[1,-3,-2,4,5,6,7]]\n#genomeB = [[1,2,3,4,5,6,7,8, 9, 10]]\n\n#genomeA =[[-7, 16, -6], [-12, -11, -10, -9, -8, 1, 2, -3, 4, 5, -13], [-17, -15, -14]]\n\n\n#genomeB = [[1, 2,3,4, 5,6,7],[8,9, 10, 11, 12], [13, 14, 15,16, 17]]\n\n\ngenomeA =[[1, 8, -10, -9, -5, -4, 11, 12, -15, -14, -3], [-7, -6, -13], [2]]\ngenomeB = [[1, 2,3,4, 5,6,7],[8,9, 10, 11, 12], [13, 14, 15]]\n#from genes to adjacencies\nget_adjacencies = Extremities_and_adjacencies()\nadjacencies_genomeA = get_adjacencies.adjacencies_ordered_and_sorted(genomeA)\nadjacencies_genomeB = get_adjacencies.adjacencies_ordered_and_sorted(genomeB)\n\nprint('Adjacencies of the genomes: ')\nprint('Genome A: ', adjacencies_genomeA)\nprint('Genome B: ', adjacencies_genomeB)\nprint('____________________________________')\nprint()\nprint()\n\n\n\n\n#Create start and target node\nstart_node = Node(adjacencies_genomeA)\ntarget_node = Node(adjacencies_genomeB)\n\n#Construct entire network\nconstruct_network = Network(start_node, target_node, adjacencies_genomeB)\n\n\n\n\nnetwork = construct_network.build_network()\n\n#graph = GraphTheory(network)\n\n#plot the entire network in hierarchical structure (saved as 'hierarchical_network_plot.png')\n#graph.plot_network(start_node)\n\n#prints out metrics\n#metrics_on_degree_sequence= graph.metrics_on_degree_sequence()\n\n#calcute different centrality measures\n#centrality_measures = graph.centrality_algorithms()\n#pagerank = centrality_measures[0]\n##c_degree = centrality_measures[1]\n#c_closeness = centrality_measures[2]\n#c_betweenness = centrality_measures[3]\n\n#plot the 4 different centrality measure on one graph (saved as 'centrality_measures_plot.png')\n#graph.plot_centrality_measures(start_node, pagerank, c_degree, c_closeness, c_betweenness)\n\n\n\n#paths = list(construct_network.get_all_shortest_paths(network, start_node, target_node))\n'''\nprint('number of paths: ', len(paths))\nfor path in paths:\n print(path)\n for element in path:\n\n print('children: ', element.children)\n print('wieghts: ', 
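# The operation-counting loop below recovers each step's label by locating the
# current node in its parent's children list. The same bookkeeping in plain
# dict form (a toy two-step path; the operation names mimic this script's):
from collections import Counter

ops = {'A': {'B': 'inv'}, 'B': {'C': 'trp1'}}   # parent -> {child: operation}
path = ['A', 'B', 'C']
counts = Counter(ops[p][c] for p, c in zip(path, path[1:]))
assert counts == Counter({'inv': 1, 'trp1': 1})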
element.children_weights)\n print()\n print(element.state)\n for child in element.children:\n print(' ', child.state)\n\n #print(element.children_weights)\n print()\n'''\nnew_shortest_paths = (list(all_shortest_paths(network, start_node, target_node, weight='weight')))\n#rDCJ_shortest_paths = (list(nx.all_shortest_paths(network, start_node, target_node)))\nprint(len(new_shortest_paths))\n#print(len(rDCJ_shortest_paths))\n\n\n\n\n''''\n\nprint()\ni=0\nfor path in rDCJ_shortest_paths:\n i=i+1\n print('PATH ', i)\n for element in path:\n adj = element.state\n print(get_adjacencies.adjacencies_to_genome(adj))\n print()\nprint('***************')\nprint()\n\ni=0\nfor path in new_shortest_paths:\n i=i+1\n print('PATH ', i)\n for element in path:\n adj = element.state\n print(get_adjacencies.adjacencies_to_genome(adj))\n print()\n\n'''\nj = 1\ntot_b_trl = 0\ntot_u_trl = 0\ntot_inv = 0\ntot_trp1 = 0\ntot_trp2 = 0\ntot_fus = 0\ntot_fis = 0\nfor path in new_shortest_paths:\n print()\n i = 0\n b_trl = 0\n u_trl = 0\n inv = 0\n trp1 = 0\n trp2 = 0\n fus = 0\n fis = 0\n while i < len(path):\n\n current = path[i]\n if i == 0:\n #print(get_adjacencies.adjacencies_to_genome(current.state))\n pass\n else:\n x = path[i-1].children.index(current)\n operation_type = path[i-1].children_operations[x][1]\n if operation_type == 'b_trl':\n b_trl+=1\n elif operation_type == 'u_trl':\n u_trl+=1\n elif operation_type == 'inv':\n inv+=1\n elif operation_type == 'trp1':\n trp1+=1\n elif operation_type == 'trp2':\n trp2+=1\n elif operation_type == 'fus':\n fus+=1\n elif operation_type =='fis':\n fis+=1\n #print(operation_type)\n\n\n #print(get_adjacencies.adjacencies_to_genome(current.state))\n\n i+=1\n print('Path ', j )\n print('inv: ', inv, ' trp1: ', trp1, ' trp2: ', trp2, ' b_trl: ', b_trl, ' u_trl: ', u_trl, ' fus: ', fus,\n ' fis: ', fis)\n tot_b_trl += b_trl\n tot_u_trl += u_trl\n tot_inv += inv\n tot_trp1 += trp1\n tot_trp2 += trp2\n tot_fus += fus\n tot_fis += fis\n j+=1\n\nprint('Totals')\nprint('inv: ', tot_inv, ' trp1: ', tot_trp1, ' trp2: ', tot_trp2, ' b_trl: ', tot_b_trl, ' u_trl: ', tot_u_trl, ' fus: ', tot_fus,\n ' fis: ', tot_fis)\n\nt1 = time.time()\nprint('time: ', t1-t0)\n","sub_path":"DC_graph_theory_wrDCJ.py","file_name":"DC_graph_theory_wrDCJ.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"142732229","text":"import numpy as np\nimport cv2\n\nimg = cv2.imread('./inputs/usb.jpg', cv2.IMREAD_COLOR)\nimg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ntemplate = cv2.imread('./inputs/usb_port.jpg', cv2.IMREAD_GRAYSCALE)\nw, h = template.shape[::-1]\n\nres = cv2.matchTemplate(img_gray, template,cv2.TM_CCOEFF_NORMED)\nthreshold = 0.7 # 70%\n'''\nlowering threshold increases number of template matching...but also increases number of false matchings\n'''\nloc = np.where(res >= threshold)\n\nfor pt in zip(*loc[::-1]):\n cv2.rectangle(img, pt, (pt[0]+w,pt[1]+h), color=(0,255,255), thickness=1)\n\ncv2.imshow('img',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"template_mateching.py","file_name":"template_mateching.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"10960071","text":"import MySQLdb\nimport random\nimport names\n\ndb = MySQLdb.connect('localhost', 'root', 'jj', 'mydb')\nc = db.cursor()\n\n\ndef add_Team(team_name):\n #sql_ADD_TEAM = \"\"\" INSERT INTO Teams(Team_name) VALUES (%s); 
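# The threshold note in template_mateching.py above can be made concrete by
# counting candidate boxes at several cut-offs; this sketch assumes the same
# two input images exist at the script's hypothetical paths.
import cv2
import numpy as np

img_gray = cv2.imread('./inputs/usb.jpg', cv2.IMREAD_GRAYSCALE)
template = cv2.imread('./inputs/usb_port.jpg', cv2.IMREAD_GRAYSCALE)
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
for threshold in (0.9, 0.8, 0.7, 0.6):
    # each drop in the cut-off admits more (and noisier) candidate matches
    print(threshold, int(np.sum(res >= threshold)))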
\"\"\", (NAMES_T,)\n NAMES_T = []\n NAMES_T.append(str(team_name))\n print(NAMES_T)\n c.execute(\"\"\" INSERT INTO Teams(Team_name) VALUES (%s); \"\"\", (NAMES_T,))\n print(\"Druzyna zostala dodana {}\".format(team_name))\n db.commit()\n\ndef add_Player(player_name, player_surname, player_email, player_team):\n # dodaje zawoddnika o podanym Name, surname, email i id_team\n ID_ZAW = [1]\n NAME_ZAW = []\n SURNAME_ZAW = []\n EMAIL_ZAW = []\n ID_TEAM = []\n\n NAME_ZAW.append(player_name)\n SURNAME_ZAW.append(player_surname)\n EMAIL_ZAW.append(player_email)\n ID_TEAM.append(player_team)\n\n c.execute(\"\"\"INSERT INTO Zawodnicy VALUES (%s, %s, %s, %s, %s);\"\"\",(ID_ZAW, NAME_ZAW, SURNAME_ZAW, EMAIL_ZAW, ID_TEAM))\n print('Zawdnika zostal dodany')\n db.commit()\n\ndef czyszczenie():\n c.execute(\"\"\" DELETE FROM Zawodnicy ; \"\"\")\n c.execute(\"\"\"DELETE FROM Teams ; \"\"\")\n db.commit()\n print('wyczyszczono')\n\ndef zerowanie():\n c.execute(\"\"\"ALTER TABLE Teams AUTO_INCREMENT=0 ;\"\"\")\n c.execute(\"\"\"ALTER TABLE Zawodnicy AUTO_INCREMENT=0 ;\"\"\")\n print(\"wyzerowano\")\n\n\nczyszczenie()\nzerowanie()\nadd_Team('HESBASKETBALL')\nadd_Team('W5')\n#add_Team()\nadd_Player('Darek', 'Awesome', 'daro@gmial.com', 1)\n","sub_path":"Programowanie/ZPI/funkcje_baza.py","file_name":"funkcje_baza.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"336395922","text":"# -*- Coding: utf-8 -*-\n\n\nimport sys, os\nimport numpy as np\nimport pyqtgraph as pg\nimport re\nimport spc\nimport pickle\nfrom spc.spc import subFile\nfrom scipy.optimize import minimize_scalar\nfrom scipy.stats import t # method を使用する際に必要\n\nimport copy\nimport glob\n\nfrom PIL import Image\nimport struct\nimport itertools\nimport functools\nimport datetime\n\nfrom PyQt5.QtGui import (\n QColorDialog, \n QColor, \n QFont, \n )\nfrom PyQt5.QtWidgets import (\n QLabel, \n QDialog, \n QVBoxLayout, \n QHBoxLayout, \n QGridLayout, \n QDoubleSpinBox, \n QPushButton, \n QWidget, \n QTabWidget, \n QLineEdit, \n QFormLayout, \n QSpacerItem, \n QFileDialog, \n )\n\n# デフォルト値\nver = \"0.5.4\"\nprint(\"version: %s\"%ver)\n\ndefault_last_opened_dir = os.path.expanduser('~') + '/Desktop'\ndefault_method_dir = os.path.expanduser('~') + '/Desktop'\ndefault_plugin_dir = os.path.expanduser('~') + '/Desktop'\n\nbase_path = \"\"\nicon_path = \"\"\nsettings_path = \"\"\n\ndef set_base_path(base_path_):\n global base_path\n global icon_path\n global settings_path\n base_path = base_path_\n icon_path = os.path.join(base_path, \"icons\")\n settings_path = os.path.join(base_path, \"settings\")\n\nCRR_half_window1 = 10\nCRR_half_window2 = 1\nCRR_SN = 10\nCRR_percentile = 80\n# CRR_ones = np.ones(CRR_window, dtype=float)\n\n# windows/mac\n# if os.name == 'nt': # windows\n# meta_key = \"Alt\"\n# elif os.name == 'posix': # mac or linux\n# meta_key = \"Meta\"\n\n\n# スペクトルのフォント:o: original, a: added, u: unmixed\nselected_set = \"set3\"\ndefault_keys = [\"bg_brush\", \"graph_line\", \"o_color\", \"a_color\", \"u_color\"]\nif selected_set == \"set1\":\n bg_brush = (0,0,0,255)\n graph_line = (150,150,150,255)\n o_color = \"d\"\n a_color = \"r\"\n u_color = \"y\"\n t_color = (255,0,0,255)\nelif selected_set == \"set2\":\n bg_brush = (0,0,0,255)\n graph_line = (150,150,150,255)\n o_color = (150,150,150,255)\n a_color = (255,0,0,255)\n u_color = (255,255,0,255)\n t_color = (255,0,0,255)\nelif selected_set == \"set3\":\n bg_brush = (255,255,255,255)\n graph_line = 
(0,0,0,255)\n o_color = (0,0,0,255)\n a_color = (0,0,255,255)\n u_color = (255,0,0,255)\n t_color = (255,0,0,255)\nelif selected_set == \"set4\":\n bg_brush = (255,255,255,255)\n graph_line = (0,0,0,255)\n o_color = \"d\"\n a_color = \"f\"\n u_color = \"y\"\n t_color = (255,0,0,255)\ncrr_color = \"r\"\n\nsettings = {\n \"last opened dir\": default_last_opened_dir, \n \"method dir\": default_method_dir, \n \"plugin dir\": default_plugin_dir, \n # グラフ背景など\n \"bg_brush\":bg_brush, \n \"graph_line\":graph_line, \n # ライン色\n \"o_color\":o_color, \n \"a_color\":a_color, \n \"u_color\":u_color,\n \"t_color\":t_color,\n }\n\n# 設定ファイルの読み込み(なければ作る:初回のみ?)\ndef load_settings_file():\n # 設定ファイルがない場合は新規作成 & 保存(最初に開かれたときのみ)\n if not os.path.exists(settings_path):\n # settingsファイルを新規作成\n with open(settings_path, mode='wb') as f:\n pickle.dump(settings, f)\n # 設定ファイルの読み込み\n with open(settings_path, mode='rb') as f:\n loaded_settings = pickle.load(f)\n for key, value in loaded_settings.items():\n settings[key] = value\n # 存在しないパスは、デスクトップへのパスに変更する。\n if not os.path.exists(settings[\"last opened dir\"]):\n settings[\"last opened dir\"] = default_last_opened_dir\n if not os.path.exists(settings[\"method dir\"]):\n settings[\"method dir\"] = default_method_dir\n if not os.path.exists(settings[\"plugin dir\"]):\n settings[\"plugin dir\"] = default_plugin_dir\n # background, foreground 設定\n pg.setConfigOption(\"background\", settings[\"bg_brush\"])\n pg.setConfigOption(\"foreground\", settings[\"graph_line\"])\ndef save_settings_file():\n with open(settings_path, mode='wb') as f:\n pickle.dump(settings, f)\n\n# original, added, unmixed\ndef mk_o_pen():\n return pg.mkPen(settings[\"o_color\"], width=1)\ndef mk_a_pen():\n return pg.mkPen(settings[\"a_color\"], width=1)\ndef mk_u_pen():\n return pg.mkPen(settings[\"u_color\"], width=1)\ndef mk_bg_pen(): # unmixed の condition に統一中\n return pg.mkPen(settings[\"u_color\"], width=1)\n# def mk_t_pen(): # unmixed の condition に統一中\n# return pg.mkPen(settings[\"u_color\"], width=1)\ndef mk_crr_pen():\n return pg.mkPen(crr_color, width=1)\n\n# クロスヘア\nc_color = (255,0,0,100)\nc_width = 1\ndef mk_c_pen():\n return pg.mkPen(c_color, width=c_width)\n\n# 色 default_background_color\ndbg_color = \"white\"\nbrushAlpha = 50\ndef mk_u_brush():\n color = QColor(*settings[\"u_color\"])\n color.setAlpha(brushAlpha)\n return pg.mkBrush(color)\ntargetAlpha = 150\nt_width = 0.5\ndef mk_target_color():\n color = QColor(*settings[\"t_color\"])\n color.setAlpha(targetAlpha)\n return pg.mkPen(color=color, width=t_width)\n\n # サイズ\nicon_width = 30\naxis_width = 60\ndcm = 1 # デフォルトcontentsMargins\ndsp = 1 # デフォルトspacing\nspacer_size = icon_width / 4\ngrad_rect_size = 8 # グラジエントの太さ\nhistogram_height = icon_width * 2 / 3\nmainwindow_height = icon_width + 4 * dcm\nmainwindow_width = 400\nbatch_window_min_height = 500\nbatch_window_min_width = 700\nprocess_widget_height = 40\nmap_widget_margin = 7\n\n# フォント ###############################\nmono_small = QFont(\"Courier\")\nmono_small.setPointSize(9)\nboldFont=QFont()\nboldFont.setBold(True)\ndef just_small(size):\n just_small = QFont()\n just_small.setPointSize(size)\n return just_small\n# デフォ記入地\nvalue_settings_popups_init = 2950\n\n# monospaced = QFont(\"Courier\")\n# monoBold = QFont(\"Courier\")\n# monoBold.setBold(True)\n# monoSmall = QFont(\"Courier\")\n# monoSmall.setPointSize(10)\n# monoBigBold = QFont(\"Courier\")\n# monoBigBold.setBold(True)\n# monoBigBold.setPointSize(20)\n# monoSmallBold = QFont(\"Courier\")\n# 
monoSmallBold.setBold(True)\n# monoSmallBold.setPointSize(10)\n# timesSmall = QFont(\"Times\")\n# timesSmall.setPointSize(10)\n# helvetica = QFont(\"0\")\n\n# フォント付きラベル\nclass QRichLabel(QLabel):\n def __init__(self, text, font=None):\n super().__init__(text)\n if font:\n self.setFont(font)\n###########################################\n\n\n# バイナリファイルへの書き込み用:上書きでなく、挿入の形で書き込み\ndef insert(f, insert_re, offset, from_what): # 0:ファイル頭、1:現在の位置、2:ファイルお尻\n # from_whatからoffsetだけ移動した後、そこよりあとの部分を読み込み、\n f.seek(offset, from_what)\n latter_part = f.read()\n # 読み込んだ時点で最後まで位置が移動してしまっているので元の位置に戻り、\"insert_re, 前もって読んでおいたlatter_part\"の順で上書き\n f.seek(offset, from_what)\n f.write(insert_re)\n f.write(latter_part)\n\n# バイナリファイルへの書き込み用:指定された正規表現の部分を削除\ndef remove_between(f, remove_re, flags=0):\n f.seek(0, 0)\n matchedObject_list = list(re.finditer(remove_re, f.read(), flags=flags))\n if len(matchedObject_list) > 1:\n raise Exception(\"Cannot change the map_size: multiple [map_size] sequence was found in the file.\")\n matchedObject = matchedObject_list[0]\n s_point, e_point = matchedObject.span()\n # remove_reでマッチした部分より後半の文字列: あとで付け足す用\n f.seek(e_point, 0)\n later_letter = f.read()\n # remove_reでマッチした部分を含め、それよりあとのものを削除\n f.seek(s_point, 0)\n f.truncate()\n # 追加\n f.write(later_letter)\n return matchedObject\n\n# ファイルパスから、ファイル名とフォルダ名、拡張子抜きのファイル名を取得\ndef file_name_processor(file_path):\n splitted_file_path = file_path.split(\"/\")\n file_name = splitted_file_path[-1]\n file_name_wo_ext = \".\".join(file_name.split(\".\")[:-1])\n dir_path = \"/\".join(splitted_file_path[:-1])\n return dir_path, file_name, file_name_wo_ext\n\n# y軸を左右に作る時:vb1にvb2を追加する。その時の、vbサイズを同一にする用\ndef updateViews(vb1, vb2):\n vb2.setGeometry(vb1.sceneBoundingRect())\n vb2.linkedViewChanged(vb1, vb2.XAxis)\n\n# pillowを用いた16bit画像保存\ndef save_u16_to_tiff(u16in):#, size, tiff_filename):\n \"\"\"\n Since Pillow has poor support for 16-bit TIFF, I made my own\n save function to properly save a 16-bit TIFF.\n \"\"\" # write 16-bit TIFF image\n # PIL interprets mode 'I;16' as \"uint16, little-endian\"\n w, h = u16in.shape\n return Image.frombytes(\"I;16\", (h, w), u16in.tostring())\n\n # u16in = u16in.astype(int)\n # img_out = Image.new('I;16', u16in.shape)\n # NUMPY 持ってる場合 # make sure u16in little-endian, output bytes\n # outpil = u16in.astype(u16in.dtype.newbyteorder(\"<\")).tobytes()\n # NUMPY 持ってない場合:何故かエラー出る # little-endian u16 format\n # outpil = struct.pack(\"<%dH\"%(len(u16in)), *u16in)\n # img_out.frombytes(outpil)\n # return(img_out)\n # img_out.save(tiff_filename)\n\n# 素因数を求める(mapサイズを求める用)\ndef get_prime_factors(n):\n i = 2\n prime_factor_list = []\n while i * i <= n:\n if n % i:\n i += 1\n else:\n n //= i\n prime_factor_list.append(i)\n if n > 1:\n prime_factor_list.append(n)\n return(np.array(prime_factor_list))\n\n# 2つの積に分解する\ndef into_2_products(n):\n # まずは素因数分解\n prime_factor_list = get_prime_factors(n)\n # 使用不使用のリスト\n unique_factors, N_list = np.unique(prime_factor_list, return_counts=True)\n comb_list = np.array(list(itertools.product(*[np.arange(N+1) for N in N_list])))\n product1_list = np.power(unique_factors[np.newaxis, :], comb_list).prod(axis=1)\n product1_list.sort()\n product2_list = (n / product1_list).astype(int)\n return(product1_list, product2_list)\n\n# \"\"\"\n# get x to minimize ||Ax - b||\n# 片側 5% 検定\n# \"\"\"\n# def ls_stat(A, b):\n# N_data, N_var = A.shape\n# d_freedom = N_data - N_var\n# result = ll(A, b)\n# Var_x = np.linalg.inv(np.dot(A.T, A)) * np.dot(result.fun.T, result.fun) / d_freedom\n# SE = 
np.sqrt(np.diag(Var_x))\n# t_vals = np.divide(result.x, SE)\n# p = t.cdf(-np.absolute(t_vals), df=d_freedom)\n# # btm = result.x - t.ppf(0.975, df=d_freedom) * SE\n# # top = result.x + t.ppf(0.975, df=d_freedom) * SE\n# btm95 = result.x - t.ppf(0.95, df=d_freedom) * SE\n# return result.x, SE, t_vals, p, btm95\n\n\n# 与えられた範囲XにおけるYの最大値・最小値を求める\ndef get_local_maximum(x_list, y_list, x_range):\n local_area = (x_range[0] <= x_list) & (x_list <= x_range[1])\n return(y_list[local_area].max())\ndef get_local_minimum(x_list, y_list, x_range):\n local_area = (x_range[0] <= x_list) & (x_list <= x_range[1])\n return(y_list[local_area].min())\ndef get_local_minmax(x_list, y_list, x_range):\n local_area = (x_range[0] <= x_list) & (x_list <= x_range[1])\n local_y = y_list[local_area]\n return(local_y.min(), local_y.max())\ndef spectrum_linear_subtraction_core(master_x_list, master_y_list, added_regional_y_list):\n # 引き算したあとの関数を直線近似した際の、二乗誤差を求める関数\n def rnorm(n):\n diff_y_list = master_y_list - n * added_regional_y_list\n # 直線近似(a:傾き、b:切片)\n params, residuals, rank, s = np.linalg.lstsq(np.vstack([master_x_list, np.ones(len(master_x_list))]).T, diff_y_list, rcond=-1)\n return residuals[0]\n optimization_results = minimize_scalar(rnorm, bounds=(0, np.inf))\n umx_height_value = -optimization_results.x # [係数リスト(-optimization_results.x = -n), オリジナル]\n return umx_height_value\n\n###########################\n\n# カイザーのidxを求める(インスタンスメソッド追加の形で使う)\ndef get_idx(self, RS):\n RS_idx = np.argmin(np.absolute(self.x - RS))\n return RS_idx\n# 範囲のデータを取り出す:ソートはされない\ndef get_data(self, sRS, eRS, sub_idx=0): #, sort=False\n # (sRS, eRS)と(sRS-idx, eRS-idx)は必ずしも一致しない(RSは降順にもなりうるがidxはあくまで昇順に並ぶ)\n sRS_idx, eRS_idx = np.sort([self.get_idx(sRS), self.get_idx(eRS)])\n y_list = self.sub[sub_idx].y[sRS_idx:eRS_idx + 1]\n x_list = self.x[sRS_idx:eRS_idx + 1]\n # # sortがTRUEなら、x_listで昇順に並べ替えた形で表示\n # if sort:\n # order = np.argsort(x_list)\n # x_list = x_list[order]\n # y_list = y_list[order]\n return x_list, y_list\n# # x軸方向のインターバルを求める\n# def get_RS_diff(self):\n# RS_diff = np.absolute((self.ffirst - self.flast) / self.fnpts)\n# return(RS_diff)\n\n# ポイント強度によるmap作成\ndef get_point_intensity_list(self, RS):\n RS_idx = self.get_idx(RS)\n return np.array([self.sub[sub_idx].y[RS_idx] for sub_idx in range(self.fnsub)])\ndef get_total_intensity_list(self):\n return np.array([self.sub[sub_idx].y.sum() for sub_idx in range(self.fnsub)])\n# カーブフィットのための関数\ndef gaussian_fitting_function(x, u, s, h, a, b):\n y = h * np.exp(-(((x - u) / s)**2 / 2)) + a * x + b\n return y\ndef gaussian_function(x, u, s, h):\n y = h * np.exp(-(((x - u) / s)**2 / 2))\n return y\n\n# logboxのサイズ変更(マイナスもありうる)\ndef update_logsizd(file_path, flogoff=None, added_length=None):\n with open(file_path, 'rb+') as f:\n if flogoff is None:\n f.seek(248)\n flogoff = struct.unpack(\"= 0:\n # 宇宙線候補領域以外で最小二乗法したとき\n data_without_cosmic_ray_around = np.empty((0), dtype=float)\n pre_e_idx = 0\n for s_idx, e_idx in se_set:\n data_without_cosmic_ray_around = np.hstack((data_without_cosmic_ray_around, self.sub[around_idx].y[pre_e_idx:s_idx]))\n pre_e_idx = e_idx\n else:\n data_without_cosmic_ray_around = np.hstack((data_without_cosmic_ray_around, self.sub[around_idx].y[pre_e_idx:]))\n A = np.vstack([data_without_cosmic_ray_around, without_cosmic_ray_ones])\n SlopeInterceptRSQ_set[idx, :2] = np.dot(np.dot(np.linalg.inv(np.dot(A, A.T)), A), data_without_cosmic_ray_center)\n SlopeInterceptRSQ_set[idx, 2] = np.corrcoef(data_without_cosmic_ray_center, data_without_cosmic_ray_around)[0, 1]\n # 
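translation: correct the data inside the detected cosmic-ray regions\n # 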
宇宙線領域の修正\n SlopeInterceptRSQ_set[:, 2] /= np.nansum(SlopeInterceptRSQ_set[:, 2])\n for idx, (s_idx, e_idx) in enumerate(se_set):\n # 修正前に、オリジナルのデータを保存\n self.log_dict[b\"cosmic_ray_locs\"][sub_idx][2].append(copy.deepcopy(self.sub[sub_idx].y[s_idx:e_idx+1]))\n # 修正\n data_for_replacement = np.zeros(e_idx-s_idx+1, dtype=float)\n for idx, around_idx in enumerate(TopBottomLeftRight_idxes):\n if not np.isnan(SlopeInterceptRSQ_set[idx, 2]):\n data_for_replacement += (self.sub[around_idx].y[s_idx:e_idx+1] * SlopeInterceptRSQ_set[idx, 0] + SlopeInterceptRSQ_set[idx, 1]) * SlopeInterceptRSQ_set[idx, 2]\n self.sub[sub_idx].y[s_idx:e_idx+1] = data_for_replacement\n\ndef clear_CRR_fm_object(self):\n # オブジェクトをオリジナルに戻す and いろいろ削除\n for sub_idx, (se_set, TopBottomLeftRight_idxes, original_data_set) in self.log_dict[b\"cosmic_ray_locs\"].items():\n for (s_idx, e_idx), original_data in zip(se_set, original_data_set):\n self.sub[sub_idx].y[s_idx:e_idx+1] = copy.deepcopy(original_data)\n del self.log_dict[b\"cosmic_ray_locs\"]\n del self.log_dict[b\"cosmic_ray_removal_params\"]\n\n# to ndArray with shape(self.fnsub, self.fnpts)\ndef toNumPy_2dArray(self):\n spc_set = np.full((self.fnsub, self.fnpts), np.nan, dtype=float)\n for sub_idx in range(self.fnsub):\n spc_set[sub_idx] = self.sub[sub_idx].y\n return spc_set\ndef fmNumPy_2dArray(self, numpy2dArray):\n for sub_idx, data in enumerate(numpy2dArray):\n self.sub[sub_idx].y = data\ndef clear_CRR_fm_binary(file_path):\n with open(file_path, 'rb+') as f:\n # CRR results 除去\n remove_re = b\"\\n\\[CRR\\]\\r\\n.+\\r\\n\\[CRR\\]\\r\\n\"\n matchedObject = remove_between(f, remove_re, flags=re.DOTALL)\n len1 = len(matchedObject.group(0))\n # CRR params 除去\n remove_re = b\"\\n\\[CRR_p\\]\\r\\n.+\\r\\n\\[CRR_p\\]\\r\\n\"\n matchedObject = remove_between(f, remove_re, flags=re.DOTALL)\n len2 = len(matchedObject.group(0))\n # logsizd を update\n update_logsizd(file_path, flogoff=None, added_length= -len1- len2)\n\nclass CustomColorButton(QPushButton):\n def __init__(self):\n super().__init__()\n self.setStyleSheet(\"QPushButton{border:1px solid black; background-color:white}\")\n self.btn_color = (255,255,255,255)\n def set_color(self, rgba_color):\n self.btn_color = rgba_color\n self.setStyleSheet(\"QPushButton{border:1px solid black; background-color:rgba%s}\"%str(rgba_color))\nclass HorizontalLayout(QHBoxLayout):\n def __init__(self, widgets, stretch=False):\n super().__init__()\n for widget in widgets:\n self.addWidget(widget)\n if stretch:\n self.addStretch(1)\n\n# settings 設定 popup ##########################\nclass SettingsPopup(QDialog):\n def __init__(self, settings):\n super().__init__()\n self.setWindowTitle(\"Preferences\")\n # ボタン\n btn_ok = QPushButton(\"OK\")\n btn_cancel = QPushButton(\"Cancel\")\n btn_ok.clicked.connect(self.btn_ok_clicked)\n btn_cancel.clicked.connect(self.btn_cancel_clicked)\n ok_cancel_layout = QHBoxLayout()\n ok_cancel_layout.addStretch(1)\n ok_cancel_layout.addWidget(btn_cancel)\n ok_cancel_layout.addWidget(btn_ok)\n # タブ\n self.temp_settings = {}\n self.settings = settings\n tab = QTabWidget()\n tab.addTab(ColorSettings(parent=self), \"color\")\n tab.addTab(PathSettings(parent=self), \"path\")\n # レイアウト\n layout = QVBoxLayout()\n layout.addWidget(tab)\n layout.addLayout(ok_cancel_layout)\n self.setLayout(layout)\n def btn_ok_clicked(self, event=None):\n for key, value in self.temp_settings.items():\n self.settings[key] = value\n # save_settings_file()\n with open(settings_path, mode='wb') as f:\n pickle.dump(self.settings, f)\n 
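# A safer variant (assumed improvement, not the original behavior) would\n # write atomically via a temp file:\n # tmp_path = settings_path + '.tmp'\n # with open(tmp_path, mode='wb') as tmp_f:\n #     pickle.dump(self.settings, tmp_f)\n # os.replace(tmp_path, settings_path)\n 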
self.close()\n def btn_cancel_clicked(self, event=None):\n self.close()\nclass PathSettings(QWidget):\n def __init__(self, parent):\n super().__init__()\n self.parent = parent\n # 中身\n self.last_opened_dir = QLineEdit(self.parent.settings[\"last opened dir\"])\n self.method_dir = QLineEdit(self.parent.settings[\"method dir\"])\n self.plugin_dir = QLineEdit(self.parent.settings[\"plugin dir\"])\n btn_set_last_opened_dir = QPushButton(\"...\")\n btn_set_method_dir = QPushButton(\"...\")\n btn_set_plugin_dir = QPushButton(\"...\")\n # レイアウト\n layout = QVBoxLayout()\n layout.setSpacing(0)\n layout.addLayout(HorizontalLayout([QLabel(\"last opened file folder\")], stretch=False))\n layout.addLayout(HorizontalLayout([self.last_opened_dir, btn_set_last_opened_dir]))\n layout.addItem(QSpacerItem(0,5))\n layout.addLayout(HorizontalLayout([QLabel(\"last opened method folder\")], stretch=False))\n layout.addLayout(HorizontalLayout([self.method_dir, btn_set_method_dir]))\n layout.addItem(QSpacerItem(0,5))\n layout.addLayout(HorizontalLayout([QLabel(\"plugin directory (reboot to apply)\")], stretch=False))\n layout.addLayout(HorizontalLayout([self.plugin_dir, btn_set_plugin_dir]))\n layout.addItem(QSpacerItem(0,5))\n layout.addStretch(1)\n self.setLayout(layout)\n # イベントコネクト\n btn_set_last_opened_dir.clicked.connect(self.set_last_opened_dir)\n btn_set_method_dir.clicked.connect(self.set_method_dir)\n btn_set_plugin_dir.clicked.connect(self.set_plugin_dir)\n\n def set_last_opened_dir(self, event=None):\n dir_path = QFileDialog.getExistingDirectory(self, 'select folder', settings[\"last opened dir\"])\n settings[\"last opened dir\"] = dir_path\n def set_method_dir(self, event=None):\n dir_path = QFileDialog.getExistingDirectory(self, 'select folder', settings[\"method dir\"])\n settings[\"method dir\"] = dir_path\n def set_plugin_dir(self, event=None):\n dir_path = QFileDialog.getExistingDirectory(self, 'select folder', settings[\"plugin dir\"])\n settings[\"plugin dir\"] = dir_path\n\nclass ColorSettings(QWidget):\n def __init__(self, parent):\n super().__init__()\n self.parent = parent\n # 中身\n self.bg_brush = CustomColorButton()\n self.bg_brush.clicked.connect(functools.partial(self.get_color, btn_type=\"bg_brush\"))\n self.graph_line = CustomColorButton()\n self.graph_line.clicked.connect(functools.partial(self.get_color, btn_type=\"graph_line\"))\n self.o_color = CustomColorButton()\n self.o_color.clicked.connect(functools.partial(self.get_color, btn_type=\"o_color\"))\n self.a_color = CustomColorButton()\n self.a_color.clicked.connect(functools.partial(self.get_color, btn_type=\"a_color\"))\n self.u_color = CustomColorButton()\n self.u_color.clicked.connect(functools.partial(self.get_color, btn_type=\"u_color\"))\n self.set_icon_color()\n # ボタン\n btn_reset = QPushButton(\"Reset\")\n btn_reset.clicked.connect(self.btn_reset_clicked)\n # レイアウト\n grid_layout = QGridLayout()\n grid_layout.addWidget(QLabel(\"color\"), 0, 1)\n grid_layout.addWidget(QLabel(\"graph background\\n(reboot to apply)\"), 1, 0)\n grid_layout.addWidget(self.bg_brush, 1, 1)\n grid_layout.addWidget(QLabel(\"graph lines\\n(reboot to apply)\"), 2, 0)\n grid_layout.addWidget(self.graph_line, 2, 1)\n grid_layout.addWidget(QLabel(\"original spectrum\"), 3, 0)\n grid_layout.addWidget(self.o_color, 3, 1)\n grid_layout.addWidget(QLabel(\"added spectrum\"), 4, 0)\n grid_layout.addWidget(self.a_color, 4, 1)\n grid_layout.addWidget(QLabel(\"unmixed spectrum\"), 5, 0)\n grid_layout.addWidget(self.u_color, 5, 1)\n reset_layout = QHBoxLayout()\n 
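# addStretch(1) first pushes the Reset button to the right edge of the row\n 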
reset_layout.addStretch(1)\n reset_layout.addWidget(btn_reset)\n layout = QVBoxLayout()\n layout.addLayout(grid_layout)\n layout.addLayout(reset_layout)\n self.setLayout(layout)\n def set_icon_color(self):\n for key, value in self.parent.settings.items():\n try:\n getattr(self, key).set_color(value)\n except:\n pass\n def get_color(self, event=None, btn_type=None):\n cur_btn = getattr(self, btn_type)\n color_dialog = QColorDialog(QColor(*cur_btn.btn_color))\n done = color_dialog.exec_()\n if done == 1:\n color = color_dialog.selectedColor().getRgb()\n cur_btn.setStyleSheet(\"QPushButton{border:1px solid black; background-color:rgba%s}\"%str(color))\n self.parent.temp_settings[btn_type] = color\n def btn_reset_clicked(self, event=None):\n for key in default_keys:\n value = eval(key)\n self.parent.temp_settings[key] = value\n try:\n getattr(self, key).setStyleSheet(\"QPushButton{border:1px solid black; background-color:rgba%s}\"%str(value))\n except:\n pass\n\n###########################\n\n# アンミックスメソッド。中身は基本、spcを踏襲する形で…\nclass UnmixingMethod():\n def __init__(self, procedures, *argsm, **kwargs):\n self.version = \"2.0\"\n self.spc_like_list = []\n self.file_path_list = [] # spc_like_listとindexごとに対応\n self.isBackgroundSet = False\n self.procedures = procedures # 文字列からなるリスト形式å\n self.target_range = [None, None]\n # def add_spectrum(self, x_list, y_list):\n # # subFileの作成\n # subLike = SubLike()\n # sub_like.init_fmt(self.spc_file.sub[0])\n # subLike.add_data(y_list, sub_idx=0)\n # # spcFileの作成\n # spcLike = SpcLike()\n # spcLike.x = x_list\n # spcLike.add_subLike(subLike)\n # self.spc_like_list.append(spcLike)\n # def add_spc_file(self, spc_file):\n # self.spc_file_list.append(spc_file)\n \n\n\n# スペクトル描画の際はspcファイル毎にwidgetに渡されるので、その形に似せといたほうが良い\nclass SpcLike(spc.File):\n def __init__(self):\n # main header\n self.length = None\n self.ftflg = None\n self.fversn = None\n self.fexper = None\n self.fexp = None\n self.fnpts = None\n self.ffirst = None\n self.flast = None\n self.fnsub = 0\n self.fxtype = None\n self.fytype = None\n self.fztype = None\n self.fpost = None\n self.fdate = None\n self.fres = None\n self.fsource = None\n self.fpeakpt = None\n self.fspare = None\n self.fcmnt = None\n self.fcatxt = None\n self.flogoff = 512\n self.fmods = None\n self.fprocs = None\n self.flevel = None\n self.fsampin = None\n self.ffactor = None\n self.fmethod = None\n self.fzinc = None\n self.fwplanes = None\n self.fwinc = None\n self.fwtype = None\n self.freserv = None\n #\n self.tsprec = None\n self.tcgram = None\n self.tmulti = None\n self.trandm = None\n self.tordrd = None\n self.talabs = None\n self.txyxys = None\n self.txvals = None\n self.year = None\n self.month = None\n self.day = None\n self.hour = None\n self.minute = None\n self.cmnt = None\n self.dat_multi = None\n self.dat_fmt = None\n self.x = None\n self.sub = []\n # log header\n self.logsizd = 64\n self.logsizm = 4096\n self.logtxto = 64\n self.logbins = 0\n self.logdsks = 0\n self.logspar = b\"\\x00\" * 44\n # log information\n self.log_content = None\n self.log_dict = None\n self.log_other = None\n # additional information\n self.spacing = None\n self.xlabel = None\n self.zlabel = None\n self.ylabel = None\n self.exp_type = None\n def init_fmt(self, spc_file):\n attrib_list = [\n \"ftflg\", \n \"fversn\", \n \"fexper\", \n \"fexp\", \n \"fxtype\", \n \"fytype\", \n \"fztype\", \n \"fpost\", \n \"fres\", \n \"fsource\", \n \"fpeakpt\", \n \"fspare\", \n \"fcmnt\", \n \"fcatxt\", \n \"fmods\", \n \"fprocs\", \n \"flevel\", \n \"fsampin\", 
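# (all of these header fields are copied verbatim from the template spc_file in init_fmt)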
\n \"ffactor\", \n \"fmethod\", \n \"fzinc\", \n \"fwplanes\", \n \"fwinc\", \n \"fwtype\", \n \"freserv\", \n #\n \"log_content\"\n ]\n for attrib in attrib_list:\n setattr(self, attrib, getattr(spc_file, attrib))\n self.fdate = datetime2decimal(datetime.datetime.now())\n def add_xData(self, xData):\n self.x = xData\n self.fnpts = len(self.x)\n self.ffirst = self.x[0]\n self.flast = self.x[-1]\n def add_subLike(self, subLike):\n fnpts = len(subLike.y)\n if self.fnpts != fnpts:\n raise Exception(\"self.fnpts and length of added spectrum does not match\")\n self.fnsub += 1\n self.sub.append(subLike)\n def save_as_spcl(self, save_path):\n with open(save_path, 'wb') as f:\n pickle.dump(self, f)\n def save_as_spc(self, save_path):\n # data block\n datablock_binary = b\"\"\n for i, sub in enumerate(self.sub):\n print(i)\n subfile_binary = sub.toBinary()\n datablock_binary += subfile_binary\n # update flogoff\n self.flogoff += len(datablock_binary)\n # log block\n logblock_binary = self.logblock2binary()\n # main header block (512)\n mainheader_binary = self.mainheader2binary()\n # connect\n spcfile_binary = mainheader_binary + datablock_binary + logblock_binary + b\"\\x00\"\n with open(save_path, 'wb') as f:\n f.write(spcfile_binary)\n def mainheader2binary(self):\n # byte 文字列にする\n self.fexper = self.fexper.to_bytes(1, byteorder=\"little\")\n self.fexp = self.fexp.to_bytes(1, byteorder=\"little\")\n self.fxtype = self.fxtype.to_bytes(1, byteorder=\"little\")\n self.fytype = self.fytype.to_bytes(1, byteorder=\"little\")\n self.fztype = self.fztype.to_bytes(1, byteorder=\"little\")\n self.fcmnt = self.fcmnt.encode('utf-8')\n attrib_list = [\n \"ftflg\", \n \"fversn\", \n \"fexper\", \n \"fexp\", \n \"fnpts\", ###\n \"ffirst\", ###\n \"flast\", ###\n \"fnsub\", ###\n \"fxtype\", \n \"fytype\", \n \"fztype\", \n \"fpost\", \n \"fdate\", ###\n \"fres\", \n \"fsource\", \n \"fpeakpt\", \n \"fspare\", \n \"fcmnt\", \n \"fcatxt\", \n \"flogoff\", ###\n \"fmods\", \n \"fprocs\", \n \"flevel\", \n \"fsampin\", \n \"ffactor\", \n \"fmethod\", \n \"fzinc\", \n \"fwplanes\", \n \"fwinc\", \n \"fwtype\", \n \"freserv\"\n ]\n head_str = \" GUI Window <========================================================= #\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter import scrolledtext\nfrom tkinter import messagebox\nfrom tkinter.messagebox import askyesno\nimport tkinter as tk\n\nfrom toolz.itertoolz import join\nfrom config import DEFAULT_BUY_IN, DEFAULT_BUY_OUT, DEFAULT_PRICE, DEFAULT_SELL_IN, DEFAULT_SELL_OUT, DEFAULT_GAS_PRICE, DEFAULT_GAS_FEE, DEFAULT_TOKEN\n\nwindow = Tk()\n\nlblAccountVal = tk.StringVar()\nlblBalanceVal = tk.DoubleVar()\nlblCurPriceVal = tk.DoubleVar()\nlblSigPriceVal = tk.DoubleVar()\nlblGapPriceVal = tk.DoubleVar()\nlblGapPriceLimit = tk.StringVar()\ntxtAmountInVal = tk.DoubleVar()\nlblAmountInCurrency = tk.StringVar()\ntxtAmountOutVal = tk.DoubleVar()\nlblAmountOutCurrency = tk.StringVar()\nlblAmountEst = tk.StringVar()\ntxtGasFeeVal = tk.IntVar()\ntxtGasPriVal = tk.IntVar()\nlblGasEstVal = tk.StringVar()\ncheckAuto = tk.IntVar()\nisAutoStarted = False\nisAuto = False\n\n\n# ==============================================> Trade <===============================================#\n\nimport sys\nimport json\nimport time\nimport requests\nfrom decimal import Decimal\nfrom web3 import Web3, contract\nfrom config import CONTRACT_ADDRESS, CONTRACT_ABI, RECIPIENT_ADDRESS, PROVIDER_ADDRESS, PRIVATE_ADDRESS, TOKEN_LIST\n\nclass Trader:\n def __init__(self):\n self.w3 = 
Web3(Web3.HTTPProvider('https://ropsten.infura.io/v3/bb12f60463e14f6ea9257284fac7e3b7'))\n # Get the contract \n abi = json.loads(CONTRACT_ABI)\n self.traderContract = self.w3.eth.contract(address=CONTRACT_ADDRESS, abi=abi)\n abi = json.loads(TOKEN_LIST[DEFAULT_TOKEN]['abi'])\n self.tokenContract = self.w3.eth.contract(address=TOKEN_LIST[DEFAULT_TOKEN]['address'], abi=abi)\n\n def resetToken(self, token=DEFAULT_TOKEN):\n abi = json.loads(TOKEN_LIST[token]['abi'])\n self.tokenContract = self.w3.eth.contract(address=TOKEN_LIST[token]['address'], abi=abi)\n \n def setCurPrice(self, token=DEFAULT_TOKEN):\n tokenAddress = TOKEN_LIST[token]['address']\n # tokenAddress = '0x6b175474e89094c44da98b954eedeac495271d0f' # if mainnet\n response = requests.get('https://api.coingecko.com/api/v3/simple/token_price/ethereum?contract_addresses='+ tokenAddress +'&vs_currencies=usd')\n response_json = json.loads(response.content)\n if tokenAddress in response_json:\n lblCurPriceVal.set(response_json[tokenAddress]['usd'])\n else:\n lblCurPriceVal.set(DEFAULT_PRICE)\n\n\n def validateForm(self):\n amountIn = txtAmountInVal.get()\n amountOut = txtAmountOutVal.get()\n gasPrice = txtGasPriVal.get()\n gasFee = txtGasFeeVal.get()\n if amountIn == '' or amountIn <= 0:\n messagebox.showwarning(\"Warning\",\"Please input Amount In ( Value > 0 )\")\n return 0\n if amountOut == '' or amountOut <= 0:\n messagebox.showwarning(\"Warning\",\"Please input Amount Out ( Value > 0 )\")\n return 0\n if gasPrice == '' or gasPrice <= 0:\n messagebox.showwarning(\"Warning\",\"Please input Gas Price ( Value > 0 )\")\n return 0\n if gasFee == '' or gasFee <= 0:\n messagebox.showwarning(\"Warning\",\"Please input Gas Fee ( Value > 0 )\")\n return 0\n return 1\n\n def initUI(self):\n # Show Account & Balance\n console_log('Current Account: ' + str(PROVIDER_ADDRESS))\n # show current balance of connected wallet\n console_log('Current Balance: ' + str(self.w3.fromWei(self.w3.eth.get_balance(PROVIDER_ADDRESS), 'ether')) + ' ETH')\n lblBalanceVal.set(self.w3.fromWei(self.w3.eth.get_balance(PROVIDER_ADDRESS), 'ether'))\n # show the current price between ETH and Token\n console_log('ETH/'+ DEFAULT_TOKEN +' Price : ' + str(self.traderContract.functions.getEstimatedETHforToken(TOKEN_LIST[DEFAULT_TOKEN]['address'], 1).call()))\n\n # default settings\n lblAccountVal.set(PROVIDER_ADDRESS)\n lblCurPriceVal.set(DEFAULT_PRICE)\n lblSigPriceVal.set(DEFAULT_PRICE)\n lblGapPriceVal.set(0.0)\n lblGapPriceLimit.set(' ( Must be < 25% ) ')\n txtAmountInVal.set(DEFAULT_BUY_IN)\n txtAmountOutVal.set(DEFAULT_BUY_OUT)\n lblAmountInCurrency.set('(ETH)')\n lblAmountOutCurrency.set('(DAI)')\n lblAmountEst.set(' ( Must be <= Estimated Max[0.0])')\n txtGasPriVal.set(DEFAULT_GAS_PRICE)\n txtGasFeeVal.set(DEFAULT_GAS_FEE)\n lblGasEstVal.set(' ( Must be >= Estimated Fee[0] )')\n checkAuto.set(0)\n\n window.mainloop()\n\n def updateUI(self, tradeType='BUY', token='DAI', sigPrice=0, amountIn=0, amountOut=0, gasFee=0, gasPrice=0):\n # Show Account & Balance\n console_log('Current Account: ' + str(PROVIDER_ADDRESS))\n self.resetToken(token)\n if tradeType == 'BUY':\n combo_type.current(0)\n console_log('Current Balance: ' + str(self.w3.fromWei(self.w3.eth.get_balance(PROVIDER_ADDRESS), 'ether')) + ' ETH')\n console_log('ETH/'+ token +' : ' + str(self.traderContract.functions.getEstimatedETHforToken(TOKEN_LIST[token]['address'], 1).call()))\n lblBalanceVal.set(self.w3.fromWei(self.w3.eth.get_balance(PROVIDER_ADDRESS), 'ether'))\n combo_currency['values'] = ('ETH')\n 
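# note: ('ETH') is a plain string, not a 1-tuple; Tk coerces it to a\n # single-item list here, but ('ETH',) would make the intent explicit\n 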
combo_currency.current(0)\n combo_currency['state'] = 'disable'\n combo_currency_out['state'] = 'readonly'\n combo_currency_out['values'] = list(TOKEN_LIST.keys())\n combo_currency_out.current(list(TOKEN_LIST.keys()).index(token))\n lblAmountInCurrency.set('(ETH)')\n lblAmountOutCurrency.set('('+ token +')')\n else:\n combo_type.current(1)\n console_log('Current Balance: ' + str(self.w3.fromWei(self.tokenContract.functions.balanceOf(PROVIDER_ADDRESS).call(), 'ether')) + ' ' +token)\n console_log(token +'/ETH : ' + str(self.traderContract.functions.getEstimatedTokenforETH(TOKEN_LIST[token]['address'], 1).call()))\n lblBalanceVal.set(self.w3.fromWei(self.tokenContract.functions.balanceOf(PROVIDER_ADDRESS).call(), 'ether'))\n combo_currency['state'] = 'readonly'\n combo_currency['values'] = list(TOKEN_LIST.keys())\n combo_currency.current(list(TOKEN_LIST.keys()).index(token))\n combo_currency_out['values'] = ('ETH')\n combo_currency_out.current(0)\n combo_currency_out['state'] = 'disable'\n lblAmountInCurrency.set('('+ token +')')\n lblAmountOutCurrency.set('(ETH)')\n # update settings\n if sigPrice > 0:\n self.setCurPrice()\n lblSigPriceVal.set(sigPrice)\n lblGapPriceVal.set(abs(lblCurPriceVal.get() - lblSigPriceVal.get()) * 100 / lblCurPriceVal.get())\n if gasPrice > 0:\n txtGasPriVal.set(gasPrice)\n if gasFee > 0:\n txtGasFeeVal.set(gasFee)\n if amountIn > 0:\n txtAmountInVal.set(amountIn)\n if amountOut > 0:\n txtAmountOutVal.set(amountOut)\n\n def buy(self, amountIn, amountOut, token, gasFee, gasPrice, isAuto):\n console_log('Buy start ==========>')\n # convert amounts\n _token = self.w3.toChecksumAddress(token)\n # _token = token\n _amountIn = self.w3.toWei(Decimal(amountIn), 'ether')\n _amountOut = self.w3.toWei(Decimal(amountOut), 'ether')\n _gasPrice = self.w3.toWei(Decimal(gasPrice), 'gwei')\n # Get Estimated Amount Out\n estimatedAmount = self.traderContract.functions.getEstimatedETHforToken(_token, _amountIn).call()\n if isAuto:\n _amountOut = estimatedAmount - 100\n txtAmountOutVal.set(self.w3.fromWei(estimatedAmount, 'ether'))\n else:\n estimatedAmount = self.w3.fromWei(estimatedAmount, 'ether')\n if estimatedAmount < amountOut:\n console_log(' > Too high amount(estimated: ' + str(estimatedAmount) + ' )')\n lblAmountEst.set(' ( Must be <= Estimated Max['+ str(estimatedAmount) +'])')\n return\n nonce = self.w3.eth.get_transaction_count(PROVIDER_ADDRESS)\n transaction = self.traderContract.functions.Buy(\n _amountOut,\n _token,\n RECIPIENT_ADDRESS).buildTransaction({\n 'gas': gasFee,\n 'gasPrice': _gasPrice,\n 'from': PROVIDER_ADDRESS,\n 'value': _amountIn,\n 'nonce': nonce\n })\n estimateFee = self.w3.eth.estimateGas(transaction={'to':CONTRACT_ADDRESS, 'from':PROVIDER_ADDRESS, 'value':_amountIn, 'data':transaction[\"data\"]})\n if isAuto:\n gasFee = estimateFee + 100\n txtGasFeeVal.set(gasFee)\n else: \n if estimateFee > gasFee:\n console_log(' > Too low gas(estimated: ' + str(estimateFee) + ')')\n lblGasEstVal.set(' ( Must be >= Estimated Fee['+ str(estimateFee) +'] )')\n return\n signed_txn = self.w3.eth.account.signTransaction(transaction, private_key=PRIVATE_ADDRESS)\n self.w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n tx_hash = self.w3.toHex(self.w3.keccak(signed_txn.rawTransaction))\n console_log(' > tx: ' + tx_hash)\n status = self.w3.eth.wait_for_transaction_receipt(tx_hash)['status']\n if status == True:\n console_log(' > Success !!!')\n else:\n console_log(' > Failed ~')\n console_log('Buy finished ==========>')\n\n def sell(self, amountIn, amountOut, token, 
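# amounts are in ether units, gasPrice in gwei\n 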
gasFee, gasPrice, isAuto):\n console_log('Sell start ==========>')\n # convert amounts\n _token = self.w3.toChecksumAddress(token)\n _amountIn = self.w3.toWei(Decimal(amountIn), 'ether')\n _amountOut = self.w3.toWei(Decimal(amountOut), 'ether')\n _gasPrice = self.w3.toWei(Decimal(gasPrice), 'gwei')\n # Get Estimated Amount Out\n estimatedAmount = self.traderContract.functions.getEstimatedTokenforETH(_token, _amountIn).call()\n if isAuto:\n _amountOut = estimatedAmount - 100\n txtAmountOutVal.set(self.w3.fromWei(estimatedAmount, 'ether'))\n else:\n estimatedAmount = self.w3.fromWei(estimatedAmount, 'ether')\n if estimatedAmount < amountOut:\n console_log(' > Too high amount(estimated: ' + str(estimatedAmount) + ' )')\n lblAmountEst.set(' ( Must be <= Estimated Max['+ str(estimatedAmount) +'])')\n return\n # Approve provider to trader contract\n curAllowance = self.tokenContract.functions.allowance(PROVIDER_ADDRESS, CONTRACT_ADDRESS).call()\n if curAllowance < _amountIn:\n if _amountIn > self.approve(_amountIn, gasFee, _gasPrice, 30):\n console_log(' > Sorry, insufficient allowance to trade')\n return\n else:\n console_log(' > Allowance: ' + str(curAllowance))\n # Swap token to ETH \n nonce = self.w3.eth.get_transaction_count(PROVIDER_ADDRESS)\n transaction = self.traderContract.functions.Sell(\n _amountIn,\n _amountOut,\n _token,\n PROVIDER_ADDRESS).buildTransaction({\n 'gas': gasFee,\n 'gasPrice': _gasPrice,\n 'from': PROVIDER_ADDRESS,\n 'nonce': nonce\n })\n estimateFee = self.w3.eth.estimateGas(transaction={'to':CONTRACT_ADDRESS, 'from':PROVIDER_ADDRESS, 'value':0, 'data':transaction[\"data\"]})\n if isAuto:\n gasFee = estimateFee + 100\n txtGasFeeVal.set(gasFee)\n else:\n if estimateFee > gasFee:\n console_log(' > Too low gas (estimated: ' + str(estimateFee) + ')')\n lblGasEstVal.set(' ( Must be >= Estimated Fee['+ str(estimateFee) +'] )')\n return\n signed_txn = self.w3.eth.account.signTransaction(transaction, private_key=PRIVATE_ADDRESS)\n self.w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n tx_hash = self.w3.toHex(self.w3.keccak(signed_txn.rawTransaction))\n console_log(' > tx: ' + str(tx_hash))\n status = self.w3.eth.wait_for_transaction_receipt(tx_hash)['status']\n if status:\n console_log(' > Success !!!')\n else:\n console_log(' > Failed ~')\n\n console_log('Sell finished <==========')\n\n def approve(self, amountIn, gasFee, gasPrice, delay):\n console_log(' > Approving now... 
')\n nonce = self.w3.eth.get_transaction_count(PROVIDER_ADDRESS)\n transaction = self.tokenContract.functions.approve(CONTRACT_ADDRESS, amountIn).buildTransaction({\n 'gas': gasFee,\n 'gasPrice': gasPrice,\n 'from': PROVIDER_ADDRESS,\n 'nonce': nonce\n })\n signed_txn = self.w3.eth.account.signTransaction(transaction, private_key=PRIVATE_ADDRESS)\n self.w3.eth.sendRawTransaction(signed_txn.rawTransaction)\n for i in range(delay, 0, -1):\n sys.stdout.write(\"\\r\")\n sys.stdout.write(\" (please wait for {:2d}s)\".format(i))\n sys.stdout.flush()\n time.sleep(1)\n curAllowance = self.tokenContract.functions.allowance(PROVIDER_ADDRESS, CONTRACT_ADDRESS).call()\n console_log('\\n')\n console_log(' > Current Allowance : ' + str(curAllowance))\n return curAllowance\n\n# ================================================> GUI Details <========================================================= #\n\ntrader = Trader()\n\n# Event Handler\ndef onTradeTypeChanged(e):\n trader.updateUI(combo_type.get())\ndef onCurrencyInChanged(e):\n trader.updateUI(combo_type.get(), combo_currency.get())\ndef onCurrencyOutChanged(e):\n trader.updateUI(combo_type.get(), combo_currency_out.get())\n# window\nwindow.title(\"Uniswap Trading Bot\")\nwindow.geometry('650x500')\nwindow.resizable(False, False)\n# tradig type(buy or sell)\ncombo_type = Combobox(window, width=6, state='readonly')\ncombo_type['values']= (\"BUY\", \"SELL\")\ncombo_type.current(0) #set default value as 'Buy'\ncombo_type['state'] = 'readonly'\ncombo_type.bind(\"<>\", onTradeTypeChanged)\ncombo_type.grid(column=0, row=0, padx=20, pady=20, sticky=\"W\")\n# linked account\nlbl_account_txt = Label(window, text='Account: ', width=15)\nlbl_account_txt.grid(column=1, row=0, sticky=\"E\")\nlbl_account_val = Label(window, textvariable=lblAccountVal)\nlbl_account_val.grid(column=2, row=0, sticky=\"W\", columnspan=3)\n# current balance\nlbl_balance_txt = Label(window, text='Current Balance: ')\nlbl_balance_txt.grid(column=0, row=1, padx=20, sticky=\"E\")\nlbl_balance_val = Label(window, textvariable=lblBalanceVal)\nlbl_balance_val.grid(column=1, row=1, sticky=\"E\")\ncombo_currency = Combobox(window, width=5)\ncombo_currency['values']= (\"ETH\")\ncombo_currency.current(0) #set default value as 'ETH'\ncombo_currency['state'] = 'disable'\ncombo_currency.bind(\"<>\", onCurrencyInChanged)\ncombo_currency.grid(column=2, row=1, sticky=\"E\")\nlbl_exchange_sep = Label(window, text=' Swap to =====> ')\nlbl_exchange_sep.grid(column=3, row=1)\ncombo_currency_out = Combobox(window, width=5)\ncombo_currency_out['values']= list(TOKEN_LIST.keys())\ncombo_currency_out.current(0) #set default value as 'ETH'\ncombo_currency_out['state'] = 'readonly'\ncombo_currency_out.bind(\"<>\", onCurrencyOutChanged)\ncombo_currency_out.grid(column=4, row=1, sticky=\"W\")\n# current price\nlbl_cur_price_txt = Label(window, text='Current Price: ')\nlbl_cur_price_txt.grid(column=0, row=2, padx=20, sticky=\"E\")\nlbl_cur_price_val = Label(window, textvariable=lblCurPriceVal)\nlbl_cur_price_val.grid(column=1, row=2, sticky=\"E\")\nlbl_cur_price_usd = Label(window, text='(USD)')\nlbl_cur_price_usd.grid(column=2, row=2, sticky=\"W\")\n# signal price\nlbl_sig_price_txt = Label(window, text='Signal Price: ')\nlbl_sig_price_txt.grid(column=0, row=3, padx=20, sticky=\"E\")\nlbl_sig_price_val = Label(window, textvariable=lblSigPriceVal)\nlbl_sig_price_val.grid(column=1, row=3, sticky=\"E\")\nlbl_sig_price_usd = Label(window, text='(USD)')\nlbl_sig_price_usd.grid(column=2, row=3, sticky=\"W\")\n# gap 
price(%)\nlbl_gap_price_txt = Label(window, text='Gap of Prices: ')\nlbl_gap_price_txt.grid(column=0, row=4, padx=20, sticky=\"E\")\nlbl_gap_price_val = Label(window, textvariable=lblGapPriceVal)\nlbl_gap_price_val.grid(column=1, row=4, sticky=\"E\")\nlbl_gap_price_usd = Label(window, text='(%)')\nlbl_gap_price_usd.grid(column=2, row=4, sticky=\"W\")\nlbl_gap_price_lim = Label(window, textvariable=lblGapPriceLimit)\nlbl_gap_price_lim.grid(column=3, row=4, sticky=\"W\")\n# amount In/Out/Estimated\nlbl_amount_in_txt = Label(window, text='Amount In: ')\nlbl_amount_in_txt.grid(column=0, row=5, padx=20, pady=10, sticky=\"E\")\ntxt_amount_in_val = Spinbox(window, from_=0, to=10, width=25, textvariable=txtAmountInVal)\ntxt_amount_in_val.grid(column=1, row=5, sticky=\"E\")\nlbl_amount_in_usd = Label(window, textvariable=lblAmountInCurrency)\nlbl_amount_in_usd.grid(column=2, row=5, sticky=\"W\")\nlbl_amount_out_txt = Label(window, text='Amount Out: ')\nlbl_amount_out_txt.grid(column=0, row=6, padx=20, sticky=\"E\")\ntxt_amount_out_val = Spinbox(window, from_=0, to=10, width=25, textvariable=txtAmountOutVal)\ntxt_amount_out_val.grid(column=1, row=6, sticky=\"E\")\nlbl_amount_out_usd = Label(window, textvariable=lblAmountOutCurrency)\nlbl_amount_out_usd.grid(column=2, row=6, sticky=\"W\")\nlbl_amount_est_txt = Label(window, textvariable=lblAmountEst)\nlbl_amount_est_txt.grid(column=3, row=6, sticky=\"W\", columnspan=2)\n# gas price\nlbl_gas_price_txt = Label(window, text='Gas Price: ')\nlbl_gas_price_txt.grid(column=0, row=7, padx=20, pady=10, sticky=\"E\")\ntxt_gas_price_val = Spinbox(window, from_=0, to=50000, width=15, textvariable=txtGasPriVal)\ntxt_gas_price_val.grid(column=1, row=7, sticky=\"E\")\nlbl_gas_price_usd = Label(window, text='(GWI)')\nlbl_gas_price_usd.grid(column=2, row=7, sticky=\"W\", columnspan=2)\n# gas fee\nlbl_gas_fee_txt = Label(window, text='Gas Fee: ')\nlbl_gas_fee_txt.grid(column=0, row=8, padx=20, sticky=\"E\")\ntxt_gas_fee_val = Spinbox(window, from_=0, to=50000, width=15, textvariable=txtGasFeeVal)\ntxt_gas_fee_val.grid(column=1, row=8, sticky=\"E\")\nlbl_gas_est_txt = Label(window, textvariable=lblGasEstVal)\nlbl_gas_est_txt.grid(column=3, row=8, sticky=\"W\", columnspan=2)\n# runtime lines\nlbl_command_txt = Label(window, text='Runtime: ')\nlbl_command_txt.grid(column=0, row=9, padx=20, sticky=\"E\")\ncommand_textbox = scrolledtext.ScrolledText(window,width=60,height=10)\ncommand_textbox.config(background='#333', foreground='#fff')\ncommand_textbox.grid(column=1, row=9, pady=10, sticky=\"E\", columnspan=4)\n# check auto\nchk_auto = Checkbutton(window, text='Auto',variable=checkAuto, onvalue=1, offvalue=0)\nchk_auto.grid(column=0, row=10, pady=10, sticky=\"E\")\n\n# ============================================> Events & Auto Trading <==============================================\n\nfrom threading import Timer\nimport random\nimport config\nfrom signals import TEST_SIGNALS\n\n# # Event Handler\ndef isEnoughBalance(token):\n trader.updateUI(combo_type.get(), token)\n if txtAmountInVal.get() > lblBalanceVal.get():\n messagebox.showwarning(\"Warning\",\"Insufficient Balance !!! 
( Must be more than AmountIn: \"+ str(txtAmountInVal.get()) +\")\")\n return 0\n return 1\n\ndef trade_start():\n if checkAuto.get() == 1: # auto trading\n auto_start()\n else: # manual trading\n if trader.validateForm():\n amountIn = txtAmountInVal.get()\n amountOut = txtAmountOutVal.get()\n gasPrice = txtGasPriVal.get()\n gasFee = txtGasFeeVal.get()\n if combo_type.get() == 'BUY':\n token = combo_currency_out.get()\n if isEnoughBalance(token):\n trader.buy(amountIn, amountOut, TOKEN_LIST[token]['address'], gasFee, gasPrice, False)\n else:\n token = combo_currency.get()\n if isEnoughBalance(token):\n trader.sell(amountIn, amountOut, TOKEN_LIST[token]['address'], gasFee, gasPrice, False)\n# Close window\ndef trade_exit():\n window.destroy()\n \ndef setInterval(timer, task):\n if task():\n Timer(timer, setInterval, [timer, task]).start()\n return True\n return False\n\n# Auto Trading \ndef auto_trade():\n # Check if balance is more than amountIn\n index = random.randint(0, len(TEST_SIGNALS) - 1)\n sig_type = TEST_SIGNALS[index]['type'] # renamed from 'type' to avoid shadowing the builtin\n token = TEST_SIGNALS[index]['token']\n price = TEST_SIGNALS[index]['price']\n console_log('[Signal'+ str(index + 1) +'] ===========================')\n if sig_type == 'BUY':\n console_log('>>> ' + sig_type + ' ETH => ' + token + ', Price: ' + str(price) + '$')\n amountIn = DEFAULT_BUY_IN\n else:\n console_log('>>> ' + sig_type + ' '+ token +' => ETH, Price: ' + str(price) + '$')\n amountIn = DEFAULT_SELL_IN \n trader.updateUI(sig_type, token, price, amountIn)\n if not isEnoughBalance(token):\n global isAutoStarted\n isAutoStarted = False\n return 0\n if lblGapPriceVal.get() > 25:\n console_log(' > Too much price gap ('+ str(lblGapPriceVal.get()) +'%)')\n confirm_start('Gas Warning', 'Too much price gap ('+ str(lblGapPriceVal.get()) +'%) \\n Do you want to try to trade again?')\n return\n gasPrice = txtGasPriVal.get()\n gasFee = txtGasFeeVal.get()\n if sig_type == 'BUY': \n trader.buy(amountIn, DEFAULT_BUY_OUT, TOKEN_LIST[token]['address'], gasFee, gasPrice, True)\n else:\n trader.sell(amountIn, DEFAULT_SELL_OUT, TOKEN_LIST[token]['address'], gasFee, gasPrice, True)\n return isAutoStarted\n\n# Start Auto\ndef auto_start():\n # Trading automatically every 60s\n global isAutoStarted\n isAutoStarted = True\n setInterval(60, auto_trade)\n# Stop Auto\ndef auto_stop():\n if checkAuto.get() == 0:\n messagebox.showwarning(\"Warning\",\"Stop is only available in auto-trading mode\")\n return\n global isAutoStarted\n if isAutoStarted:\n console_log(' > Auto Trade Stopping...')\n console_log('Stopped auto trading')\n isAutoStarted = False\n else:\n messagebox.showwarning(\"Warning\",\"Auto trading has not started yet\")\n \n# Insert Console Log\ndef console_log(text):\n print(text)\n if command_textbox.get(INSERT) != '':\n text += '\\n'\n command_textbox.insert(INSERT, text)\n# Confirm Dialog\ndef confirm_start(title, content):\n answer = askyesno(title=title, message=content)\n if answer:\n trade_start()\n\n# buttons\nbtn_trade = Button(window, text=\"Trade(Buy & Sell)\", command=trade_start)\nbtn_trade.grid(column=1, row=10, pady=10, sticky=\"W\")\nbtn_stop = Button(window, text=\"Stop\", command=auto_stop)\nbtn_stop.grid(column=3, row=10, sticky=\"E\")\nbtn_exit = Button(window, text=\"Exit\", command=trade_exit)\nbtn_exit.grid(column=4, row=10, sticky=\"W\")\n\nif __name__ == '__main__':\n trader.initUI()","sub_path":"src/trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":23151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
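# Illustrative sketch (assumed, standalone): trader.py above re-arms a
# threading.Timer inside setInterval() to poll every 60 s. A minimal
# self-contained version of that pattern, with hypothetical names
# set_interval / tick, can be run on its own:
import threading

def set_interval(seconds, task):
    # run the task immediately; re-arm only while it returns a truthy value
    def runner():
        if task():
            threading.Timer(seconds, runner).start()
    runner()

state = {"ticks": 0}

def tick():
    state["ticks"] += 1
    print("tick", state["ticks"])
    return state["ticks"] < 3  # stop after three runs, like isAutoStarted going False

set_interval(0.1, tick)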
+{"seq_id":"555935439","text":"import numpy as np\nimport laplacian\nimport flood_fill\nimport half_edge\nimport datetime \n\"\"\"\nfind curvature at every point -- if large than 0.01 (not plane) mark edges\nuse edges to cluster facets -- connect single facets using floodfill\nif some facets still single merge them to their neighbor facet with closest norm\n\"\"\"\n\ndef segment_by_curvature(coord_array, tri_array):\n \"\"\" Function to detect planar surfaces and label them, it will separate curved surfaces from planar surfaces.\n :param coord_array: xyz coordinates per vertex in array of shape=(#points, 3)\n :param tri_array: vertex connection indices of the part in array of shape=(#triangles, 3)\n :return label: array with surface id numbers per triangle, shape=(#triangles, )\n \"\"\"\n print (\"Recognizing surfaces...\")\n starttime = datetime.datetime.now() \n \n # Build numpy structured array half edge for an efficient computation\n M = half_edge.half_edge(coord_array, tri_array)\n nf = M['facet'].shape[0]\n\n L, Mlapl = laplacian.laplacian(coord_array, tri_array)\n mean_curv = np.dot(L.todense(), coord_array)\n mean_curv = np.sqrt(np.sum(mean_curv**2, axis=1))\n thresh = 0.001\n if True:\n #for thresh in np.linspace(0.001,0.1,100): \n \n bndry_vtx = np.array((mean_curv > thresh).astype(int))\n bndry_edge = np.logical_and(bndry_vtx[M['edge'][:, 0]], bndry_vtx[M['edge'][:, 1]]).astype(int)\n \n nei = [[] for _ in range(nf)] # Instead of cell(nf,1), list of lists where each list can be set independently\n for i in range(nf): # loop over all facets\n fh = M['facet_child_halfedge'][i] # facet i starts from halfedge fh\n start_fh = fh\n \n pf = M['halfedge_parent_facet'][M['halfedge_opposite_halfedge'][fh]].tolist() # opposite half edge - its facet\n if pf == 0 or bndry_edge[int(M['halfedge_parent_edge'][fh])] == 1: # fisrt facet or fh is edge\n pf = [np.nan] # Use list, assign list with np.nan value to later prevent appending to list\n \n if not np.isnan(pf[0]): # else\n nei[i].append(pf)\n \n fh = M['halfedge_next_halfedge'][fh]\n while fh != start_fh: # repeat for the other two edges\n pf = M['halfedge_parent_facet'][M['halfedge_opposite_halfedge'][fh]].tolist() \n if pf == 0 or bndry_edge[int(M['halfedge_parent_edge'][fh])] == 1:\n pf = [np.nan]\n \n if not np.isnan(pf[0]):\n nei[i].append(pf)\n fh = M['halfedge_next_halfedge'][fh]\n \n label = np.arange(nf) # Index shift used here\n old_label = np.zeros_like(label) \n \n while np.sum(label != old_label): # cluster facets btween edges\n \n old_label = np.copy(label)\n label_map = np.arange(nf) # Index shift\n for i in range(nf):\n list_with_lists = [[i]] + nei[i]\n flattened = [int(val) for sublist in list_with_lists for val in sublist] # 'flatten' list_with_lists\n nei_label = label[flattened]\n min_nei_label = np.amin(label_map[nei_label])\n label_map[nei_label] = min_nei_label\n \n label = label_map[label] # Shifted one down because later used as Python index\n \n #change label indexs from discrete to continuous\n un = np.unique(label[np.nonzero(label+1)[0]]) # Shift label when searching for non-zeros\n label_map[un] = np.arange(len(un)) # Index shift\n label = label_map[label]\n \n sorted_label = np.sort(label)\n unique_label, ia = np.unique(sorted_label, return_index=True)\n label_cnt = np.hstack((ia[np.arange(1, len(ia))], len(label)))-ia\n one_el_label = np.nonzero(label_cnt == 1)[0] # single labels\n \n label_map = np.ones_like(unique_label) \n label_map[one_el_label] = 0\n bw_label = label_map[label]\n \n i = 
np.nonzero(bw_label == 0)[0] # single facets \n while len(i): # so while not empty i\n mask = flood_fill.flood_fill(M, i[0], bw_label) #get all connected single facets\n if len(mask) > 2:\n label[mask] = np.amax(label)+1 # add new label number\n \n bw_label[mask] = 1 # give up real single facet\n i = np.nonzero(bw_label == 0)[0] # next loop\n \n \n un = np.unique(label[np.nonzero(label+1)[0]])\n label_map = np.zeros(un[-1]+1).astype(int) # Create new label_map, Python cannot automatically resize var\n label_map[un] = np.arange(len(un)) # Index shift\n label = label_map[label]\n \n sorted_label = np.sort(label)\n unique_label, ia = np.unique(sorted_label, return_index=True)\n label_cnt = np.hstack((ia[np.arange(1, len(ia))], len(label)))-ia\n one_el_label = np.nonzero(label_cnt == 1)[0]\n \n # prestore the facet index, in case one point label is another one's target in next loop\n pts_one_label = [np.nonzero(label == one_el_label[j])[0] for j in range(len(one_el_label))]\n \n for j in range(len(one_el_label)):\n fid = np.nonzero(label == one_el_label[j])[0]\n fid_temp = fid[0]\n v1 = M['vertex'][M['facet'][fid_temp, 1], :] - M['vertex'][M['facet'][fid_temp, 0], :] # line10\n v2 = M['vertex'][M['facet'][fid_temp, 2], :] - M['vertex'][M['facet'][fid_temp, 0], :] # line20\n my_n_temp = np.cross(v1, v2) # norm of facet\n my_n = my_n_temp/np.sqrt(np.sum(my_n_temp**2)) # norm vector\n \n fh = int(M['facet_child_halfedge'][pts_one_label[j]])# get the original index\n start_fh = fh\n \n pf = M['halfedge_parent_facet'][M['halfedge_opposite_halfedge'][fh]].astype(int)\n v1 = M['vertex'][M['facet'][pf, 1], :] - M['vertex'][M['facet'][pf, 0], :]\n v2 = M['vertex'][M['facet'][pf, 2], :] - M['vertex'][M['facet'][pf, 0], :]\n n_temp = np.cross(v1, v2)\n n = n_temp/np.sqrt(np.sum(n_temp**2)) # norm vector of nei facet\n nei_n = n\n nei_id = pf\n fh = int(M['halfedge_next_halfedge'][fh]) # next edge\n \n while fh != start_fh:\n pf = M['halfedge_parent_facet'][M['halfedge_opposite_halfedge'][fh]].astype(int)\n v1 = M['vertex'][M['facet'][pf, 1], :] - M['vertex'][M['facet'][pf, 0], :]\n v2 = M['vertex'][M['facet'][pf, 2], :] - M['vertex'][M['facet'][pf, 0], :]\n n_temp = np.cross(v1, v2)\n n = n_temp/np.sqrt(np.sum(n_temp**2))\n nei_n = np.vstack((nei_n, n))\n nei_id = np.hstack((nei_id, pf))\n fh = int(M['halfedge_next_halfedge'][fh])\n \n max_id = np.argmax(np.dot(nei_n, my_n.conj().T)) # get the smallest angle between nei norm and original facet\n label[fid] = label[nei_id[max_id]] # merge this label to the max one\n \n un = np.unique(label[np.nonzero(label)[0]])\n label_map[un] = np.arange(1, len(un)+1)\n label = label_map[label] # Shifted id numbers w.r.t MATLAB\n \n endtime = datetime.datetime.now() \n elapsedtime = (endtime - starttime).seconds\n print (\"%d surfaces recognized. 
Elapsed time is %s seconds.\" %(np.amax(label)+1,elapsedtime))\n \n label+=1\n \n print(thresh,np.amax(label))\n \n np.savez_compressed('temp/label', label=label)\n return label # Work with MATLAB label format\n \nif __name__ == \"__main__\":\n data = np.load('temp/coo.npz') \n tri_array = data['tri_array'] ; coord_array = data['coord_array']\n label = segment_by_curvature(coord_array, tri_array)\n ","sub_path":"segment_by_curvature.py","file_name":"segment_by_curvature.py","file_ext":"py","file_size_in_byte":7741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"486616390","text":"#!/usr/bin/env python\n\"\"\"\nopy_main.py\n\"\"\"\nfrom __future__ import print_function\n\nimport hashlib\nimport optparse\nimport os\nimport sys\nimport marshal\nimport types\n\nfrom . import pytree\nfrom . import skeleton\n\nfrom .pgen2 import driver, parse, pgen, grammar\nfrom .pgen2 import token\nfrom .pgen2 import tokenize\n\nfrom .compiler2 import consts\nfrom .compiler2 import dis_tool\nfrom .compiler2 import misc\nfrom .compiler2 import transformer\n\n# Disabled for now because byterun imports 'six', and that breaks the build.\nfrom .byterun import execfile\nfrom .byterun import ovm\n\nfrom frontend import args\nfrom core import util\n\nfrom ovm2 import oheap2\n\nlog = util.log\n\n\n# From lib2to3/pygram.py. This takes the place of the 'symbol' module.\n# compiler/transformer module needs this.\n\nclass Symbols(object):\n\n def __init__(self, gr):\n \"\"\"\n Creates an attribute for each grammar symbol (nonterminal), whose value is\n the symbol's type (an int >= 256).\n \"\"\"\n for name, symbol in gr.symbol2number.items():\n setattr(self, name, symbol)\n #log('%s -> %d' % (name, symbol))\n # For transformer to use\n self.number2symbol = gr.number2symbol\n\n\ndef HostStdlibNames():\n import symbol\n import token\n names = {}\n for k, v in symbol.sym_name.items():\n names[k] = v\n for k, v in token.tok_name.items():\n names[k] = v\n return names\n\n\ndef WriteGrammar(grammar_path, pickle_path):\n log(\"Generating grammar tables from %s\", grammar_path)\n g = pgen.generate_grammar(grammar_path)\n log(\"Writing grammar tables to %s\", pickle_path)\n try:\n # calls pickle.dump on self.__dict__ after making it deterministic\n g.dump(pickle_path)\n except OSError as e:\n log(\"Writing failed: %s\", e)\n\n\ndef CountTupleTree(tu):\n \"\"\"Count the nodes in a tuple parse tree.\"\"\"\n if isinstance(tu, tuple):\n s = 0\n for entry in tu:\n s += CountTupleTree(entry)\n return s\n elif isinstance(tu, int):\n return 1\n elif isinstance(tu, str):\n return 1\n else:\n raise AssertionError(tu)\n\n\nclass TupleTreePrinter(object):\n def __init__(self, names):\n self._names = names\n\n def Print(self, tu, f=sys.stdout, indent=0):\n ind = ' ' * indent\n f.write(ind)\n if isinstance(tu, tuple):\n f.write(self._names[tu[0]])\n f.write('\\n')\n for entry in tu[1:]:\n self.Print(entry, f, indent=indent+1)\n elif isinstance(tu, int):\n f.write(str(tu))\n f.write('\\n')\n elif isinstance(tu, str):\n f.write(str(tu))\n f.write('\\n')\n else:\n raise AssertionError(tu)\n\n\nclass TableOutput(object):\n\n def __init__(self, out_dir):\n self.out_dir = out_dir\n self.frames_f = open(os.path.join(out_dir, 'frames.tsv2'), 'w')\n self.names_f = open(os.path.join(out_dir, 'names.tsv2'), 'w')\n self.consts_f = open(os.path.join(out_dir, 'consts.tsv2'), 'w')\n self.flags_f = open(os.path.join(out_dir, 'flags.tsv2'), 'w')\n self.ops_f = open(os.path.join(out_dir, 'ops.tsv2'), 
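# one row per decoded bytecode op\n 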
'w')\n\n # NOTE: The opcode encoding is variable length, so bytecode_bytes is\n # different than the number of instructions.\n print('path\\tcode_name\\targcount\\tnlocals\\tstacksize\\tbytecode_bytes',\n file=self.frames_f)\n print('path\\tcode_name\\tkind\\tname', file=self.names_f)\n print('path\\tcode_name\\ttype\\tlen_or_val', file=self.consts_f)\n print('path\\tcode_name\\tflag', file=self.flags_f)\n print('path\\tcode_name\\top_name\\top_arg', file=self.ops_f)\n\n def WriteFrameRow(self, path, code_name, argcount, nlocals, stacksize,\n bytecode_bytes):\n row = [path, code_name, str(argcount), str(nlocals), str(stacksize),\n str(bytecode_bytes)]\n print('\\t'.join(row), file=self.frames_f)\n\n def WriteNameRow(self, path, code_name, kind, name):\n row = [path, code_name, kind, name]\n print('\\t'.join(row), file=self.names_f)\n\n def WriteConstRow(self, path, code_name, type_, len_or_val):\n row = [path, code_name, type_, str(len_or_val)]\n print('\\t'.join(row), file=self.consts_f)\n\n def WriteFlagRow(self, path, code_name, flag_name):\n row = [path, code_name, flag_name]\n print('\\t'.join(row), file=self.flags_f)\n\n def WriteOpRow(self, path, code_name, op_name, op_arg):\n row = [path, code_name, op_name, str(op_arg)]\n print('\\t'.join(row), file=self.ops_f)\n\n def Close(self):\n self.frames_f.close()\n self.names_f.close()\n self.consts_f.close()\n self.flags_f.close()\n self.ops_f.close()\n log('Wrote 5 files in %s', self.out_dir)\n\n\ndef WriteDisTables(pyc_path, co, out):\n \"\"\"Write 3 TSV files.\"\"\"\n #log('Disassembling %s in %s', co, pyc_path)\n out.WriteFrameRow(pyc_path, co.co_name, co.co_argcount, co.co_nlocals,\n co.co_stacksize, len(co.co_code))\n\n # Write a row for every name\n for name in co.co_names:\n out.WriteNameRow(pyc_path, co.co_name, 'name', name)\n for name in co.co_varnames:\n out.WriteNameRow(pyc_path, co.co_name, 'var', name)\n for name in co.co_cellvars:\n out.WriteNameRow(pyc_path, co.co_name, 'cell', name)\n for name in co.co_freevars:\n out.WriteNameRow(pyc_path, co.co_name, 'free', name)\n\n # Write a row for every op.\n for op_name, op_arg in dis_tool.ParseOps(co.co_code):\n out.WriteOpRow(pyc_path, co.co_name, op_name, op_arg)\n\n # TODO: Write a row for every flag. OPy outputs these:\n # CO_VARARGS, CO_VAR_KEYWORDS, CO_GENERATOR, CO_NEWLOCALS (we only support\n # this?) 
FUTURE_DIVISION, FUTURE_ABSOLUTE_IMPORT, etc.\n for flag in sorted(consts.VALUE_TO_NAME):\n if co.co_flags & flag:\n flag_name = consts.VALUE_TO_NAME[flag]\n out.WriteFlagRow(pyc_path, co.co_name, flag_name)\n\n # Write a row for every constant\n for const in co.co_consts:\n if isinstance(const, int):\n len_or_val = const\n elif isinstance(const, (str, tuple)):\n len_or_val = len(const)\n else:\n len_or_val = 'NA'\n out.WriteConstRow(pyc_path, co.co_name, const.__class__.__name__, len_or_val)\n\n if isinstance(const, types.CodeType):\n WriteDisTables(pyc_path, const, out)\n\n\ndef Options():\n \"\"\"Returns an option parser instance.\"\"\"\n p = optparse.OptionParser()\n\n # NOTE: default command is None because empty string is valid.\n\n # NOTE: In 'opy run oil.pyc -c', -c is an arg to opy, and not a flag.\n\n p.add_option(\n '-c', dest='command', default=None,\n help='Python command to run')\n return p\n\n\n# Made by the Makefile.\nPICKLE_REL_PATH = '_build/opy/py27.grammar.pickle'\n\ndef OpyCommandMain(argv):\n \"\"\"Dispatch to the right action.\"\"\"\n\n # TODO: Use core/args.\n #opts, argv = Options().parse_args(argv)\n\n try:\n action = argv[0]\n except IndexError:\n raise args.UsageError('opy: Missing required subcommand.')\n\n argv = argv[1:] # TODO: Should I do input.ReadRequiredArg()?\n # That will shift the input.\n\n if action in (\n 'parse', 'compile', 'dis', 'ast', 'symbols', 'cfg', 'compile-ovm',\n 'eval', 'repl', 'run', 'run-ovm'):\n loader = util.GetResourceLoader()\n f = loader.open(PICKLE_REL_PATH)\n gr = grammar.Grammar()\n gr.load(f)\n f.close()\n\n # In Python 2 code, always use from __future__ import print_function.\n try:\n del gr.keywords[\"print\"]\n except KeyError:\n pass\n\n symbols = Symbols(gr)\n pytree.Init(symbols) # for type_repr() pretty printing\n transformer.Init(symbols) # for _names and other dicts\n\n compiler = skeleton.Compiler(gr)\n else:\n # e.g. pgen2 doesn't use any of these. Maybe we should make a different\n # tool.\n compiler = None\n\n # TODO: Also have a run_spec for 'opyc run'.\n compile_spec = args.OilFlags()\n compile_spec.Flag('-emit-docstring', args.Bool, default=True,\n help='Whether to emit docstrings')\n compile_spec.Flag('-fast-ops', args.Bool, default=True,\n help='Whether to emit LOAD_FAST, STORE_FAST, etc.')\n compile_spec.Flag('-oil-subset', args.Bool, default=False,\n help='Only allow the constructs necessary to implement'\n 'Oil. 
Example: using multiple inheritance will abort '\n 'compilation.')\n\n #\n # Actions\n #\n\n if action == 'pgen2':\n grammar_path = argv[0]\n pickle_path = argv[1]\n WriteGrammar(grammar_path, pickle_path)\n\n elif action == 'stdlib-parse':\n # This is what the compiler/ package was written against.\n import parser\n\n py_path = argv[1]\n with open(py_path) as f:\n st = parser.suite(f.read())\n\n tree = st.totuple()\n\n printer = TupleTreePrinter(HostStdlibNames())\n printer.Print(tree)\n n = CountTupleTree(tree)\n log('COUNT %d', n)\n\n elif action == 'lex':\n py_path = argv[0]\n with open(py_path) as f:\n tokens = tokenize.generate_tokens(f.readline)\n for typ, val, start, end, unused_line in tokens:\n print('%10s %10s %-10s %r' % (start, end, token.tok_name[typ], val))\n\n elif action == 'lex-names': # Print all the NAME tokens.\n for py_path in argv:\n log('Lexing %s', py_path)\n with open(py_path) as f:\n tokens = tokenize.generate_tokens(f.readline)\n for typ, val, start, end, unused_line in tokens:\n if typ == token.NAME:\n print(val)\n\n elif action == 'parse':\n py_path = argv[0]\n with open(py_path) as f:\n tokens = tokenize.generate_tokens(f.readline)\n p = parse.Parser(gr, convert=skeleton.py2st)\n parse_tree = driver.PushTokens(p, tokens, gr.symbol2number['file_input'])\n\n if isinstance(parse_tree, tuple):\n n = CountTupleTree(parse_tree)\n log('COUNT %d', n)\n\n printer = TupleTreePrinter(transformer._names)\n printer.Print(parse_tree)\n else:\n tree.PrettyPrint(sys.stdout)\n log('\\tChildren: %d' % len(tree.children), file=sys.stderr)\n\n elif action == 'ast': # output AST\n opt, i = compile_spec.Parse(argv)\n py_path = argv[i]\n with open(py_path) as f:\n graph = compiler.Compile(f, opt, 'exec', print_action='ast')\n\n elif action == 'symbols': # output symbols\n opt, i = compile_spec.Parse(argv)\n py_path = argv[i]\n with open(py_path) as f:\n graph = compiler.Compile(f, opt, 'exec', print_action='symbols')\n\n elif action == 'cfg': # output Control Flow Graph\n opt, i = compile_spec.Parse(argv)\n py_path = argv[i]\n with open(py_path) as f:\n graph = compiler.Compile(f, opt, 'exec', print_action='cfg')\n\n elif action == 'compile': # 'opyc compile' is pgen2 + compiler2\n # spec.Arg('action', ['foo', 'bar'])\n # But that leads to some duplication.\n\n opt, i = compile_spec.Parse(argv)\n\n py_path = argv[i]\n out_path = argv[i+1]\n\n with open(py_path) as f:\n co = compiler.Compile(f, opt, 'exec')\n\n log(\"Compiled to %d bytes of top-level bytecode\", len(co.co_code))\n\n # Write the .pyc file\n with open(out_path, 'wb') as out_f:\n h = misc.getPycHeader(py_path)\n out_f.write(h)\n marshal.dump(co, out_f)\n\n elif action == 'compile-ovm':\n opt, i = compile_spec.Parse(argv)\n py_path = argv[i]\n out_path = argv[i+1]\n\n # Compile to Python bytecode (TODO: remove ovm_codegen.py)\n mode = 'exec'\n with open(py_path) as f:\n co = compiler.Compile(f, opt, mode)\n\n if 1:\n with open(out_path, 'wb') as out_f:\n oheap2.Write(co, out_f)\n return 0\n\n log(\"Compiled to %d bytes of top-level bytecode\", len(co.co_code))\n # Write the .pyc file\n with open(out_path, 'wb') as out_f:\n if 1:\n out_f.write(co.co_code)\n else:\n h = misc.getPycHeader(py_path)\n out_f.write(h)\n marshal.dump(co, out_f)\n log('Wrote only the bytecode to %r', out_path)\n\n elif action == 'eval': # Like compile, but parses to a code object and prints it\n opt, i = compile_spec.Parse(argv)\n py_expr = argv[i]\n f = skeleton.StringInput(py_expr, '')\n co = compiler.Compile(f, opt, 'eval')\n\n v = 
dis_tool.Visitor()\n v.show_code(co)\n print()\n print('RESULT:')\n print(eval(co))\n\n elif action == 'repl': # Like eval in a loop\n while True:\n py_expr = raw_input('opy> ')\n f = skeleton.StringInput(py_expr, '')\n\n # TODO: change this to 'single input'? Why doesn't this work?\n co = compiler.Compile(f, opt, 'eval')\n\n v = dis_tool.Visitor()\n v.show_code(co)\n print(eval(co))\n\n elif action == 'dis-tables':\n out_dir = argv[0]\n pyc_paths = argv[1:]\n\n out = TableOutput(out_dir)\n\n for pyc_path in pyc_paths:\n with open(pyc_path) as f:\n magic, unixtime, timestamp, code = dis_tool.unpack_pyc(f)\n WriteDisTables(pyc_path, code, out)\n\n out.Close()\n\n elif action == 'dis':\n opt, i = compile_spec.Parse(argv)\n path = argv[i]\n v = dis_tool.Visitor()\n\n if path.endswith('.py'):\n with open(path) as f:\n co = compiler.Compile(f, opt, 'exec')\n\n log(\"Compiled to %d bytes of top-level bytecode\", len(co.co_code))\n v.show_code(co)\n\n else: # assume pyc_path\n with open(path, 'rb') as f:\n v.Visit(f)\n\n elif action == 'dis-md5':\n pyc_paths = argv\n if not pyc_paths:\n raise args.UsageError('dis-md5: At least one .pyc path is required.')\n\n for path in pyc_paths:\n h = hashlib.md5()\n with open(path) as f:\n magic = f.read(4)\n h.update(magic)\n ignored_timestamp = f.read(4)\n while True:\n b = f.read(64 * 1024)\n if not b:\n break\n h.update(b)\n print('%6d %s %s' % (os.path.getsize(path), h.hexdigest(), path))\n\n elif action == 'run': # Compile and run, without writing pyc file\n # TODO: Add an option like -v in __main__\n\n #level = logging.DEBUG if args.verbose else logging.WARNING\n #logging.basicConfig(level=level)\n #logging.basicConfig(level=logging.DEBUG)\n\n opt, i = compile_spec.Parse(argv)\n\n py_path = argv[i]\n opy_argv = argv[i:]\n\n if py_path.endswith('.py'):\n with open(py_path) as f:\n co = compiler.Compile(f, opt, 'exec')\n num_ticks = execfile.run_code_object(co, opy_argv)\n\n elif py_path.endswith('.pyc') or py_path.endswith('.opyc'):\n with open(py_path) as f:\n f.seek(8) # past header. TODO: validate it!\n co = marshal.load(f)\n num_ticks = execfile.run_code_object(co, opy_argv)\n\n else:\n raise args.UsageError('Invalid path %r' % py_path)\n\n elif action == 'run-ovm': # Compile and run, without writing pyc file\n opt, i = compile_spec.Parse(argv)\n py_path = argv[i]\n opy_argv = argv[i+1:]\n\n if py_path.endswith('.py'):\n #mode = 'exec'\n mode = 'ovm' # OVM bytecode is different!\n with open(py_path) as f:\n co = compiler.Compile(f, opt, mode)\n log('Compiled to %d bytes of OVM code', len(co.co_code))\n num_ticks = ovm.run_code_object(co, opy_argv)\n\n elif py_path.endswith('.pyc') or py_path.endswith('.opyc'):\n with open(py_path) as f:\n f.seek(8) # past header. 
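The 8 bytes skipped are the 4-byte magic number plus the 4-byte timestamp, the same two fields the dis-md5 action reads above. 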
TODO: validate it!\n co = marshal.load(f)\n num_ticks = ovm.run_code_object(co, opy_argv)\n\n else:\n raise args.UsageError('Invalid path %r' % py_path)\n\n else:\n raise args.UsageError('Invalid action %r' % action)\n","sub_path":"opy/opy_main.py","file_name":"opy_main.py","file_ext":"py","file_size_in_byte":15053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"322999444","text":"from psearcher.BaseEngine import BaseEngine\nfrom retry import retry\n\n\nclass Baidu(BaseEngine):\n \n startIndex = 0\n indexGap = 10\n\n def get_url_and_parmas(self):\n url = 'http://www.baidu.com/s'\n params = {\n \"word\": self.keyWord,\n \"pn\": self.startIndex + self.indexGap*self.nextPage,\n \"rn\": 50\n }\n return url, params\n\n def parser(self, soup):\n results = []\n tags = soup.find_all(\"div\", class_=\"c-container\")[1:]\n for tag in tags:\n if not tag.h3 and not tag.p:\n continue\n title = tag.h3.get_text() if tag.h3 else tag.p.get_text()\n try:\n link = self.get_real_link(tag.h3.a.get(\"href\") if tag.h3 else tag.a.get(\"href\"))\n except Exception:\n continue\n else:\n describe = tag.get_text()\n result = {\n \"link\": link,\n \"describe\": describe,\n \"title\": title,\n }\n results.append(result)\n return results\n\n @retry(tries=3, backoff=1, delay=1)\n def get_real_link(self, baidu_search_link):\n r = self.session.get(baidu_search_link, proxies=self.proxies, timeout=5)\n return r.url\n","sub_path":"build/lib/psearcher/Baidu.py","file_name":"Baidu.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"562914242","text":"import plugins\n\nimport os\nimport valve.source.a2s\n\ndef _initialise(bot):\n plugins.register_user_command([\"cs_ip\", \"cs_info\"])\n\ndef cs_ip(bot, event, *args):\n \"\"\"\n Display CS connection string\n\n /bot cs_ip\n \"\"\"\n\n yield from bot.coro_send_message(event.conv, \"connect \" + str(os.environ['HOST_IP']).strip())\n\ndef cs_info(bot, event, *args):\n \"\"\"\n Display CS information\n\n /bot cs_info\n \"\"\"\n\n SERVER_ADDRESS = (str(os.environ['HOST_IP']).strip(), 27015)\n\n server = valve.source.a2s.ServerQuerier(SERVER_ADDRESS)\n info = server.info()\n players = server.players()\n\n html_text = 'Map: {map}
<br />'.format(**info)\n    player_count = '{player_count}/{max_players}'.format(**info)\n    html_text += 'Players: ' + player_count + '<br /><br />'\n\n    for player in sorted(players[\"players\"], key=lambda p: p[\"score\"], reverse=True):\n        html_text += '{score} {name}<br />
    '.format(**player)\n\n yield from bot.coro_send_message(event.conv, html_text)\n","sub_path":"hangupsbot/plugins/csserver.py","file_name":"csserver.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"330701040","text":"# -*- coding: utf-8 -*-\n# @Author: ichadhr\n# @Date: 2018-10-02 17:28:58\n# @Last Modified by: richard.hari@live.com\n# @Last Modified time: 2018-10-18 10:09:00\nimport sys\nimport time\nimport os\nimport appinfo\nimport itertools\nimport subprocess\nimport re\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom gui import Ui_MainWindow\nfrom pathlib import Path\nimport distutils.dir_util\nimport distutils.spawn\n# import warnings\n\n# variabel for header CSV\nHEAD_CODE_STORE = 'code_store'\nHEAD_PO_NO = 'po_no'\nHEAD_BARCODE = 'barcode'\nHEAD_QTY = 'qty'\nHEAD_MODAL = 'modal_karton'\n\nNEWDIR = 'CSV-output'\nDELIM = ';'\n\nCODE_STORE = '004575'\n\nIS_WIN32 = 'win32' in str(sys.platform).lower()\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, relative_path)\n\n return os.path.join(os.path.abspath(\".\"), relative_path)\n\n# warnings.filterwarnings(\"ignore\", message=\"RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibity. Expected 56, got 52\")\n\n# main class\nclass mainWindow(QMainWindow, Ui_MainWindow) :\n def __init__(self) :\n QMainWindow.__init__(self)\n self.setupUi(self)\n\n # app icon\n self.setWindowIcon(QIcon(':/resources/icon.png'))\n\n # centering app\n tr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n tr.moveCenter(cp)\n self.move(tr.topLeft())\n\n # path tabula\n self.jarfile = resource_path(\"tabula/bin/tabula-1.0.2-jar-with-dependencies.jar\")\n\n # button Open\n self.btOpen.clicked.connect(self.openPDF)\n\n # button convert\n self.btCnv.clicked.connect(self.BtnCnv)\n\n # status bar\n self.statusBar().showMessage('v'+appinfo._version)\n\n # hide label path\n self.lbPath.hide()\n self.lbPath.clear()\n\n\n # PATH FILE\n def openPDF(self) :\n fileName, _ = QFileDialog.getOpenFileName(self,\"Open File\", \"\",\"PDF Files (*.pdf)\")\n if fileName:\n self.lbPath.setText(fileName)\n x = QUrl.fromLocalFile(fileName).fileName()\n self.edFile.setText(x)\n self.edFile.setStyleSheet(\"\"\"QLineEdit { color: green }\"\"\")\n\n\n # Create Directory\n def CreateDir(self, cDIR, nDir, filename) :\n\n resPathFile = os.path.abspath(os.path.join(cDIR, nDir, \"{}.csv\".format(filename)))\n\n if os.path.exists(resPathFile) :\n os.remove(resPathFile)\n else :\n # os.makedirs(os.path.dirname(resPathFile), exist_ok=True)\n distutils.dir_util.mkpath(os.path.dirname(resPathFile))\n\n return resPathFile\n\n\n # open file\n def open_file(self, filename):\n if sys.platform == \"win32\":\n os.startfile(filename)\n else:\n opener =\"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, filename])\n\n # running tabula\n def tabula(self, jarfile, coordinate, pathFile) :\n\n output = self.launchWithoutConsole('java', ['-jar', str(jarfile), '-p', 'all', '-a', str(coordinate), str(pathFile)])\n\n return output\n\n\n def launchWithoutConsole(self, command, args):\n \"\"\"Launches 'command' windowless and waits until finished\"\"\"\n startupinfo = subprocess.STARTUPINFO()\n stdin = subprocess.PIPE\n stdout 
= subprocess.PIPE\n stderr = subprocess.PIPE\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n tmpRes, err = subprocess.Popen([command] + args, startupinfo=startupinfo, stdin=stdin, stderr=stderr, stdout=stdout).communicate()\n\n result = tmpRes.decode('utf-8').splitlines()\n\n return result\n\n\n def PDFponum(self, pathPDF) :\n\n crdnt = \"76.883,458.618,89.888,594.023\"\n\n result = self.tabula(self.jarfile, crdnt, pathPDF)\n\n return result\n\n\n def PDFbarcode(self, pathPDF) :\n\n crdnt = \"139.613,28.688,789.863,99.833\"\n\n tmpResult = self.tabula(self.jarfile, crdnt, pathPDF)\n\n result = self.checkListFloat(tmpResult, True)\n\n # result = self.checkListFloat2d(tmpResult, True)\n\n return result\n\n\n def PDFqty(self, pathPDF) :\n crdnt = \"139.613,341.573,789.863,382.883\"\n\n tmpResult = self.tabula(self.jarfile, crdnt, pathPDF)\n\n result = self.checkListFloat(tmpResult, True)\n\n return result\n\n\n def PDFmodal(self, pathPDF) :\n crdnt = \"139.613,401.243,789.863,468.563\"\n\n tmpResult = self.tabula(self.jarfile, crdnt, pathPDF)\n\n result = self.checkListFloat(tmpResult)\n\n return result\n\n\n # check a list for float type value\n def checkListFloat(self, arList, isfloat = False) :\n result = []\n\n if isfloat :\n for _i in arList:\n if self.checkFLoat(_i) :\n result.append([int(float(_i))])\n else :\n for _i in arList:\n res = re.sub('[^\\d\\.,]', '', _i)\n if res :\n result.append([res])\n\n return result\n\n\n # check float\n def checkFLoat(self, value) :\n try :\n return float(value).is_integer()\n except ValueError:\n return False\n\n # button convert CSV\n def BtnCnv(self) :\n\n checkJava = distutils.spawn.find_executable(\"java\")\n\n if checkJava is not None :\n\n current_dir = os.getcwd()\n\n # PATH file\n pathPDF = self.lbPath.text()\n resPath, resFilename = os.path.split(os.path.splitext(pathPDF)[0])\n resPathFile = self.CreateDir(current_dir, NEWDIR, resFilename)\n resultPath = Path(os.path.abspath(os.path.join(current_dir, NEWDIR)))\n\n tmpponum = self.PDFponum(pathPDF)\n ponum = tmpponum[0]\n brc = self.PDFbarcode(pathPDF)\n qty = self.PDFqty(pathPDF)\n mdl = self.PDFmodal(pathPDF)\n\n # prepare write CSV\n with open(resPathFile, \"w+\") as csv :\n\n # write first header\n csv.write(HEAD_CODE_STORE + DELIM + HEAD_PO_NO + DELIM + HEAD_BARCODE + DELIM + HEAD_QTY + DELIM + HEAD_MODAL)\n\n # write new line\n csv.write(\"\\n\")\n\n for br, qt, md in zip(brc, qty, mdl) :\n for resCD, resPO, resBC, resQT, resMD in zip(itertools.repeat(CODE_STORE, len(br)), itertools.repeat(ponum, len(br)), br, qt, md) :\n\n resBC = str(resBC)\n resQT = str(resQT)\n resMD = str(resMD)\n\n csv.write(resCD+DELIM+resPO+DELIM+resBC+DELIM+resQT+DELIM+resMD+'\\n')\n\n csv.close()\n\n reply = QMessageBox.information(self, \"Information\", \"Success!\", QMessageBox.Ok)\n\n if reply == QMessageBox.Ok :\n self.open_file(str(resultPath))\n\n else :\n msg = \"``java` command is not found in this system. 
Please ensure Java is installed and PATH is set for `java`\"\n\n QMessageBox.critical(self, \"Error\", msg, QMessageBox.Ok)\n\n\nif __name__ == '__main__' :\n app = QApplication(sys.argv)\n\n # create splash screen\n splash_pix = QPixmap(':/resources/unilever_splash.png')\n\n splash = QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)\n splash.setWindowFlags(QtCore.Qt.FramelessWindowHint)\n splash.setEnabled(False)\n\n # adding progress bar\n progressBar = QProgressBar(splash)\n progressBar.setMaximum(10)\n progressBar.setGeometry(17, splash_pix.height() - 20, splash_pix.width(), 50)\n\n splash.show()\n\n for iSplash in range(1, 11) :\n progressBar.setValue(iSplash)\n t = time.time()\n while time.time() < t + 0.1 :\n app.processEvents()\n\n time.sleep(1)\n\n window = mainWindow()\n window.setWindowTitle(appinfo._appname)\n # window.setWindowFlags(QtCore.Qt.WindowCloseButtonHint)\n # window.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n window.show()\n splash.finish(window)\n sys.exit(app.exec_())\n","sub_path":"[Jakarta]_Rezeki_Supermarketing/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"595817411","text":"from odoo import api, fields, osv, models, _\nfrom lxml import etree as et\nimport base64\nimport re\n_RFC_PATTERN = re.compile('[A-Z\\xc3\\x91&]{3,4}[0-9]{2}[0-1][0-9][0-3][0-9][A-Z0-9]?[A-Z0-9]?[0-9A-Z]?')\n_SERIES_PATTERN = re.compile('[A-Z]+')\n_UUID_PATTERN = re.compile('[a-f0-9A-F]{8}-[a-f0-9A-F]{4}-[a-f0-9A-F]{4}-[a-f0-9A-F]{4}-[a-f0-9A-F]{12}')\n\nclass eaccount_complements(models.Model):\n _inherit = 'eaccount.complements'\n\n @api.multi\n def onchange_attachment(self, selected_file, currency_id = False):#cr, uid, ids, \n if selected_file:\n xml_data = base64.b64decode(selected_file)\n try:\n xmlTree = et.ElementTree(et.fromstring(xml_data))\n except:\n raise osv.except_osv('Formato de archivo incorrecto', u'Se necesita cargar un archivo de extensi\\xf3n \".xml\" (CFDI o CFD)')\n if 'cfdi:Comprobante' not in xml_data[0:100] and 'Comprobante' not in xml_data[0:100]:\n raise osv.except_osv('Archivo XML incorrecto', 'Se necesita cargar un archivo de tipo CFDI o CFD.')\n vals = {}\n if 'cfdi:Comprobante' in xml_data[0:100]:\n vouchNode = xmlTree.getroot()\n if vouchNode is None:\n raise osv.except_osv(u'Estructura CFDI inv\\xe1lida', u'No se encontr\\xf3 el nodo \"cfdi:Comprobante\"')\n if 'Total' not in vouchNode.attrib.keys() or 'Fecha' not in vouchNode.attrib.keys():\n raise osv.except_osv(u'Informaci\\xf3n faltante', u'Compruebe que el CFDI tenga asignados los campos \"total\" y \"fecha\".')\n emitterNode = vouchNode.find('{http://www.sat.gob.mx/cfd/3}Emisor')\n if emitterNode is None:\n raise osv.except_osv(u'Estructura CFDI inv\\xe1lida', u'No se encontr\\xf3 el nodo \"cfdi:Emisor\"')\n if 'Rfc' not in emitterNode.attrib.keys():\n raise osv.except_osv(u'Informaci\\xf3n faltante', u'No se encontr\\xf3 el RFC emisor.')\n receiverNode = vouchNode.find('{http://www.sat.gob.mx/cfd/3}Receptor')\n if receiverNode is None:\n raise osv.except_osv(u'Estructura CFDI inv\\xe1lida', u'No se encontr\\xf3 el nodo \"cfdi:Receptor\"')\n if 'Rfc' not in receiverNode.attrib.keys():\n raise osv.except_osv(u'Informaci\\xf3n faltante', u'No se encontr\\xf3 el RFC receptor.')\n complNode = vouchNode.find('{http://www.sat.gob.mx/cfd/3}Complemento')\n if complNode is None:\n raise osv.except_osv(u'Estructura CFDI inv\\xe1lida', u'No se encontr\\xf3 el nodo 
\"cfdi:Complemento\"')\n stampNode = complNode.find('{http://www.sat.gob.mx/TimbreFiscalDigital}TimbreFiscalDigital')\n if stampNode is None:\n raise osv.except_osv(u'Estructura CFDI inv\\xe1lida', u'No se encontr\\xf3 el nodo \"tfd:TimbreFiscalDigital\"')\n if 'UUID' not in stampNode.attrib.keys():\n raise osv.except_osv(u'Informaci\\xf3n faltante', u'No se encontr\\xf3 el UUID')\n if len(stampNode.attrib['UUID']) != 36:\n raise osv.except_osv(u'Informaci\\xf3n incorrecta', u'El UUID %s es incorrecto: se esperaban 36 caracteres, se encontraron %s' % (stampNode.attrib['UUID'], len(stampNode.attrib['UUID'])))\n vals['uuid'] = stampNode.attrib['UUID'].upper()\n else:\n vouchNode = xmlTree.getroot()\n if vouchNode is None:\n raise osv.except_osv(u'Estructura CFD inv\\xe1lida', u'No se encontr\\xf3 el nodo \"Comprobante\"')\n if 'total' not in vouchNode.attrib.keys() or 'fecha' not in vouchNode.attrib.keys():\n raise osv.except_osv(u'Informaci\\xf3n faltante', u'Compruebe que el CFD tenga asignados los campos \"total\" y \"fecha\".')\n emitterNode = vouchNode.find('{http://www.sat.gob.mx/cfd/2}Emisor')\n if emitterNode is None:\n raise osv.except_osv(u'Estructura CFD inv\\xe1lida', u'No se encontr\\xf3 el nodo \"Emisor\"')\n if 'rfc' not in emitterNode.attrib.keys():\n raise osv.except_osv(u'Informaci\\xf3n faltante', u'No se encontr\\xf3 el RFC emisor.')\n receiverNode = vouchNode.find('{http://www.sat.gob.mx/cfd/2}Receptor')\n if receiverNode is None:\n raise osv.except_osv(u'Estructura CFD inv\\xe1lida', u'No se encontr\\xf3 el nodo \"Receptor\"')\n if 'rfc' not in receiverNode.attrib.keys():\n raise osv.except_osv(u'Informaci\\xf3n faltante', u'No se encontr\\xf3 el RFC receptor.')\n vals['compl_currency_id'] = currency_id.id if currency_id else False\n if 'TipoCambio' in vouchNode.attrib.keys():\n vals['exchange_rate'] = float(vouchNode.attrib['TipoCambio'])\n vals['cbb_series'] = vouchNode.attrib.get('serie', '')\n try:\n vals['cbb_number'] = int(vouchNode.attrib.get('folio', 0))\n except:\n vals.pop('cbb_number')\n vals['rfc'] = emitterNode.attrib['Rfc']\n vals['rfc2'] = receiverNode.attrib['Rfc']\n vals['compl_date'] = vouchNode.attrib['Fecha'][0:10]\n vals['amount'] = float(vouchNode.attrib['Total'])\n return {'value': vals}\n\n\n\neaccount_complements()\n","sub_path":"extrasGDL/facturacion/cfdi_33/factura_v33/models/account_moveline_fit.py","file_name":"account_moveline_fit.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"270279270","text":"\"\"\"\nThis includes the visual cube and messages\nA Draw class to make code neat.\n\"\"\"\nimport sqlite3, pygame, sys\nfrom algs import *\n\npygame.init()\nclass Cube:\n\t'''A 6 * 3 * 3 MATRIX'''\n\tdef __init__(self, scramble = []):\n\t\tself.list = [[[0,0,0], [0,0,0], [0,0,0]], \\\n\t\t\t\t\t [[1,1,1], [1,1,1], [1,1,1]], \\\n\t\t\t\t\t [[2,2,2], [2,2,2], [2,2,2]], \\\n\t\t\t\t\t [[3,3,3], [3,3,3], [3,3,3]], \\\n\t\t\t\t\t [[4,4,4], [4,4,4], [4,4,4]], \\\n\t\t\t\t\t [[5,5,5], [5,5,5], [5,5,5]]]\n\t\tself.U = self.list[0]\n\t\tself.D = self.list[1]\n\t\tself.L = self.list[2]\n\t\tself.F = self.list[3]\n\t\tself.R = self.list[4]\n\t\tself.B = self.list[5]\n\t\tself.dict = {0: (255,255,255), \\\n\t\t\t\t\t 1: (253,244,62), \\\n\t\t\t\t\t 2: (241,134,52), \\\n\t\t\t\t\t 3: (91,207,49), \\\n\t\t\t\t\t 4: (220,53,42), \\\n\t\t\t\t\t 5: (43,113,244)}\n\t\tif scramble:\n\t\t\tTurn(self).scramble(scramble)\n\n\treset = lambda self: 
self.__init__()\n\n\tdef check(self):\n\t\tfor i in self.list:\n\t\t\tfor a in range(3):\n\t\t\t\tfor b in range(3):\n\t\t\t\t\tif i[a][b] != i[0][0]:\n\t\t\t\t\t\treturn False\n\t\treturn True\n\n\tdef copy(self, scramble = []):\n\t\ta = Cube()\n\t\ta.list[0] = [[j for j in i] for i in self.list[0]]\n\t\ta.list[1] = [[j for j in i] for i in self.list[1]]\n\t\ta.list[2] = [[j for j in i] for i in self.list[2]]\n\t\ta.list[3] = [[j for j in i] for i in self.list[3]]\n\t\ta.list[4] = [[j for j in i] for i in self.list[4]]\n\t\ta.list[5] = [[j for j in i] for i in self.list[5]]\n\t\ta.U = a.list[0]\n\t\ta.D = a.list[1]\n\t\ta.L = a.list[2]\n\t\ta.F = a.list[3]\n\t\ta.R = a.list[4]\n\t\ta.B = a.list[5]\n\t\tif scramble:\n\t\t\tTurn(a).scramble(scramble)\n\t\treturn a\n\n\tdef print(self):\n\t\tfor i in self.list:\n\t\t\tprint(i)\n\n\nclass Box:\n\t'''BUILDING PARTS OF A CUBE'''\n\tdef __init__(self, dim, color, pos, surface, bolder = None):\n\t\tsuper().__init__()\n\t\tself.dim = dim\n\t\tself.color = color\n\t\tself.pos = pos\n\t\tself.surface = surface\n\t\tself.bolder = bolder\n\n\t'''self-explanatory'''\n\tdef draw(self):\n\t\tself.rect = pygame.Rect(self.pos, self.dim)\n\t\tpygame.draw.rect(self.surface, self.color, self.rect)\n\t\tself.bolder = pygame.Rect(self.pos, self.dim)\n\t\tpygame.draw.rect(self.surface, (0,0,0), self.bolder, 1)\n\n\nclass Control:\n\tdef __init__(self, cube):\n\t\tself.cube = cube\n\tdef keyboard_control(self):\n\t\tif (pygame.key.get_pressed()[pygame.K_r]):\n\t\t\t\t\tTurn(self.cube).execute(\"R\")\n\t\tif (pygame.key.get_pressed()[pygame.K_u]):\n\t\t\t\t\tTurn(self.cube).execute(\"U\")\n\t\tif (pygame.key.get_pressed()[pygame.K_l]):\n\t\t\t\t\tTurn(self.cube).execute(\"L\")\n\t\tif (pygame.key.get_pressed()[pygame.K_f]):\n\t\t\t\t\tTurn(self.cube).execute(\"F\")\n\t\tif (pygame.key.get_pressed()[pygame.K_d]):\n\t\t\t\t\tTurn(self.cube).execute(\"D\")\n\t\tif (pygame.key.get_pressed()[pygame.K_b]):\n\t\t\t\t\tTurn(self.cube).execute(\"B\")\n\t\tif (pygame.key.get_pressed()[pygame.K_x]):\n\t\t\t\t\tTurn(self.cube).execute(\"x\")\n\t\tif (pygame.key.get_pressed()[pygame.K_y]):\n\t\t\t\t\tTurn(self.cube).execute(\"y\")\n\t\tif (pygame.key.get_pressed()[pygame.K_z]):\n\t\t\t\t\tTurn(self.cube).execute(\"z\")\n\t\tif (pygame.key.get_pressed()[pygame.K_m]):\n\t\t\t\t\tTurn(self.cube).execute(\"M\")\n\t\tif (pygame.key.get_pressed()[pygame.K_c]):\n\t\t\t\t\tTurn(self.cube).execute(\"C\")\n\t\tif (pygame.key.get_pressed()[pygame.K_e]):\n\t\t\t\t\tTurn(self.cube).execute(\"E\")\n\t\tif (pygame.key.get_pressed()[pygame.K_a]) and \\\n\t\t\t(pygame.key.get_pressed()[pygame.K_s]):\n\t\t\t\t\tself.cube.reset()\n\t\t\t\t\t\nclass Draw:\n\tdef __init__(self, cube, surface):\n\t\tself.cube = cube\n\t\tself.surface = surface\n\t\tself.cell = 30\n\n\t'''cubes and their frames'''\n\tdef draw(self):\n\t\tfor i in range(2,6):\n\t\t\tfor j in range(3):\n\t\t\t\tfor k in range(3):\n\t\t\t\t\tBox((self.cell,self.cell), self.cube.dict[self.cube.list[i][j][k]], \n\t\t\t\t\t\t((self.cell * 3 * (i - 1)) + self.cell * k, self.cell * (j + 4)), self.surface).draw()\n\n\t\tfor j in range(3):\n\t\t\tfor k in range(3):\n\t\t\t\tBox((self.cell,self.cell), self.cube.dict[self.cube.U[j][k]], \n\t\t\t\t\t((self.cell * 6) + self.cell * k, self.cell * (j + 1)), self.surface).draw()\n\n\t\tfor j in range(3):\n\t\t\tfor k in range(3):\n\t\t\t\tBox((self.cell,self.cell), self.cube.dict[self.cube.D[j][k]], \n\t\t\t\t\t((self.cell * 6) + self.cell * k, self.cell * (j + 7)), self.surface).draw()\n\n\nclass 
Message:\n\n\tdef __init__(self, msg, color, screen, height, size = 20, x = 30):\n\t\tself.msg = msg\n\t\tself.color = color\n\t\tself.screen = screen\n\t\tself.height = height\n\t\tself.x = x\n\t\tself.font = pygame.font.SysFont(\"Marker Felt\", size)\n\n\tdef display(self):\n\n\t\tscreen_text = self.font.render(self.msg, True, self.color)\n\t\tself.screen.blit(screen_text, [self.x, self.height])\n\n\nclass Database:\n\n\tdef __init__(self):\n\t\tself.conn = sqlite3.connect(\"score_list_test.db\")\n\t\tself.c = self.conn.cursor()\n\t\tself.c.execute(\"CREATE TABLE IF NOT EXISTS database(id INT, scramble TEXT, score REAL)\")\n\n\tdef write(self, t_scramble, t_score):\n\t\tself.c.execute(\"SELECT id FROM database\") \n\t\tl = self.c.fetchall()\n\t\tself.c.execute(\"INSERT INTO database(id, scramble, score) VALUES(?, ?, ?)\",(len(l) + 1, t_scramble, t_score))\n\t\tself.conn.commit()\n\n\tdef close(self):\n\t\tself.c.close()\n\t\tself.conn.close()\n\t\n\tdef read_average(self):\n\t\tself.c.execute(\"SELECT score FROM database\") \n\t\tl = self.c.fetchall()\n\t\ttry:\n\t\t\treturn (float(sum([i[0] for i in l])) / len(l))\n\t\texcept ZeroDivisionError:\n\t\t\treturn 0.0\n\n\tdef read_best(self):\n\t\tself.c.execute(\"SELECT min(score) FROM database\")\n\t\treturn self.c.fetchall()[0][0]\n\n\tdef delete_last(self):\n\t\tself.c.execute(\"DELETE FROM database WHERE id = (SELECT MAX(id) FROM database)\")\n\t\tself.conn.commit()\n\n","sub_path":"entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"517619464","text":"import datetime\nfrom frictionless import Package, Resource, helpers\n\n\nIS_MACOS = helpers.is_platform(\"macos\")\n\n\n# Parser\n\n\ndef test_spss_parser_write(tmpdir):\n source = Resource(\"data/table.csv\")\n if not IS_MACOS:\n target = source.write(str(tmpdir.join(\"table.sav\")))\n with target:\n assert target.header == [\"id\", \"name\"]\n assert target.read_rows() == [\n {\"id\": 1, \"name\": \"english\"},\n {\"id\": 2, \"name\": \"中国人\"},\n ]\n\n\ndef test_spss_parser_write_types(tmpdir):\n source = Package(\"data/storage/types.json\").get_resource(\"types\")\n if not IS_MACOS:\n target = source.write(str(tmpdir.join(\"table.sav\")))\n with target:\n\n # Assert schema\n assert target.schema == {\n \"fields\": [\n {\"name\": \"any\", \"type\": \"string\"}, # type fallback\n {\"name\": \"array\", \"type\": \"string\"}, # type fallback\n {\"name\": \"boolean\", \"type\": \"string\"}, # type fallback\n {\"name\": \"date\", \"type\": \"date\"},\n {\"name\": \"date_year\", \"type\": \"date\"}, # format removal\n {\"name\": \"datetime\", \"type\": \"datetime\"},\n {\"name\": \"duration\", \"type\": \"string\"}, # type fallback\n {\"name\": \"geojson\", \"type\": \"string\"}, # type fallback\n {\"name\": \"geopoint\", \"type\": \"string\"}, # type fallback\n {\"name\": \"integer\", \"type\": \"integer\"},\n {\"name\": \"number\", \"type\": \"number\"},\n {\"name\": \"object\", \"type\": \"string\"}, # type fallback\n {\"name\": \"string\", \"type\": \"string\"},\n {\"name\": \"time\", \"type\": \"time\"},\n {\"name\": \"year\", \"type\": \"integer\"}, # type downgrade\n {\"name\": \"yearmonth\", \"type\": \"string\"}, # type fallback\n ],\n }\n\n # Asssert rows\n assert target.read_rows() == [\n {\n \"any\": \"中国人\",\n \"array\": '[\"Mike\", \"John\"]',\n \"boolean\": \"true\",\n \"date\": datetime.date(2015, 1, 1),\n \"date_year\": datetime.date(2015, 1, 1),\n 
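# the year-only format was dropped when writing to SPSS (see the format removal note above), so date_year reads back as a full date\n                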
\"datetime\": datetime.datetime(2015, 1, 1, 3, 0),\n \"duration\": \"P1Y1M\",\n \"geojson\": '{\"type\": \"Point\", \"coordinates\": [33, 33.33]}',\n \"geopoint\": \"30,70\",\n \"integer\": 1,\n \"number\": 7.0,\n \"object\": '{\"chars\": 560}',\n \"string\": \"english\",\n \"time\": datetime.time(3, 0),\n \"year\": 2015,\n \"yearmonth\": \"2015-01\",\n },\n ]\n\n\ndef test_spss_storage_constraints(tmpdir):\n source = Package(\"data/storage/constraints.json\").get_resource(\"constraints\")\n if not IS_MACOS:\n target = source.write(str(tmpdir.join(\"table.sav\")))\n with target:\n\n # Assert schema\n assert target.schema == {\n \"fields\": [\n {\"name\": \"required\", \"type\": \"string\"}, # constraint removal\n {\"name\": \"minLength\", \"type\": \"string\"}, # constraint removal\n {\"name\": \"maxLength\", \"type\": \"string\"}, # constraint removal\n {\"name\": \"pattern\", \"type\": \"string\"}, # constraint removal\n {\"name\": \"enum\", \"type\": \"string\"}, # constraint removal\n {\"name\": \"minimum\", \"type\": \"integer\"}, # constraint removal\n {\"name\": \"maximum\", \"type\": \"integer\"}, # constraint removal\n ],\n }\n\n # Asssert rows\n assert target.read_rows() == [\n {\n \"required\": \"passing\",\n \"minLength\": \"passing\",\n \"maxLength\": \"passing\",\n \"pattern\": \"passing\",\n \"enum\": \"passing\",\n \"minimum\": 5,\n \"maximum\": 5,\n },\n ]\n\n\ndef test_spss_parser_write_timezone(tmpdir):\n source = Resource(\"data/timezone.csv\")\n if not IS_MACOS:\n target = source.write(str(tmpdir.join(\"table.sav\")))\n with target:\n\n # Assert schmea\n assert target.schema == {\n \"fields\": [\n {\"name\": \"datetime\", \"type\": \"datetime\"},\n {\"name\": \"time\", \"type\": \"time\"},\n ],\n }\n\n # Assert rows\n assert target.read_rows() == [\n {\n \"datetime\": datetime.datetime(2020, 1, 1, 15),\n \"time\": datetime.time(15),\n },\n {\n \"datetime\": datetime.datetime(2020, 1, 1, 15),\n \"time\": datetime.time(15),\n },\n {\n \"datetime\": datetime.datetime(2020, 1, 1, 15),\n \"time\": datetime.time(15),\n },\n {\n \"datetime\": datetime.datetime(2020, 1, 1, 15),\n \"time\": datetime.time(15),\n },\n ]\n","sub_path":"tests/plugins/test_spss.py","file_name":"test_spss.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"440412569","text":"import sys\r\nimport os\r\nimport requests\r\nimport datetime\r\nimport pandas as pd\r\nimport argparse\r\nimport imutils\r\nimport time\r\nimport cv2\r\nimport csv\r\nimport pytz\r\nimport streamlink\r\nimport glob\r\nfrom PIL import Image\r\n\r\nTIME_LIM = 900\r\nDEF_AREA = 500\r\nREFERER = \"\"\r\n\r\n\r\nHEADERS = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0\",\r\n \"Accept\": \"*/*\",\r\n \"Accept-Language\": \"en-US,en;q=0.5\",\r\n \"Accept-Encoding\": \"gzip, deflate, br\",\r\n \"Referer\": REFERER,\r\n \"DNT\": \"1\",\r\n \"Connection\": \"keep-alive\",\r\n \"Pragma\": \"no-cache\",\r\n \"Cache-Control\": \"no-cache\"\r\n}\r\n\r\ntry:\r\n path_to_insec = sys.argv[1]\r\n path_to_in = sys.argv[2]\r\n video_url = sys.argv[3] \r\n\r\nexcept IndexError:\r\n print(\"Usage: path/to/insec path/to/in urlvideo\")\r\n sys.exit(1)\r\n\r\nretpo_name=\"noname\"\r\nfor file in os.listdir(path_to_insec):\r\n if file.endswith(\".repo\"):\r\n retpo_name=file\r\nif retpo_name == \"noname\":\r\n print(\"repo file not found\")\r\n 
sys.exit()\r\nretpo_name=os.path.splitext(retpo_name)[0]\r\nlogi_name=\"noname\"\r\nfor file in os.listdir(path_to_insec):\r\n if file.endswith(\".login\"):\r\n logi_name=file\r\nif logi_name == \"noname\":\r\n print(\"login file not found\")\r\n sys.exit()\r\nlogi_name=os.path.splitext(logi_name)[0]\r\n\r\npass_name=\"noname\"\r\nfor file in os.listdir(path_to_insec):\r\n if file.endswith(\".pass\"):\r\n pass_name=file\r\nif pass_name == \"noname\":\r\n print(\"pass file not found\")\r\n sys.exit()\r\npass_name=os.path.splitext(pass_name)[0]\r\n\r\n\r\ndef detect_motion(file_name):\r\n max_rect = 0\r\n num1 = 0\r\n vs = cv2.VideoCapture(file_name)\r\n firstFrame = None\r\n while True:\r\n frame = vs.read()\r\n frame = frame[1]\r\n text = \"Unoccupied\"\r\n if frame is None:\r\n break \r\n frame = imutils.resize(frame, width=500)\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\r\n if firstFrame is None:\r\n firstFrame = gray\r\n continue\r\n frameDelta = cv2.absdiff(firstFrame, gray)\r\n thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\r\n thresh = cv2.dilate(thresh, None, iterations=2)\r\n cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n cnts = imutils.grab_contours(cnts)\r\n for c in cnts:\r\n if cv2.contourArea(c) < DEF_AREA:\r\n continue\r\n# if cv2.contourArea(c) <= max_rect:\r\n# continue\r\n max_rect += 1\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n if max_rect > 0:\r\n max_rect = 0\r\n frameOrig = frame.copy()\r\n folder1 = path_to_in+file_name.split('-')[0]\r\n if not os.path.exists(folder1):\r\n os.mkdir(folder1)\r\n \r\n folder1 = folder1+\"/\"+file_name.split('-')[1]+\"-\"+file_name.split('-')[2].split('.')[0]\r\n if not os.path.exists(folder1):\r\n os.mkdir(folder1)\r\n \r\n if not os.path.exists(folder1+\"/1\"):\r\n os.mkdir(folder1+\"/1\")\r\n \r\n numdir=1\r\n while os.path.exists(folder1+\"/\"+str(numdir)):\r\n numdir += 1\r\n numdir -=1\r\n listfile = os.listdir(folder1+\"/\"+str(numdir))\r\n number_files = len(listfile)\r\n if number_files > 200:\r\n numdir += 1\r\n os.mkdir(folder1+\"/\"+str(numdir))\r\n folder1 = folder1+\"/\"+str(numdir)\r\n filejpg=folder1+\"/\"+file_name.split('.')[0]+\"_\"+str(num1)+\"_.jpg\"\r\n while os.path.exists(filejpg):\r\n num1 += 1\r\n filejpg=folder1+\"/\"+file_name.split('.')[0]+\"_\"+str(num1)+\"_.jpg\"\r\n \r\n\r\n\r\n\r\n cv2.imwrite(filejpg, frameOrig)\r\n vs.release() \r\n return num1\r\n \r\ndef getSegs(m3):\r\n lines = m3.text.split('\\n')\r\n segments = []\r\n for line in lines:\r\n if '.ts' in line:\r\n segments.append(line)\r\n return segments\r\n\r\n\r\ndef dumpSegs( segments, path, append=False):\r\n with open(path, 'ab' if append else 'wb') as f:\r\n for segment in segments:\r\n segurl = segment\r\n success = False\r\n while not success:\r\n try:\r\n seg = requests.get(segurl, headers=HEADERS)\r\n success = True\r\n except:\r\n print('retrying...')\r\n f.write(seg.content)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"start\")\r\n streams = streamlink.streams(video_url)\r\n video_url = streams[\"best\"].url\r\n m3u8 = requests.get(video_url+\"?start_seq=0\", headers=HEADERS)\r\n segments = getSegs(m3u8)\r\n\r\n#EXT-X-PROGRAM-DATE-TIME:2020-04-15T09:35:14.388+00:00\r\n#EXT-X-TARGETDURATION:5\r\n datime = m3u8.text.split('EXT-X-PROGRAM-DATE-TIME:')[1].split('.')[0] \r\n timeadd = m3u8.text.split('EXT-X-TARGETDURATION:')[1].split('\\n#')[0] \r\n date = 
datetime.datetime.strptime(datime, \"%Y-%m-%dT%H:%M:%S\")\r\n timestamp = datetime.datetime.timestamp(date)\r\n\r\n\r\n aa = []\r\n bb = []\r\n print(\"csv\")\r\n fieldnames = ['data', 'time_start','time_stop','count_move','screen']\r\n file_csv='insec/names.csv'\r\n if not os.path.exists(file_csv):\r\n with open(file_csv, 'w', newline='') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames) \r\n writer.writeheader()\r\n writer.writerow({'data': '20200101', 'time_start': '010101',\r\n 'time_stop':'010101','count_move':'0',\r\n 'screen':'none' })\r\n print(\"read csv\") \r\n df = pd.read_csv(file_csv)\r\n datanow = df['data'].tolist()\r\n datanum = datanow[-1]\r\n timenow = df['time_stop'].tolist()\r\n timenum = timenow[-1] \r\n# tzloc = pytz.timezone('Europe/Tallinn')\r\n timeinurl = timestamp - int(timeadd)\r\n timesave = 0\r\n for i in segments:\r\n timeinurl += int(timeadd)\r\n valuetmp = datetime.datetime.fromtimestamp(timeinurl) \r\n print(valuetmp.strftime('%Y%m%d-%H%M%S'))\r\n if int(datanum) > int(valuetmp.strftime('%Y%m%d')):\r\n continue\r\n if int(datanum) == int(valuetmp.strftime('%Y%m%d')):\r\n if int(timenum) > int(valuetmp.strftime('%H%M%S')):\r\n continue\r\n \r\n aa.append(timeinurl)\r\n bb.append(i)\r\n# print(aa)\r\n print(aa[-1] - aa[0])\r\n if aa[-1] - aa[0] > TIME_LIM:\r\n print(bb)\r\n value = datetime.datetime.fromtimestamp(aa[0])\r\n value2 = datetime.datetime.fromtimestamp(aa[-1])\r\n file_video_name = value.strftime('%Y%m%d-%H%M%S')+value2.strftime('-%H%M%S')+\".ts\"\r\n dumpSegs( bb,file_video_name )\r\n out = detect_motion(file_video_name)\r\n print(out)\r\n#pogoda \r\n with open(file_csv, 'a', newline='') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\r\n writer.writerow({'data': value.strftime('%Y%m%d'), 'time_start': value.strftime('%H%M%S'),\r\n 'time_stop':value2.strftime('%H%M%S'),'count_move':out,\r\n 'screen':'none' if out == 0 else file_video_name.split('.')[0]+\".jpg\"})\r\n\r\n if out > 2:\r\n numdir = 1 \r\n folder1 = path_to_in+file_video_name.split('-')[0]\r\n folder1 = folder1+\"/\"+file_video_name.split('-')[1]+\"-\"+file_video_name.split('-')[2].split('.')[0]\r\n while os.path.exists(folder1+\"/\"+str(numdir)):\r\n fp_in = folder1+\"/\"+str(numdir)+\"/\"+\"*.jpg\"\r\n fp_out = folder1+\"/\"+str(numdir)+\"_\"+file_video_name.split('.')[0]+\".gif\"\r\n img, *imgs = [Image.open(f) for f in sorted(glob.glob(fp_in))]\r\n img.save(fp=fp_out, format='GIF', append_images=imgs,\r\n save_all=True, duration=200, loop=0)\r\n# os.system(\"rm -rf \"+folder1+\"/\"+str(numdir)+\"/\"+\"*.jpg\")\r\n numdir += 1 \r\n \r\n os.remove(file_video_name)\r\n\r\n os.system(\"git config --global user.name \\\"\"+logi_name+\"\\\"\")\r\n os.system(\"git config --global user.email \"+logi_name+\"@github.com\")\r\n os.system(\"git remote set-url origin https://\"+logi_name+\":\"+pass_name+\"@github.com/\"+logi_name+\"/\"+retpo_name+\".git\")\r\n os.system(\"git checkout master\")\r\n os.system(\"git add insec \"+path_to_in)\r\n os.system(\"git commit -m \\\"oinion csv files\\\"\")\r\n os.system(\"git push origin master \") \t\r\n \r\n aa = []\r\n bb = []\r\n","sub_path":"insec/ts_time_you.py","file_name":"ts_time_you.py","file_ext":"py","file_size_in_byte":8854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"263266676","text":"from gym_jobshop.envs.src import environment, class_Order, global_settings\nimport csv\nfrom math import ceil\nimport random\nfrom 
gym_jobshop.envs.src.csv_handler import csv_prefix\n\nrandom.seed(global_settings.random_seed)\n\n\ndef generate_order():\n \"Generate new orders with random due date and random product type\"\n # Orders have a fixed due date of their arrival time + 10 periods ( = 10 * 960 steps)\n product_type = random.randrange(1, 7) # set product type to a random number between 1 and 6\n global_settings.count_of_generated_orders += 1 # Increase global counter for the total amount of orders by 1\n due_date = global_settings.current_time + (\n global_settings.due_date_multiplier * global_settings.duration_of_one_period)\n environment.order_pool.append(\n class_Order.Order(\n # orderID,creation_date, due_date, product_type, planned_release_date\n global_settings.count_of_generated_orders, # orderid\n global_settings.current_time, # creationdate\n due_date, # due date\n product_type, # product type\n due_date - global_settings.planned_release_date_multiplier *\n global_settings.duration_of_one_period) # planned release date\n )\n environment.set_next_order_arrival_time()\n # DEBUG INFO:\n if global_settings.show_order_generation:\n print(\"Step \" + str(global_settings.current_time) + \" Order generated. Product type: \" + str(product_type)\n + \" || Due Date: \" + str(global_settings.current_time + (\n global_settings.due_date_multiplier * global_settings.duration_of_one_period)\n ) + \" || orderID: \" + str(global_settings.count_of_generated_orders))\n # write to file\n with open(str('./' + csv_prefix) + '_demand.csv', mode='a') as results_CSV:\n results_writer = csv.writer(results_CSV, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # results_writer.writerow(['Order Number', 'Product', 'Arrival', 'Due Date'])\n results_writer.writerow([global_settings.count_of_generated_orders, product_type, global_settings.current_time, due_date,\n ceil(global_settings.current_time / 960), ceil(due_date / 960)])\n results_CSV.close()\n return\n","sub_path":"gym-jobshop/gym_jobshop/envs/src/order_generation.py","file_name":"order_generation.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"441619188","text":"\"\"\"\nAuthor: Max Martinez Ruts\nDate: January 2018\n\nDescription:\nMulti-agent system approach to optimize robot search.\n\nSetup:\nA set of robots search for mineral in an unexplored\nenvironment and return to the mothership to deposit the mineral samples obtained. 
In the path returning to the\nmothership, the robots deposit crumbs, so that subsequent robots can identigy which path leads to a successful mineral source.\n\"\"\"\nimport numpy as np\nimport random\nimport math\nimport pygame\nimport time\n\nn = 40 # Number of robots\nw1 = np.random.randint(0,10,(n,n), dtype=int)\nr = np.random.rand(n,n)\n\n# Define samples, crumbs and obstacles positions\nsamples = np.where(r>0.99,w1,0)\ncrumbs = np.zeros((n,n), dtype=int)\nobstacles = np.zeros((n,n), dtype=int)\n\n# Position of the base\nbase = (0,0)\n\n# Helper to get local position of the robot\npos_distribution = np.linspace(-350,350,n)\ngrid_distribution = np.zeros((n,n,2))\n\n# Iterate over all positions to define the grid positioning\nfor i in range(0,n):\n for j in range(0,n):\n grid_distribution[j,i] = np.array([pos_distribution[i],pos_distribution[j]])\n\nprint(grid_distribution)\n\n# Screen parameters\nwidth = 800\nheight = 800\ncenter = np.array([width/2, height/2])\nscreen = pygame.display.set_mode((width, height))\n\n# Colors\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\nwhite = (255, 255, 255)\nyellow = (255,255, 0)\n\n\n# Returns if the rectangle is in the board\ndef try_pos(pos):\n if 0 <= pos[0] < n and 0 <= pos[1] < n:\n return True\n else:\n return False\n\n# Convert coordinates form cartesian to screen coordinates (used to draw in pygame screen)\ndef cartesian_to_screen(car_pos):\n factor = 1\n screen_pos = np.array([center[0]*factor+car_pos[0],center[1]*factor+car_pos[1]])/factor\n screen_pos = screen_pos.astype(int)\n return screen_pos\n\n# Drawing Board\ndef draw():\n pygame.event.get()\n screen.fill((0, 0, 0))\n\n for robot in robots:\n # print(robot.pos, grid_distribution[robot.pos])\n pygame.draw.circle(screen, green, cartesian_to_screen(grid_distribution[robot.pos]), 5)\n\n for i in range(n):\n for j in range(n):\n pygame.draw.circle(screen, yellow, cartesian_to_screen(grid_distribution[(i, j)]), samples[i,j])\n pygame.draw.circle(screen, red, cartesian_to_screen(grid_distribution[(i, j)]), int(crumbs[i,j]/2))\n pygame.display.flip()\n\n\n# Class robot\nclass Robot:\n def __init__(self):\n self.pos = (0,0)\n self.carrying = False\n self.samples_collected = 0\n\n # Perform action\n def act(self):\n global crumbs\n global samples\n\n # All possible directions\n near_pos = [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)]\n\n # If carrying sample and arrived to base\n if self.carrying and self.pos == base:\n # Drop sample and increase counter\n self.carrying = False\n self.samples_collected +=1\n\n # If carrying sample and not in base\n elif self.carrying and self.pos!= base:\n # Leave 2 crumbs and travel in gradient up\n crumbs[self.pos] = crumbs[self.pos]+2\n distances = []\n for near in near_pos:\n distances.append(math.sqrt((self.pos[0]+near[0])**2 + (self.pos[1]+near[1])**2))\n min_index = np.argmin(np.array(distances))\n new_pos = (self.pos[0]+near_pos[min_index][0], self.pos[1]+ near_pos[min_index][1])\n if try_pos(new_pos):\n self.pos = new_pos\n\n # If found sample\n elif samples[self.pos] >0:\n # Carry sample\n self.carrying = True\n samples[self.pos] = samples[self.pos]-1\n\n # If found crumbs\n elif crumbs[self.pos] >0:\n # Pick 1 crumb and travel in gradient down\n crumbs[self.pos] = crumbs[self.pos]-1\n distances = []\n for near in near_pos:\n distances.append(math.sqrt((self.pos[0] + near[0]) ** 2 + (self.pos[1] + near[1]) ** 2))\n max_index = distances.index(max(distances))\n new_pos = (self.pos[0]+near_pos[max_index][0], 
self.pos[1]+ near_pos[max_index][1])\n if try_pos(new_pos):\n self.pos = new_pos\n\n # Otherwise\n else:\n # Move random pos\n a = random.randint(0,7)\n new_pos = (self.pos[0]+near_pos[a][0],self.pos[1]+near_pos[a][1])\n if try_pos(new_pos):\n self.pos = new_pos\n\n# Define list of robots\n\nrobots = []\nfor i in range(20):\n # Add\n robots.append(Robot())\n\n# Start and continue simulation\nwhile True:\n for robot in robots:\n robot.act()\n draw()\n\n\n","sub_path":"Multi_Agent_robots.py","file_name":"Multi_Agent_robots.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"349063073","text":"'''Tests the \"flaskapp\" example.'''\nimport unittest\nimport asyncio\n\nfrom pulsar import send, SERVER_SOFTWARE\nfrom pulsar.apps.http import HttpClient\nfrom pulsar.apps.test import dont_run_with_thread\n\nfrom examples.flaskapp.manage import server\n\n\nclass TestFlaskThread(unittest.TestCase):\n app_cfg = None\n concurrency = 'thread'\n\n @classmethod\n def name(cls):\n return 'flask_' + cls.concurrency\n\n @classmethod\n @asyncio.coroutine\n def setUpClass(cls):\n s = server(name=cls.name(),\n concurrency=cls.concurrency,\n bind='127.0.0.1:0')\n cls.app_cfg = yield from send('arbiter', 'run', s)\n cls.uri = 'http://{0}:{1}'.format(*cls.app_cfg.addresses[0])\n cls.client = HttpClient()\n\n @classmethod\n def tearDownClass(cls):\n if cls.app_cfg is not None:\n return send('arbiter', 'kill_actor', cls.app_cfg.name)\n\n @asyncio.coroutine\n def testResponse200(self):\n c = self.client\n response = yield from c.get(self.uri)\n self.assertEqual(response.status_code, 200)\n content = response.content\n self.assertEqual(content, b'Flask Example')\n headers = response.headers\n self.assertTrue(headers)\n self.assertEqual(headers['server'], SERVER_SOFTWARE)\n\n @asyncio.coroutine\n def testResponse404(self):\n c = self.client\n response = yield from c.get('%s/bh' % self.uri)\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.content, b'404 Page')\n\n\n@dont_run_with_thread\nclass TestFlaskProcess(TestFlaskThread):\n concurrency = 'process'\n","sub_path":"examples/flaskapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"192895931","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2019/5/21 8:35 AM\n@Author : ddlee\n@File : 84largestRectangleArea.py\n\"\"\"\nimport collections\n\n\nclass Solution:\n def largestRectangleAreaNaive(self, heights) -> int:\n '''O(N^2) 暴力 超时'''\n l = len(heights)\n if l < 1:\n return 0\n\n dp = [0 for i in range(l)]\n for i in range(l):\n mh = heights[i]\n for j in range(i)[::-1]:\n tmp = min(heights[j:i+1]) * (i - j + 1)\n mh = max(mh, tmp)\n dp[i] = mh\n # print(dp)\n return max(dp)\n\n\n def largestRectangleAreaStack(self, heights) -> int:\n '''O(N) 栈'''\n l = len(heights)\n if l < 1:\n return 0\n\n ma = 0\n stack = []\n heights = [0] + heights + [0]\n l = len(heights)\n\n for i in range(l):\n while stack and heights[stack[-1]] > heights[i]:\n top = stack.pop()\n ma = max(ma, (i - stack[-1] - 1) * heights[top])\n\n stack.append(i)\n return ma\n\n\n def maximalRectangle(self, matrix):\n '''借用上面栈的方法 时间复杂度O(N^2)'''\n if len(matrix) < 1:\n return 0\n\n matrix = [list(map(int, row)) for row in matrix]\n m = len(matrix)\n n = len(matrix[0])\n\n ma = 0\n heights = [0 for i in range(n)]\n for i in range(m):\n for j in range(n):\n if 
matrix[i][j] == 0:\n heights[j] = 0\n else:\n heights[j] += 1\n tmp = self.largestRectangleAreaStack(heights)\n ma = max(ma, tmp)\n return ma\n\n\nif __name__ == '__main__':\n # heights = [2, 1, 5, 6, 2, 3]\n # print(Solution().largestRectangleAreaStack(heights))\n mat = [[\"0\", \"1\", \"1\", \"0\", \"1\"],\n [\"1\", \"1\", \"0\", \"1\", \"0\"],\n [\"0\", \"1\", \"1\", \"1\", \"0\"],\n [\"1\", \"1\", \"1\", \"1\", \"0\"],\n [\"1\", \"1\", \"1\", \"1\", \"1\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\"]]\n\n # print(Solution().maximalSquare(mat))\n print(Solution().maximalRectangle(mat))\n","sub_path":"leetcode1/84largestRectangleArea.py","file_name":"84largestRectangleArea.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"277030493","text":"from django.shortcuts import render\nfrom luis.credentials import luis_api_key, luis_url\nfrom logging.handlers import TimedRotatingFileHandler\nimport logging\nimport requests\n\n\nformatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n\nhandler = TimedRotatingFileHandler('logs/app.log', when=\"midnight\", interval=1)\nhandler.setFormatter(formatter)\n\nlogger = logging.getLogger(\"LuisApp\")\nhandler.suffix = \"%Y-%m-%d.log\"\nlogger.addHandler(handler)\nlogger.setLevel(logging.INFO)\n\n\ndef get_luis_answer(request):\n headers = {\n 'Ocp-Apim-Subscription-Key': luis_api_key,\n }\n\n params = {\n 'q': request.POST.get('luis_query'),\n 'timezoneOffset': '0',\n 'verbose': 'false',\n 'spellCheck': 'false',\n 'staging': 'false',\n }\n\n try:\n logger.info(\"Sent query to LUIS - \" + request.POST.get('luis_query'))\n r = requests.get(luis_url, headers=headers, params=params)\n json_response = r.json()\n logger.info(\"Response from LUIS - \" + str(json_response['topScoringIntent']['intent']))\n context = {'intent': json_response}\n\n except Exception as e:\n context = {'error': e}\n logger.error(e)\n\n return render(request, 'luis/luis_answer.html', context)\n","sub_path":"luis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"566070198","text":"import os, sys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\nPARENT_PATH = os.path.abspath(\"..\")\nif PARENT_PATH not in sys.path:\n sys.path.insert(0, PARENT_PATH)\n\nfrom common.dsl import DSL\n\n\nclass ShippingPage(DSL):\n\n accept_terms_of_use_checkbox = (By.CSS_SELECTOR, '#uniform-cgv')\n proceed_to_checkout_button = (By.NAME, 'processCarrier')\n\n\n def click_on_accept_terms_of_use(self):\n super().click(EC.element_to_be_clickable(self.accept_terms_of_use_checkbox))\n\n\n def click_on_proceed_to_checkout(self):\n super().click(EC.element_to_be_clickable(self.proceed_to_checkout_button))\n","sub_path":"features/pages/shipping_page.py","file_name":"shipping_page.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"307861360","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/8/22 下午9:05\n# @Author : Zhixin Piao \n# @Email : piaozhx@shanghaitech.edu.cn\n\nimport os\nimport json\nimport multiprocessing\nimport math\nimport datetime\n\n\ndef get_user_name_and_run_time(pid):\n stdout = os.popen(\"ps -p %s -o user= -o etime=\" % pid).read()\n if stdout == '':\n user_name, run_time = 'dead', 'dead'\n else:\n 
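# ps -p <pid> -o user= -o etime= prints two whitespace-separated fields: the owning user and the elapsed run time\n        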
user_name, run_time = stdout.split()\n run_time = list(run_time)\n\n run_time[-3] = '分钟'\n\n if len(run_time) > 5:\n run_time[-6] = '小时'\n run_time = ''.join(run_time)\n run_time = run_time.replace('-', '天')\n run_time += '秒'\n\n if user_name == 'root':\n user_name = os.popen(''' docker inspect --format '{{.Name}}' \"$(cat /proc/%d/cgroup |tail -n 1 |cut -d / -f 3)\" | sed 's/^\\///' ''' % pid).read()\n\n user_name = user_name.strip()\n run_time = run_time.strip()\n\n return user_name, run_time\n\ndef main():\n gpu_msg_list = os.popen(\"/public/anaconda3/bin/gpustat -p -u --json\").read()\n gpu_msg_list = json.loads(gpu_msg_list)\n\n for gpu_msg in gpu_msg_list['gpus']:\n for process in gpu_msg['processes']:\n user_name, run_time = get_user_name_and_run_time(process['pid'])\n process['username'] = user_name\n process['runtime'] = run_time\n\n cpu_info = os.popen(\"iostat -c | tail -n 2\").read()\n user, nice, system, iowait, steal, idle = map(lambda x: float(x), cpu_info.split())\n\n cpu_utils = 100 - idle\n cpu_num = multiprocessing.cpu_count()\n cpu_msg = '%.2f%% [%d/%d]' % (cpu_utils, math.ceil(cpu_num * cpu_utils * 0.01), cpu_num)\n\n _, total, used, _, _, _, available = os.popen('''free -h | head -n 2 | tail -n 1''').read().split()\n memory_msg = '%s/%s' % (used, total)\n\n gpu_msg_list['cpu_msg'] = cpu_msg\n gpu_msg_list['memory_msg'] = memory_msg\n\n # convert query time format\n query_time = gpu_msg_list['query_time']\n query_time = query_time[:query_time.find('.')]\n query_time = datetime.datetime.strptime(query_time, \"%Y-%m-%dT%H:%M:%S\")\n query_time = query_time.strftime('%Y-%m-%d %H:%M:%S')\n gpu_msg_list['query_time'] = query_time\n\n\n\n print(json.dumps(gpu_msg_list, ensure_ascii=False))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"gpu_tools/get_gpu_msg.py","file_name":"get_gpu_msg.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"666906","text":"#Prime Number Checker\r\nimport os\r\n\r\ndef is_prime(x):\r\n if x > 2:\r\n for a in range(2, x):\r\n #print(\"Trying number: \" + str(a))\r\n if x % a == 0:\r\n print(str(a) + \" goes into \" + str(x) + \" so the number IS NOT prime!\", end = \"\")\r\n return False\r\n else:\r\n print(\"Numbers \" + str(a) + \" and below don't go into \" + str(x) + \" so the number is prime!\", end = \"\")\r\n return True\r\n elif x < 2:\r\n print(str(x) + \" isn't prime.\", end = \"\")\r\n return False\r\n else:\r\n print(\"2 is prime.\", end = \"\")\r\n return True\r\n\r\ndef dataVerify(value, question):\r\n while True:\r\n while True:\r\n try:\r\n value = int(value)\r\n break\r\n \r\n except ValueError:\r\n print(\"Try again.\\n\")\r\n value = input(question)\r\n if value<1:\r\n print(\"Try again, value must be greater than 0.\\n\")\r\n value = input(question)\r\n else:\r\n break\r\n \r\n return value\r\n\r\ndef main(boolVal):\r\n while boolVal:\r\n print(\"PRIME NUMBER CHECKER\\n\")\r\n \r\n question = \"Which number would you like to check for primeness? \"\r\n primeInput = input(question)\r\n primeInput = dataVerify(primeInput, question)\r\n \r\n os.system('cls')\r\n print(\"PRIME NUMBER CHECKER\\n\")\r\n\r\n is_prime(primeInput)\r\n \r\n again = input(\" Check again? 
\")\r\n if again in ['no', 'n', 'nope']:\r\n boolVal = False\r\n else:\r\n os.system('cls')\r\n \r\nif __name__ == \"__main__\":\r\n boolVal = True\r\n main(boolVal)\r\n","sub_path":"@Calculator - Prime Number Checker.py","file_name":"@Calculator - Prime Number Checker.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"100106204","text":"import sys\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets, QtSerialPort\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas\nfrom matplotlib.figure import Figure\n\n\nclass SerialPortManager(QtCore.QObject):\n dataChanged = QtCore.pyqtSignal(list)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n self._serial = QtSerialPort.QSerialPort(baudRate=115200)\n self.serial.setPortName(\"COM3\")\n self.serial.readyRead.connect(self.on_ready_read)\n\n @property\n def serial(self):\n return self._serial\n\n def start(self):\n self.serial.open(QtCore.QIODevice.ReadOnly)\n\n @QtCore.pyqtSlot()\n def on_ready_read(self):\n if self.serial.canReadLine():\n line = self.serial.readLine().data().decode()\n values = line.strip().split(\",\")\n try:\n data = list(map(float, values))\n except ValueError as e:\n print(\"error\", e)\n else:\n self.dataChanged.emit(data)\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n\n fig = Figure(figsize=(5, 4), dpi=100)\n self.canvas = FigureCanvas(fig)\n\n self.max_button = QtWidgets.QPushButton(self.tr(\"GetMax\"))\n self.all_button = QtWidgets.QPushButton(self.tr(\"All\"))\n self.one_button = QtWidgets.QPushButton(self.tr(\"1\"))\n self.two_button = QtWidgets.QPushButton(self.tr(\"2\"))\n self.three_button = QtWidgets.QPushButton(self.tr(\"3\"))\n self.four_button = QtWidgets.QPushButton(self.tr(\"4\"))\n\n central_widget = QtWidgets.QWidget()\n self.setCentralWidget(central_widget)\n hlay = QtWidgets.QHBoxLayout(central_widget)\n hlay.addWidget(self.canvas, stretch=1)\n\n grid_layout = QtWidgets.QGridLayout()\n grid_layout.addWidget(self.max_button, 0, 0)\n grid_layout.addWidget(self.all_button, 0, 1)\n grid_layout.addWidget(self.one_button, 1, 0)\n grid_layout.addWidget(self.two_button, 1, 1)\n grid_layout.addWidget(self.three_button, 2, 0)\n grid_layout.addWidget(self.four_button, 2, 1)\n\n vlay = QtWidgets.QVBoxLayout()\n vlay.addLayout(grid_layout)\n vlay.addStretch()\n\n hlay.addLayout(vlay)\n\n self.axes = self.canvas.figure.add_subplot(111)\n self.axes.set_ylim([0, 100])\n self.axes.set_title(\"Titre 1\")\n self.axes.tick_params(\n axis=\"x\", which=\"both\", bottom=False, top=False, labelbottom=False\n )\n\n self.axes.hlines(25, -0.5, 0.5, color=\"g\")\n self.axes.hlines(60, 1.5, 2.5, color=\"g\")\n self.axes.hlines(50, 3.5, 4.5, color=\"g\")\n self.axes.hlines(70, 5.5, 6.5, color=\"g\")\n\n self.containers = []\n\n self.update_bars([0, 0, 0, 0])\n\n self.resize(640, 480)\n\n @QtCore.pyqtSlot(list)\n def update_bars(self, values):\n if len(values) == 4:\n [c.remove() for c in self.containers]\n self.containers = []\n for index, value in zip((0, 2, 4, 6), values):\n c = self.axes.bar(index, value, color=\"b\")\n self.containers.append(c)\n self.canvas.draw()\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n\n w = MainWindow()\n w.show()\n\n manager = SerialPortManager()\n manager.dataChanged.connect(w.update_bars)\n manager.start()\n\n 
sys.exit(app.exec_())","sub_path":"200527-01-serial.py","file_name":"200527-01-serial.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"112101473","text":"import matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nimport numpy as np\n\nfont_size = 18\nrcParams['font.size'] = font_size\nrcParams['axes.titleweight'] = 'bold'\n\n\nlabels = [\n 'affine transform\\n(3D, order=0)',\n 'affine transform\\n(3D, order=1)',\n 'affine transform\\n(3D, order=3)',\n 'affine transform\\n(3D, order=5)',\n ]\n\ngreg_cpu_means = [ # Intel(R) Core(TM) i9-7900X CPU @ 3.30GHz\n 0.0601, # 'affine transform (3D, order=0)',\n 0.1725, # 'affine transform (3D, order=1)',\n 1.4531, # 'affine transform (3D, order=3)',\n 4.0232, # 'affine transform (3D, order=5)',\n]\n\ngreg_dask_cpu_means = [ # Intel(R) Core(TM) i9-7900X CPU @ 3.30GHz\n 0.0590, # 'affine transform (3D, order=0)',\n 0.0841, # 'affine transform (3D, order=1)',\n 0.3200, # 'affine transform (3D, order=3)',\n 0.7143, # 'affine transform (3D, order=5)',\n]\n\ngpu_means_1080ti = [\n 0.0008766, # 'affine transform (3D, order=0)',\n 0.001235, # 'affine transform (3D, order=1)',\n 0.01149, # 'affine transform (3D, order=3)',\n 0.02505, # 'affine transform (3D, order=5)',\n]\n\ngpu_means_V100 = [ # V100 results\n 0.0002528, # 'affine transform (3D, order=0)',\n 0.0002610, # 'affine transform (3D, order=1)',\n 0.003318, # 'affine transform (3D, order=3)',\n 0.007357, # 'affine transform (3D, order=5)',\n]\n\ngpu_means_A100 = [ # A100 results\n 0.0002947, # 'affine transform (3D, order=0)',\n 0.0003364, # 'affine transform (3D, order=1)',\n 0.002134, # 'affine transform (3D, order=3)',\n 0.003948, # 'affine transform (3D, order=5)',\n]\n\n\nx = np.arange(len(labels)) # the label locations\nlog_scale = False\n\nn_gpus = 0 # number of GPU models to include in the plot. Must be between 1 and 3\n\nones = np.ones_like(gpu_means_A100)\naccels_dask = np.array(greg_cpu_means) / np.array(greg_dask_cpu_means)\naccels_ti = np.array(greg_cpu_means) / np.array(gpu_means_1080ti)\naccels_a100 = np.array(greg_cpu_means) / np.array(gpu_means_A100)\n[print(f\"{name}: acceleration = {acc}\") for name, acc in zip(labels, accels_a100)]\n\n\nif n_gpus == 0:\n # Plot Dask results vs. SciPy only\n width = 0.43 # the width of the bars\n figsize = [18, 5.58]\n fig, ax = plt.subplots(figsize=figsize)\n\n rects1 = ax.bar(x - width / 2, ones, width, label='SciPy (CPU): Intel Core i9-7900X', color='#FFB043')\n rects2 = ax.bar(x + width / 2, accels_dask, width, label='dask-image (CPU): Intel Core i9-7900X', color='#6A16F8')\n\nelif n_gpus == 1:\n # Plot Dask and 1080 Ti results vs. SciPy\n figsize = [22, 6.54]\n fig, ax = plt.subplots(figsize=figsize)\n\n width = 0.29 # the width of the bars\n rects1 = ax.bar(x - width, ones, width, label='SciPy (CPU): Intel Core i9-7900X', color='#FFB043')\n rects2 = ax.bar(x, accels_dask, width, label='dask-image (CPU): Intel Core i9-7900X', color='#2CC9D9')\n rects3 = ax.bar(x + width, accels_ti, width, label='CuPy (GPU): NVIDIA GTX 1080 Ti', color='#6A16F8')\n\nelif n_gpus == 2:\n # A100, V100 and GTX-1080 Ti results vs. 
SciPy\n figsize = [30, 7.36]\n fig, ax = plt.subplots(figsize=figsize)\n\n width = 0.23 # the width of the bars\n rects1 = ax.bar(x - 1.5 * width, ones, width, label='SciPy (CPU): Intel Core i9-7900X', color='#FFB043')\n rects2 = ax.bar(x - 0.5 * width, accels_dask, width, label='dask-image (CPU): Intel Core i9-7900X', color='#D028C8')\n rects3 = ax.bar(x + 0.5 * width, accels_ti, width, label='CuPy (GPU): NVIDIA GTX 1080 Ti', color='#2CC9D9')\n rects4 = ax.bar(x + 1.5 * width, accels_a100, width, label='CuPy (GPU): NVIDIA A100', color='#6A16F8')\nelse:\n raise NotImplementedError(\"only 3 GPU models have results stored in this script\")\n\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('Performance Gain', fontdict=dict(fontweight='bold', fontsize=font_size))\nif n_gpus == 0:\n ax.set_title('3D Interpolation Performance: dask-image vs. SciPy')\nelse:\n ax.set_title('3D Interpolation Performance: SciPy / dask-image / CuPy')\n\nax.set_xticks(x)\nax.set_xticklabels(labels, fontdict=dict(fontweight='bold', fontsize=font_size))\nax.set_ylim([0, 5.0])\nax.legend()\n\nif log_scale:\n ax.set_yscale('log')\n #ax.set_ylim([0, 900.0])\n ax.set_ylim([0, 5000.0])\n ax.set_yticks([0.1, 1.0, 10.0, 100.0, 1000.0])\n ax.set_yticklabels(['0.1x', '1x', '10x', '100x', '1000x'])\n max_label_y = np.inf\nelse:\n max_label_y = 20\n ax.set_ylim([0, 10.0])\n\n\ndef autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n if height > 1e3:\n v = '{:0.4g}x'.format(height)\n else:\n v = '{:0.3g}x'.format(height)\n ax.annotate(v,\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\nautolabel(rects1)\nif n_gpus >= 0:\n autolabel(rects2)\nif n_gpus >= 1:\n autolabel(rects3)\nif n_gpus >= 2:\n autolabel(rects4)\n\nfig.tight_layout()\n\nplt.show()\n","sub_path":"benchmarks/viz/plot_gpu_cupy_interp_by_accel.py","file_name":"plot_gpu_cupy_interp_by_accel.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"629033955","text":"from flask import Flask,render_template,jsonify,request\napp = Flask(__name__)\nimport pymysql\nfrom flask_cors import CORS\nCORS(app)\n\n\n# 数据库连接\ndb = pymysql.connect(host=\"10.141.209.224\", user=\"root\", password=\"sdzh521\", db=\"guns\", charset='utf8')\ncursor = db.cursor()\n\n@app.route('/market/', methods=['GET'])\ndef market():\n cursor.execute(\"SELECT market_code,market_name,train_id FROM `info_market`\")\n res = cursor.fetchall()\n name_arr = []\n code_arr = []\n\n for line in res:\n code = line[0]\n name = line[1]\n train_id = line[2]\n print(train_id)\n # name = '快客便利店('+name+')'\n if train_id == 0:\n print(name)\n name_arr.append(name)\n print(code)\n code_arr.append(code)\n return jsonify({'code':code_arr,'name':name_arr})\n\n\n@app.route('/site/', methods=['GET'])\ndef site_view():\n siteId = (int)(request.args.get('id'))\n print(siteId)\n cursor.execute(\"SELECT lon_lan FROM `info_market` WHERE market_id = %s\", siteId)\n lon_lan = cursor.fetchone()\n return jsonify({'lat':lon_lan[0].split(',')[1],'lng':lon_lan[0].split(',')[0],\"success\": 0,\"message\": \"请求成功。\"})\n\n\n@app.route('/lei/')\ndef lei_view():\n print('lei used')\n return render_template('routeOriginal.html')\n\n@app.route('/zhuangche/')\ndef delivey_view_zhua():\n return 
render_template('load2.html')\n\n@app.route('/pick/')\ndef delivey_view_pick():\n return render_template('dynamic_pick22.html')\n\nfrom flask_sqlalchemy import SQLAlchemy\nimport traceback\nimport datetime\nimport json\nDIALECT = 'mysql'\nDRIVER = 'pymysql'\nUSERNAME = 'root'\nPASSWORD = 'sdzh521'\nHOST = '10.141.209.224'\nPORT = '3306'\nDATABASE = 'guns'\nSQLALCHEMY_DATABASE_URI = '{}+{}://{}:{}@{}:{}/{}?charset=utf8'.format(\n DIALECT, DRIVER, USERNAME, PASSWORD, HOST, PORT, DATABASE\n)\nSQLALCHEMY_COMMIT_ON_TEARDOWN = True\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_POOL_SIZE = 10\nSQLALCHEMY_MAX_OVERFLOW = 5\nkeys = [k for k in globals() if k.isupper()]\nconfig_dic = {k:globals()[k] for k in keys}\n# app.config.from_object('settings')\napp.config['JSON_AS_ASCII'] = False\napp.config.update(config_dic)\nCORS(app,supports_credentials=True)\ndb = SQLAlchemy(app)\nclass Traces(db.Model):\n __tablename__ = \"info_vehicletrace_history\"\n vehicletrace_id = db.Column(db.Integer, name='vehicletraceid', primary_key=True, autoincrement=True, nullable=False)\n train_id = db.Column(db.Integer, name='trainid', nullable=True)\n vehicle_id = db.Column(db.Integer, name='vehicleid', nullable=True)\n # longitude = db.Column(db.String(255), name='longitude', nullable=True)\n # latitude = db.Column(db.Integer, name='latitude', nullable=True)\n time = db.Column(db.DateTime, name='time', nullable=True)\n cur_load = db.Column(db.Float, name='cur_load', nullable=True)\n cur_volume = db.Column(db.Float, name='cur_volume', nullable=True)\n dispatchid = db.Column(db.Integer, name='dispatchid', nullable=True)\n lastshop = db.Column(db.Integer, name='last_shop', nullable=True)\n next_shops = db.Column(db.String, name='next_shops', nullable=True)\n next_orders = db.Column(db.String, name='next_orders', nullable=True)\n\n@app.route('/view/trace')\ndef view():\n result = {}\n try:\n base_query = db.session.query(Traces)\n stops = base_query.all()\n result['msg'] = \"success\"\n data = [st.__dict__ for st in stops]\n for item in data:\n item.pop('_sa_instance_state')\n # Session = db.session()\n\n vehicle2state = {}\n for d in data:\n v_id = d['vehicle_id']\n if v_id not in vehicle2state:\n vehicle2state[v_id] = {}\n vehicle2state[v_id]['volume'] = {}\n vehicle2state[v_id]['load'] = {}\n vehicle2state[v_id]['last_stop'] = {}\n cur_time = (d['time'] - datetime.datetime(1970, 1, 1)).seconds\n volume = d['cur_volume']\n load = d['cur_load']\n last_stop = d['lastshop']\n vehicle2state[v_id]['last_stop'][cur_time] =last_stop\n vehicle2state[v_id]['volume'][cur_time] = volume\n vehicle2state[v_id]['load'][cur_time] = load\n data_dict = {}\n for k in vehicle2state:\n data_dict[k] = {}\n volume_dic = vehicle2state[k]['volume']\n load_dic = vehicle2state[k]['load']\n lastshop_dic = vehicle2state[k]['last_stop']\n volumes = [round(volume_dic[t],1) for t in sorted(volume_dic.keys())]\n loads = [round(load_dic[t],1) for t in sorted(load_dic.keys())]\n laststops = [lastshop_dic[t] for t in sorted(lastshop_dic.keys())]\n data_dict[k]['load'] = loads\n data_dict[k]['volume'] = volumes\n data_dict[k]['last_shop'] = laststops\n result['data'] = data_dict\n result['success'] = 0\n db.session.close()\n except Exception as e:\n tb = traceback.format_exc()\n f = open(\"log.log\", mode=\"a\")\n f.write(str(datetime.datetime.now())+'\\n')\n f.write(tb)\n f.close()\n # msg = e.args[0]\n result['msg'] = tb\n result['success'] = 1\n db.session.close()\n return json.dumps(result, ensure_ascii=False)\n\n return json.dumps(result, 
ensure_ascii=False, default=str)\n\n\nif __name__ == '__main__':\n app.run(debug=True,host='0.0.0.0',port=5303)\n","sub_path":"leitao.py","file_name":"leitao.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"275408404","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('frontpage', '0014_auto_20151124_0142'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='contentgroup',\n options={'ordering': ['-publish_date']},\n ),\n migrations.AlterField(\n model_name='contentgroup',\n name='first_auxiliary_item_object_id',\n field=models.PositiveIntegerField(null=True, verbose_name='First Auxiliary Item', blank=True),\n ),\n migrations.AlterField(\n model_name='contentgroup',\n name='main_item_object_id',\n field=models.PositiveIntegerField(default=1, verbose_name='Main Item'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='contentgroup',\n name='second_auxiliary_item_object_id',\n field=models.PositiveIntegerField(null=True, verbose_name='Second Auxiliary Item', blank=True),\n ),\n migrations.AlterField(\n model_name='contentgroup',\n name='third_auxiliary_item_object_id',\n field=models.PositiveIntegerField(null=True, verbose_name='Third Auxiliary Item', blank=True),\n ),\n ]\n","sub_path":"rhizome/frontpage/migrations/0015_auto_20151124_0347.py","file_name":"0015_auto_20151124_0347.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"408550132","text":"import random, requests, signal, time, threading\n\nsignal.signal(signal.SIGINT, lambda signum, frame: exit())\n\n\ncount = 0\n\ndef monitor():\n global count\n while True:\n time.sleep(1)\n print(f\"{'*' * (count // 8)}\")\n count = 0\n\nthread = threading.Thread(target=monitor)\nthread.daemon = True\nthread.start()\n\n\n# Histogram of expovariate values:\n# value | count\n# ----- | -----\n# 64 | *************************************************************\n# 127 | ********************************\n# 191 | ***************\n# 254 | ******\n# 318 | ***\n# 382 | **\n# 445 | *\n# 509 |\n\nwhile True:\n value = int(random.expovariate(1) * 100)\n response = requests.get(f'http://127.0.0.1:8000/echo/{value}')\n count += 1\n","sub_path":"tests/talk/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"176598806","text":"from library import allSequences, binomial\n\nN = 3\nset = [0, 1, 2]\ntime = [0, 1, 2]\nT = 1\n\nif T == N * max(time) or T == min(time) * N:\n numberCalc = 1\nelse:\n aux = [binomial(N, i) for i in range(1, N + 1)]\n if T > N * max(time) / 2:\n numberCalc = sum(aux[:abs(T - max(time))])\n else:\n numberCalc = sum(aux[:T])\n\nnumberSim = 0\nfor seq in allSequences(N, time):\n numberSim += sum(seq) == T\n\nprint(\n f'Using the formulae there are {numberCalc} sequences of length {N} with an time of {T}.'\n)\nprint(\n f'Using the simulation there are {numberSim} sequences of length {N} with an time of {T}.'\n)\n","sub_path":"telephone.py","file_name":"telephone.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"12859845","text":"# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on Thu Sep 20 09:22:30 2018\r\n\r\n@author: KIIC\r\n\"\"\"\r\n\r\n#Compute the Kolmogorov-Smirnov statistic on 2 samples.\r\n\r\n#This is a two-sided test for the null hypothesis that 2 independent samples \r\n#are drawn from the same continuous distribution.\r\n\r\n\r\nimport numpy as np\r\nfrom scipy import stats\r\nnp.random.seed(12345678) #fix random seed to get the same result\r\nn1 = 200 # size of first sample\r\nn2 = 300 # size of second sample\r\nrvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)\r\nrvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)\r\nresult = stats.ks_2samp(rvs1, rvs2)\r\nprint(result)","sub_path":"Kolmogorov-Smirnoff test.py","file_name":"Kolmogorov-Smirnoff test.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"217231298","text":"# Started 01.11.2020\n# New methods for object vector cell / map calculations\n\nimport sys, os\nimport datajoint as dj\n\nimport numpy as np\nimport warnings # Disable np.nanmean Runtime warning\n\nfrom scipy.ndimage import gaussian_filter\nfrom helpers_topography.utils import corr2\nfrom physt import special_histograms\nfrom astropy.convolution import convolve, Gaussian2DKernel\n\nfrom pointpats import PointPattern\nfrom pointpats.centrography import weighted_mean_center\n\nfrom scipy.stats import circmean, circstd\n\nimport math\nfrom tqdm.auto import tqdm\nimport copy\nimport warnings # Disable np.nanmean Runtime warning\n\n#### LOAD DATABASE #########################################\nfrom .dj_conn import *\nimhotte = dj.schema(horst_imaging_db)\n\nfrom .utils import make_multi_session_object_dict, get_filtered_cells\n\n\n@imhotte\nclass OVParams(dj.Lookup):\n definition = \"\"\"\n # Object centered map parameters\n ov_params_id : char(1) # Parameter set ID, starting with A\n ---\n bin_size_dist_ov : float # Bin size for distance binning in mm\n bins_angular_ov : int # Number of bins in 360 degrees\n sigma_time_ov : float # 2D gaussian smoothing of occupancy\n sigma_signal_ov : float # 2D guassian smoothing of binned signal\n \"\"\"\n contents = [['A', 25., 72, 2., 2.]]\n\n\n@imhotte\nclass OVOccupancy(dj.Computed):\n definition = \"\"\"\n # Object centered occupancy\n -> Tracking.OpenField\n -> SignalTrackingParams\n -> OVParams\n -> ArenaObjectPos\n ---\n occupancy_ov : blob@imgstore # Smoothed 2D occupancy map [seconds], x: angles, y: distance\n mask_occ_ov : blob@imgstore # Mask (where time = 0), x: angles, y: distance\n occupancy_raw_ov : blob@imgstore # Raw, non-smoothed 2D occupancy map, x: angles, y: distance\n explor_ratio_ov : double # Exploration ratio (visited bins over all bins)\n explor_std_ov : double # Exploration standard deviation (of visited bins)\n radial_edges_ov : blob@imgstore # Histogram edges in y (distance)\n angular_edges_ov : blob@imgstore # Histogram edges in x (angles)\n occupancy_time_ov : double # Time in seconds in occupancy\n fraction_occupancy_ov : double # Fraction of time in occupancy map\n\n \"\"\"\n\n @property\n def key_source(self):\n return Tracking.OpenField * SignalTrackingParams * OVParams * ArenaObjectPos & Session.Apparatus\n\n def make(self, key):\n '''\n Get object centered occupancy maps as in\n Høydal, Ø. A. 
et al., doi:10.1038/s41586-019-1077-7\n\n These are used further in OVMap() to obtain object-centered, time-normalized firing rate maps.\n The basic layout follows the make routine of (2D) Occupancy()\n\n '''\n\n apparatus = (Apparatus.Geometry * Session.Apparatus & key).fetch1()\n params_ov = (OVParams & key).fetch1()\n params_st = (SignalTrackingParams & key).fetch1()\n\n if apparatus['arena_geometry'] == 'square':\n tracking_data = (Tracking * Tracking.OpenField & key).fetch1()\n object_data = (ArenaObjectPos & key).fetch1()\n\n tracking_frametime = 1/tracking_data['sample_rate']\n\n speed_filter = [(tracking_data['speed'] > params_st['speed_cutoff_low'])\n & (tracking_data['speed'] < params_st['speed_cutoff_high'])][0]\n\n # Max possible distance in arena (one corner to the diagonally opposite one)\n max_pos_dist = np.sqrt(apparatus['arena_x_dim'] ** 2 \\\n + apparatus['arena_y_dim'] ** 2)\n\n # Initialize distance and angular bins\n radial_bins = np.arange(0, max_pos_dist, params_ov['bin_size_dist_ov']) # Bin edges [mm]\n phi_bins = params_ov['bins_angular_ov'] # Number (!) of bins in 360 degrees\n\n\n # Calculate object centered angle and distance for every tracking point\n angles, dists = get_dist_angle_obj(\n tracking_data['x_pos'][speed_filter],\n tracking_data['y_pos'][speed_filter],\n object_data['obj_x_coord_calib'],\n object_data['obj_y_coord_calib']\n )\n # Get polar (radial) histogram\n # https://physt.readthedocs.io/en/latest/special_histograms.html#Polar-histogram\n radial_occupancy = special_histograms.polar_histogram(\n dists,\n angles,\n phi_bins = phi_bins,\n radial_bins = radial_bins,\n transformed = True\n )\n # Unpack in \"numpy\" style (returns structure similar to numpy.histogram)\n histogram, [yedges, xedges] = radial_occupancy.numpy_like\n # Convert to seconds\n histogram = histogram.astype(float) * tracking_frametime\n\n time_occupancy = np.sum(histogram)\n fraction_time_occupancy = time_occupancy / (len(tracking_data['x_pos']) * tracking_frametime)\n\n histogram_original = histogram.copy()\n # Smooth occupancy\n # ‘wrap’ (a b c d | a b c d | a b c d)\n # The input is extended by wrapping around to the opposite edge.\n histogram = gaussian_filter(histogram, sigma=params_ov['sigma_time_ov'], mode='wrap')\n masked_histogram = np.ma.masked_where(histogram_original < 0.001, histogram) # arbitrary threshold of 1 ms\n\n # Quick statistics on coverage:\n explor_ratio = len(histogram_original[histogram_original > 0]) / len(histogram_original.ravel())\n explor_std = np.std(histogram_original[histogram_original > 0].ravel())\n\n # Build entry dict\n entry_dict = {\n 'occupancy_ov' : histogram,\n 'mask_occ_ov' : masked_histogram.mask,\n 'occupancy_raw_ov' : masked_histogram.data,\n 'explor_ratio_ov' : explor_ratio,\n 'explor_std_ov' : explor_std,\n 'radial_edges_ov' : yedges,\n 'angular_edges_ov' : xedges,\n 'occupancy_time_ov': time_occupancy,\n 'fraction_occupancy_ov': fraction_time_occupancy\n }\n\n self.insert1({**key, **entry_dict})\n else:\n raise NotImplementedError('Geometry \"{}\" not implemented yet'.format(apparatus['arena_geometry']))\n\n\n@imhotte\nclass OVMap(dj.Computed):\n definition = \"\"\"\n # Object centered ratemap (vector map)\n -> SignalTracking.proj()\n -> OVOccupancy.proj(tracking_dataset='dataset_name')\n ---\n ovmap : blob@imgstore # Object centered ratemap (\"vector map\")\n ovmap_raw : blob@imgstore # Unsmoothed (raw) 2D ratemap\n mask_ovmap : blob@imgstore # Mask (where time = 0)\n binned_raw_ov : blob@imgstore # Raw, binned signal\n 
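    # Note: bin_max_ov below is an index pair (row, col) = (distance bin, angle bin)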
bin_max_ov : blob@imgstore # Bin with maximum signal (ovmap(bin_max) = max(ovmap))\n max_ov : double # Maximum\n \"\"\"\n\n @property\n def key_source(self):\n return super().key_source & 's_t_params_id = \"A\"'\n\n def make(self,key):\n occupancy_entry = (OVOccupancy & key).fetch1()\n signaltracking_entry = (SignalTracking & key).fetch1()\n params_ov = (OVParams & key).fetch1()\n\n object_data = (ArenaObjectPos & key).fetch1()\n\n occupancy = np.ma.array(occupancy_entry['occupancy_ov'], mask=occupancy_entry['mask_occ_ov'])\n angular_edges = occupancy_entry['angular_edges_ov'].copy()\n radial_edges = occupancy_entry['radial_edges_ov'].copy()\n\n # Logic:\n # angular_edges = values in x = angles\n # radial_edges = values in y = dists\n\n # Calculate object centered angle and distance for every signal tracking point\n angles, dists = get_dist_angle_obj(\n signaltracking_entry['x_pos_signal'],\n signaltracking_entry['y_pos_signal'],\n object_data['obj_x_coord_calib'],\n object_data['obj_y_coord_calib']\n )\n\n # Supplement 'angles' and 'dists' to retrieved signaltracking_entry\n signaltracking_entry['x_pos_signal'] = angles\n signaltracking_entry['y_pos_signal'] = dists\n\n # Need to rename a parameter for calc_ratemap() to work\n params_ov['sigma_signal'] = params_ov['sigma_signal_ov']\n\n # Get object vector (=rate) map\n ovmap_dict = calc_ratemap(occupancy, angular_edges, radial_edges, signaltracking_entry, params_ov, pad_mode='wrap')\n\n key['ovmap'] = ovmap_dict['ratemap'].data\n key['ovmap_raw'] = ovmap_dict['ratemap_raw'].data\n key['mask_ovmap'] = occupancy.mask # Unchanged!\n key['binned_raw_ov'] = ovmap_dict['binned_raw']\n key['bin_max_ov'] = ovmap_dict['bin_max']\n key['max_ov'] = ovmap_dict['max']\n\n self.insert1(key)\n\n\n\n@imhotte\nclass OVCFields(dj.Computed):\n '''\n Object vector cells\n\n PART A: Field based comparisons\n\n\n\n CAVEAT:\n This currently works only for a limited set of session types / configurations:\n - 1 baseline, 2 object sessions with one object each\n - 1 baseline, 1 object session with two objects\n\n '''\n\n definition = \"\"\"\n # Object vector cell (OVC) field based calculations\n -> Ratemap.proj(base_session='session_name')\n -> ShuffleParams\n ---\n object1_session : varchar(16) # Object session 1\n object2_session : varchar(16) # Object session 2\n\n \"\"\"\n\n ##### FIELD BASED ANALYSIS ###############################################################################################\n\n class Fields(dj.Part):\n definition = \"\"\"\n # OVC calculated field statistics (all fields)\n -> master\n object1_field_id : int # Field ID of field in object session 1\n object2_field_id : int # Field ID of closest field to object1_field_id in object session 2\n ---\n dist_fields : double # Euclidian distance between fields with object1_field_id and object2_field_id - object centered\n dist_to_object = NULL : double # Distance of field from object [average of field in object session 1 and 2]\n angle_to_object = NULL : double # Angle of field to object [average of field in object session 1 and 2]\n object1_field_base = NULL : double # (Object session 1 field mean rate in object session 1) / (Field mean rate in base session)\n object2_field_base = NULL : double # (Object session 2 field mean rate in object session 2) / (Field mean rate in base session)\n \"\"\"\n\n #######################################################################################################################\n\n\n @property\n def key_source(self):\n '''\n Filter for metasessions for which 
exactly 2 objects were presented either concurrently or in two subsequent sessions.\n Return base sessions for those.\n '''\n object_sessions = Session.SessionType.proj() * ArenaObjectPos & 'sessiontype = \"Open Field Object\"'\n meta_object_sessions = MetaSession.aggr(object_sessions, n=\"count(*)\") & 'n=2' # Filter out everything that is not 2 objects\n base_sessions = Session.SessionType & meta_object_sessions.proj() & 'sessiontype = \"Open Field\"'\n return Ratemap.proj(base_session='session_name') \\\n * ShuffleParams \\\n & base_sessions.proj(base_session='session_name') \\\n & Shuffled.proj(base_session='session_name') \\\n & 'signal_type = \"spikes\"'\\\n & 's_t_params_id = \"A\"'\n\n def make(self, key):\n # Clean up key\n # make() of OVC()\n key_ = key.copy()\n\n # Get rid of some keys\n for key2pop in ['base_session', 'session_order', 'signal_dataset', 'tracking_dataset']:\n _ = key.pop(key2pop)\n\n session_dict = make_multi_session_object_dict(key)\n\n bin_dict = {}\n # Complement session dictionary\n for session, session_entry in session_dict.items():\n # Ratemap and fields\n rm, mask = (Ratemap.proj('ratemap', 'mask_rm') & session_entry & key).fetch1('ratemap', 'mask_rm')\n session_dict[session]['ratemap'] = np.ma.array(rm, mask = mask)\n # Object\n if session != 'base':\n # Take care of firing fields\n session_dict[session]['fields'] = (Ratemap.Fields & session_entry & key).fetch(order_by='field_no ASC', as_dict=True)\n\n # Take care of objects / positions\n obj_x, obj_y = (ArenaObjectPos & session_entry & key).fetch1('obj_x_coord_calib','obj_y_coord_calib')\n x_edges, y_edges = (Occupancy & session_entry & key).fetch1('x_edges','y_edges')\n\n # ... Where is the object in ratemap \"coordinates\" (bins)\n bin_size_rm_x = np.mean(np.diff(x_edges))\n bin_size_rm_y = np.mean(np.diff(y_edges))\n\n # Save bin size for later\n bin_dict[session] = np.mean([bin_size_rm_x, bin_size_rm_y])\n\n obj_x_rm = ((obj_x - x_edges[0]) / bin_size_rm_x) - .5\n obj_y_rm = ((obj_y - y_edges[0]) / bin_size_rm_y) - .5\n\n session_dict[session]['object_x'] = obj_x\n session_dict[session]['object_y'] = obj_y\n session_dict[session]['object_x_rm'] = obj_x_rm\n session_dict[session]['object_y_rm'] = obj_y_rm\n\n\n master_dict = {\n 'base_session' : session_dict['base']['session_name'],\n 'object1_session' : session_dict['object1']['session_name'],\n 'object2_session' : session_dict['object2']['session_name'],\n }\n self.insert1({**key_,**master_dict})\n\n # CAVE!\n # As of November 2020 the field coordinate x/y values are \"inverted\"\n # both for field_peak and field_centroid\n\n dist_dict_list = []\n for field1 in session_dict['object1']['fields']:\n field1_no = field1['field_no']\n # Get angle and distance:\n y_diff = field1['field_centroid_x']-session_dict['object1']['object_y_rm']\n x_diff = field1['field_centroid_y']-session_dict['object1']['object_x_rm']\n\n object1_dist = np.sqrt(np.square(x_diff) + np.square(y_diff))\n object1_dist *= bin_dict['object1']\n object1_angle = np.arctan2(y_diff, x_diff)\n object1_angle = (object1_angle + 2 * np.pi) % (2 * np.pi) # Make sure it's [0,2*pi]\n\n field1_y, field1_x = y_diff, x_diff\n\n # Now for the second object session (or the second object)\n field2_no = []\n dists_1_2 = [] # Distances in between fields in object session 2 and current field in object session 1\n\n object2_dists = [] # Distance of field to object in object session2\n object2_angles = [] # Angles to object\n for field2 in session_dict['object2']['fields']:\n 
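                # For each candidate field in object session 2, compute its
                # object-centered vector (distance and angle to the object) and
                # the Euclidean distance to the current field from object
                # session 1; the nearest candidate is selected after this loop.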
field2_no.append(field2['field_no'])\n # Get angle and distance:\n y_diff = field2['field_centroid_x']-session_dict['object2']['object_y_rm']\n x_diff = field2['field_centroid_y']-session_dict['object2']['object_x_rm']\n\n object2_dist = np.sqrt(np.square(x_diff) + np.square(y_diff))\n object2_dist *= bin_dict['object2']\n object2_angle = np.arctan2(y_diff, x_diff)\n object2_angle = (object2_angle + 2 * np.pi) % (2 * np.pi) # Make sure it's [0,2*pi]\n # Save\n object2_dists.append(object2_dist)\n object2_angles.append(object2_angle)\n\n field2_y, field2_x = y_diff, x_diff\n\n # Get distance between fields (field1 and current field2)\n dist_1_2 = np.sqrt(np.square(field2_x - field1_x) + np.square(field2_y - field1_y))\n dist_1_2 *= np.mean([bin_dict['object1'], bin_dict['object2']]) # Convert to [mm]\n dists_1_2.append(dist_1_2)\n\n\n # Field number of matching field in object session 2\n if len(dists_1_2):\n field_idx_min = np.argmin(dists_1_2)\n dist_dict = {\n 'field_no_object1' : field1_no,\n 'field_no_object2' : field2_no[field_idx_min],\n 'dist_fields' : np.min(dists_1_2),\n 'dist_to_object' : np.mean([object1_dist, object2_dists[field_idx_min]]),\n 'angle_to_object' : circmean([object1_angle, object2_angles[field_idx_min]])\n }\n\n dist_dict_list.append(dist_dict)\n else:\n # Just skip this field lookup\n continue\n\n if not len(dist_dict_list):\n # Just stop. A master entry is still being written so results are not re-calculated when missing\n return\n\n ################# FIELD RATE COMPARISONS #########################################################################################################################\n\n # Loop over fields and calculate rate change to baseline session\n # While doing that, insert into part table .Fields()\n\n # Transform ratemaps from masked to arrays with nans\n rm_base = session_dict['base']['ratemap'].filled(fill_value=np.nan)\n rm_object_1 = session_dict['object1']['ratemap'].filled(fill_value=np.nan)\n rm_object_2 = session_dict['object2']['ratemap'].filled(fill_value=np.nan)\n\n for dist_dict in dist_dict_list:\n points_field_object1 = (Ratemap.Fields & session_dict['object1'] & key\n & 'field_no = {}'.format(dist_dict['field_no_object1'])).fetch1('field_coords')\n points_field_object2 = (Ratemap.Fields & session_dict['object2'] & key\n & 'field_no = {}'.format(dist_dict['field_no_object2'])).fetch1('field_coords')\n\n with warnings.catch_warnings():\n warnings.filterwarnings(action='ignore', message='Mean of empty slice')\n rate_field_object1_base = np.nanmean([rm_base[point[0], point[1]] for point in points_field_object1])\n rate_field_object2_base = np.nanmean([rm_base[point[0], point[1]] for point in points_field_object2])\n rate_field_object1 = np.nanmean([rm_object_1[point[0], point[1]] for point in points_field_object1])\n rate_field_object2 = np.nanmean([rm_object_2[point[0], point[1]] for point in points_field_object2])\n\n rel_base_object1 = rate_field_object1 / (rate_field_object1_base + np.finfo(float).eps) # Prevent divisions by zero\n rel_base_object2 = rate_field_object2 / (rate_field_object2_base + np.finfo(float).eps) # Prevent divisions by zero\n\n field_dict = {\n 'object1_field_id' : dist_dict['field_no_object1'],\n 'object2_field_id' : dist_dict['field_no_object2'],\n 'dist_fields' : dist_dict['dist_fields'],\n 'dist_to_object' : dist_dict['dist_to_object'],\n 'angle_to_object' : dist_dict['angle_to_object'],\n 'object1_field_base' : rel_base_object1,\n 'object2_field_base' : rel_base_object2\n }\n\n 
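            # The relative rates above average the ratemap over the field's own
            # pixels, so a value > 1 means the field fired more in the object
            # session than at the same locations in the baseline session.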
self.Fields.insert1({**key_, **field_dict}, skip_duplicates=True)\n\n\n\n@imhotte\nclass OVCScores(dj.Computed):\n '''\n Object vector cells\n\n PART B: Object vector map based comparisons\n\n\n\n CAVEAT:\n This currently works only for a limited set of session types / configurations:\n - 1 baseline, 2 object sessions with one object each\n - 1 baseline, 1 object session with two objects\n\n '''\n\n definition = \"\"\"\n # Object vector cell (OVC) vector map score based calculations\n -> SignalTracking.proj(base_session='session_name')\n -> OVParams\n -> ShuffleParams\n ---\n object1_session : varchar(16) # Object session 1\n object2_session : varchar(16) # Object session 2\n ovscore : double # Object vector score (2D correlation between OV maps)\n \"\"\"\n\n ##### SHUFFLED OBJECT VECTOR SCORE ###############################################################################################\n\n class ShuffledOVScore(dj.Part):\n definition = \"\"\"\n # Shuffled Object vector (OV) score and shuffling\n -> master\n ---\n shuffled_ovscores_95perc : double # Object vector score shuffling for cell: 95th percentile\n shuffled_ovscores_99perc : double # Object vector score shuffling for cell: 99th percentile\n shuffled_ovscores : blob@imgstore # Object vector score shuffling for cell\n \"\"\"\n\n #######################################################################################################################\n\n @property\n def key_source(self):\n '''\n Filter for metasessions for which exactly 2 objects were presented either concurrently or in two subsequent sessions.\n Return base sessions for those.\n '''\n object_sessions = Session.SessionType.proj() * ArenaObjectPos & 'sessiontype = \"Open Field Object\"'\n meta_object_sessions = MetaSession.aggr(object_sessions, n=\"count(*)\") & 'n=2' # Filter out everything that is not 2 objects\n base_sessions = Session.SessionType & meta_object_sessions.proj() & 'sessiontype = \"Open Field\"'\n return SignalTracking.proj(base_session='session_name') \\\n * OVParams \\\n * ShuffleParams \\\n & base_sessions.proj(base_session='session_name') \\\n & Shuffled.proj(base_session='session_name') \\\n & 'signal_type = \"spikes\"'\\\n & 's_t_params_id = \"A\"'\n\n\n def make(self, key):\n # Clean up key\n # make() of OVC()\n key_ = key.copy()\n\n # Get rid of some keys\n for key2pop in ['base_session', 'session_order', 'signal_dataset', 'tracking_dataset']:\n _ = key.pop(key2pop)\n\n session_dict = make_multi_session_object_dict(key)\n\n # Complement session dictionary\n for session, session_entry in session_dict.items():\n # Object\n if session != 'base':\n # OVMap\n ovmap, mask = (OVMap.proj('ovmap', 'mask_ovmap') & session_entry & key).fetch1('ovmap', 'mask_ovmap')\n session_dict[session]['ovmap'] = np.ma.array(ovmap, mask = mask)\n\n\n # Get score\n ovscore = corr2(session_dict['object1']['ovmap'],session_dict['object2']['ovmap'])\n\n master_dict = {\n 'base_session' : session_dict['base']['session_name'],\n 'object1_session' : session_dict['object1']['session_name'],\n 'object2_session' : session_dict['object2']['session_name'],\n 'ovscore' : ovscore\n }\n\n self.insert1({**key_,**master_dict})\n\n ##### SHUFFLING\n object1_session_shuffled_ovmaps = get_shuffled_ovmaps(session_dict['object1'], copy.deepcopy(key))\n if session_dict['object1']['session_name'] == session_dict['object2']['session_name']:\n object1_session_shuffled_ovmaps, object2_session_shuffled_ovmaps = split_list(object1_session_shuffled_ovmaps)\n else:\n object2_session_shuffled_ovmaps = 
get_shuffled_ovmaps(session_dict['object2'], copy.deepcopy(key))\n\n # Get shuffled scores\n shuffled_ovscores = []\n for ovmap1 in object1_session_shuffled_ovmaps:\n for ovmap2 in object2_session_shuffled_ovmaps:\n shuffled_ovscores.append(corr2(ovmap1, ovmap2))\n\n shuffled_scores_dict = {\n 'shuffled_ovscores_95perc' : np.percentile(shuffled_ovscores, 95),\n 'shuffled_ovscores_99perc' : np.percentile(shuffled_ovscores, 99),\n 'shuffled_ovscores' : shuffled_ovscores,\n }\n\n self.ShuffledOVScore.insert1({**key_,**shuffled_scores_dict})\n\n\ndef get_shuffled_ovmaps(session_key, shuffling_key):\n '''\n This re-implements many of the functions\n already established for the main Shuffled() table.\n\n Returns\n -------\n shuffled_ovmaps : list : all shuffled OV maps\n\n '''\n #### OVMAP related\n occupancy_entry = (OVOccupancy & session_key).fetch1()\n params_ov = (OVParams & session_key).fetch1()\n\n object_data = (ArenaObjectPos & session_key).fetch1()\n occupancy = np.ma.array(occupancy_entry['occupancy_ov'], mask=occupancy_entry['mask_occ_ov'])\n\n angular_edges = occupancy_entry['angular_edges_ov'].copy()\n radial_edges = occupancy_entry['radial_edges_ov'].copy()\n\n #### Spikes / Tracking / Sync\n st_params = {}\n st_params['speed_cutoff_low'], st_params['speed_cutoff_high'], time_offset = (SignalTrackingParams & session_key & shuffling_key).fetch1(\n 'speed_cutoff_low', 'speed_cutoff_high', 'time_offset')\n\n spikes = (FilteredSpikes.proj(signal_dataset='dataset_name', spikes='filtered_spikes')\n & session_key & shuffling_key).fetch1('spikes')\n tracking = (Tracking.OpenField * Tracking.proj(tracking_dataset='dataset_name')\n & session_key & shuffling_key).fetch1()\n\n center_y, center_plane, proj_mean_img = ((Projection.proj('mean_image') * Cell.Rois).proj(\n ..., signal_dataset='dataset_name') & session_key & shuffling_key).fetch1('center_y', 'center_plane', 'mean_image')\n\n num_planes, frame_rate_si, seconds_per_line, width_SI, height_SI = (Tif.SI & (Session & session_key & shuffling_key)).fetch1(\n 'num_scanning_depths', 'framerate', 'seconds_per_line', 'width_scanimage', 'height_scanimage')\n\n\n # Special case where the SI image shape is not the same with the projection image shape\n # - suggesting the image has been cropped prior to suite2p analysis\n # - thus, the \"center_y\" is no longer accurate -> using the middle line for \"center_y\"\n if proj_mean_img.shape != (width_SI, height_SI):\n center_y = int(height_SI/2)\n\n sync_data_frames, sample_rate_sync, shuffling_key['sync_dataset_frames_imaging'], shuffling_key['sync_name_frames_imaging'] = \\\n (MetaSession.Setup * \\\n Setup.Sync * Sync \\\n & 'generic_name = \"frames_imaging\"' \\\n & session_key & shuffling_key).fetch1(\n 'sync_data', 'sample_rate', 'dataset_name', 'sync_name')\n\n tracking_type = (Dataset * Tracking & session_key).fetch1('datasettype')\n if tracking_type == 'DLC_tracking':\n tracking_generic = 'TrackingDLC'\n elif 'Tracking2D_2LED' in tracking_type:\n tracking_generic = 'Tracking2LED'\n else:\n raise NotImplementedError(f'Tracking dataset type {tracking_type} not implemented')\n\n sync_data_track = (MetaSession.Setup * Setup.Sync\n * Sync & f'generic_name = \"{tracking_generic}\"' & session_key & shuffling_key).fetch1('sync_data')\n\n # Sanity checks\n # 1. Compare length of tracking data and tracking sync data\n # 2. Compare length of spike data and frame sync data\n # 3. 
Compare last timestamp frame sync data and tracking sync data\n\n if len(tracking['x_pos']) != len(sync_data_track):\n raise IndexError('Mismatch between length of sync data and tracking data')\n if len(spikes) != len(sync_data_frames):\n raise IndexError('Mismatch between length of sync data and spiking data')\n if np.abs(sync_data_track[-1] - sync_data_frames[-1]) > np.mean(np.diff(sync_data_frames)):\n raise IndexError('There is more than one frame difference between the end of sync streams')\n\n # -> Compensate for the mismatch between timestamp at the beginning of each frame and the time it takes the laser to reach the cell body\n seconds_per_plane = 1 / (frame_rate_si * num_planes) # from Tif.SI()\n # Why is this correct? Because frame_rate_si returns the \"volume\" rate.\n\n # Careful! \"samples\" are floating point (real valued) timestamps for pre-synced setups since\n # sample rate = 1. for those sync data\n seconds_to_cell = center_plane * seconds_per_plane + center_y * seconds_per_line\n samples_to_cell = seconds_to_cell * sample_rate_sync\n samples_offset = samples_to_cell + (time_offset * sample_rate_sync) # from 'MapParams'\n\n # Only spikes are shuffled at the moment\n signal_data = spikes\n signal_idxs = np.argwhere(spikes > 0).squeeze()\n\n # Retrieve shuffling offsets from Shuffled() table, limit by 1 (since Shuffled() depends on many more parameter sets)\n shuffling_offsets = (Shuffled & session_key & shuffling_key).fetch('shuffling_offsets', limit=1)[0]\n\n shuffled_ovmaps = []\n for shift in tqdm(shuffling_offsets):\n rolled_sync_data_frames = np.roll(sync_data_frames, shift)\n\n # Look up signal tracking\n signal_dict = calc_signal_dict(rolled_sync_data_frames, sync_data_track,\n tracking, signal_data, signal_idxs, samples_offset, st_params)\n\n # Calculate object centered angle and distance for every signal tracking point\n angles, dists = get_dist_angle_obj(\n signal_dict['x_pos_signal'],\n signal_dict['y_pos_signal'],\n object_data['obj_x_coord_calib'],\n object_data['obj_y_coord_calib']\n )\n\n # Supplement 'angles' and 'dists' to retrieved signaltracking_entry\n signal_dict['x_pos_signal'] = angles\n signal_dict['y_pos_signal'] = dists\n\n # Need to rename a parameter for calc_ratemap() to work\n params_ov['sigma_signal'] = params_ov['sigma_signal_ov']\n ovmap_dict = calc_ratemap(occupancy, angular_edges, radial_edges, signal_dict, params_ov, pad_mode='wrap')\n\n shuffled_ovmaps.append(ovmap_dict['ratemap'])\n\n return shuffled_ovmaps\n\n\n##### OVC SUMMARY TABLES #########################################################################################################\n\n@imhotte\nclass OVCutoffs(dj.Lookup):\n definition = \"\"\"\n # Object vector cell cutoffs\n ov_cutoff_id : char(1) # Parameter set ID, starting with A\n ---\n info_content_cutoff : varchar(100) # Information content cutoff (>). Session level: session_95, session_99\n ovscore_cutoff : varchar(100) # Object vector score cutoff (>). 
Session level: session_95, session_99\n dist_fields_cutoff : float # Distance [mm] (object centered field distances) (<)\n dist_to_object_cutoff : float # Distance [mm] to object (>)\n object1_field_base_cutoff : float # Relative rate of field in object session 1 compared to base session (>)\n object2_field_base_cutoff : float # Relative rate of field in object session 1 compared to base session (>)\n \"\"\"\n contents = [\n {\n 'ov_cutoff_id' : 'A',\n 'info_content_cutoff' : 'information_content_95',\n 'ovscore_cutoff' : 'shuffled_ovscores_95perc',\n 'dist_fields_cutoff' : 250.,\n 'dist_to_object_cutoff': 40.,\n 'object1_field_base_cutoff': 1.5,\n 'object2_field_base_cutoff': 1.5,\n }\n ]\n\n@imhotte\nclass OVC(dj.Computed):\n '''\n Object vector cells\n\n Final table: Summary\n\n '''\n\n definition = \"\"\"\n # Object vector cell (OVC) summary table\n -> OVCScores\n -> OVCutoffs\n -> OVCFields\n ---\n object1_session : varchar(16) # Object session 1\n object2_session : varchar(16) # Object session 2\n ovscore : double # Object vector score (2D correlation between OV maps)\n is_ovc : tinyint # 0 - not an OVC according to cutoffs, 1 - putative OVC\n no_fields : int # Number of filtered fields (matching cutoff criteria)\n mean_dist_to_object = NULL : double # Average distance of (filtered) fields to object [mm]\n mean_dist_fields = NULL : double # Average distance between fields [mm]\n mean_angle_to_object = NULL : double # Circular mean of field angles to object [0, 2*pi]\n std_angle_to_object = NULL : double # Circular standard deviation for field angles to object [radians]\n field_ids = NULL : blob@imgstore # Field IDs list of dictionaries ('object1_field_id', 'object2_field_id')\n angles_to_object = NULL : blob@imgstore # Field angles [0, 2*pi]\n dists_to_object = NULL : blob@imgstore # Distances of (filtered) fields to object [mm]\n dists_fields = NULL : blob@imgstore # Distances of (filtered) fields to object [mm]\n \"\"\"\n\n @property\n def key_source(self):\n # Need to constrain by CutoffsOVScore since the computation for this is implemented separately\n return OVCScores.proj() * OVCutoffs.proj() * OVCFields.proj() & CutoffsOVScore\n\n def make(self, key):\n\n ovc_cutoffs = (OVCutoffs & key).fetch1()\n ovscore = (OVCScores & key).fetch1('ovscore')\n\n cell_key = {}\n for k,v in key.items():\n if k not in ['session_order','base_session','tracking_dataset','signal_dataset']:\n cell_key[k] = v\n object1_session, object2_session = (OVCScores & key).fetch1('object1_session','object2_session')\n\n ####### INFO CONTENT FILTER ############################################################################################\n info_labels = []\n for obj_session in [object1_session, object2_session]:\n # Check whether we are dealing with\n # - Session level cutoff\n # - Single number cutoff\n\n if ovc_cutoffs[\"info_content_cutoff\"] == 'session_95':\n info_content_cutoff = _get_info_content_session_95(cell_key, obj_session)\n elif ovc_cutoffs[\"info_content_cutoff\"] == 'session_99':\n info_content_cutoff = _get_info_content_session_99(cell_key, obj_session)\n else:\n info_content_cutoff = ovc_cutoffs[\"info_content_cutoff\"]\n\n label_ = len(Ratemap.Stats * Shuffled.RatemapStats\\\n & f'information_content > {info_content_cutoff}'\\\n & f'session_name = \"{obj_session}\"' \\\n & cell_key)\n\n info_labels.append(label_)\n info_content_filter = int((np.array(info_labels) > 0).all())\n\n\n ####### OVSCORE FILTER 
#################################################################################################\n # Score filter\n # Check whether we are dealing with\n # - Session level cutoff\n # - Single number cutoff\n\n if ovc_cutoffs[\"ovscore_cutoff\"] == 'session_95':\n ovscore_cutoff = (CutoffsOVScore & key).fetch('ovscore_95',limit=1)[0] # Because there can be multiple session params\n elif ovc_cutoffs[\"ovscore_cutoff\"] == 'session_99':\n ovscore_cutoff = (CutoffsOVScore & key).fetch('ovscore_99',limit=1)[0]\n else:\n ovscore_cutoff = ovc_cutoffs[\"ovscore_cutoff\"]\n\n score_filter = len(OVCScores * OVCScores.ShuffledOVScore.proj('shuffled_ovscores_95perc','shuffled_ovscores_99perc') \\\n & f'ovscore > {ovscore_cutoff}' \\\n & key)\n\n # Field filter\n field_filter = OVCFields * OVCFields.Fields \\\n & f'dist_fields < {ovc_cutoffs[\"dist_fields_cutoff\"]}' \\\n & f'dist_to_object > {ovc_cutoffs[\"dist_to_object_cutoff\"]}' \\\n & f'object1_field_base > {ovc_cutoffs[\"object1_field_base_cutoff\"]}' \\\n & f'object2_field_base > {ovc_cutoffs[\"object2_field_base_cutoff\"]}'\\\n & key\n no_fields = len(field_filter)\n\n # Collect field properties\n if no_fields:\n field_ids = []\n dists_to_object = []\n angles_to_object = []\n dists_fields = []\n for field in field_filter:\n # Transcribe field ID dict\n field_id_dict = {}\n field_id_dict['object1_field_id'] = field['object1_field_id']\n field_id_dict['object2_field_id'] = field['object2_field_id']\n field_ids.append(field_id_dict)\n\n dists_to_object.append(field['dist_to_object'])\n angles_to_object.append(field['angle_to_object'])\n dists_fields.append(field['dist_fields'])\n\n mean_dist_to_object = np.nanmean(dists_to_object)\n mean_dist_fields = np.nanmean(dists_fields)\n mean_angle_to_object = circmean(angles_to_object, nan_policy='raise')\n std_angle_to_object = circstd(angles_to_object, nan_policy='raise')\n\n else:\n field_ids = None\n dists_to_object = None\n angles_to_object = None\n dists_fields = None\n mean_dist_to_object = None\n mean_dist_fields = None\n mean_angle_to_object = None\n std_angle_to_object = None\n\n # Make a decision - OVC yes (1) or no (0) ?\n is_ovc = (np.array([info_content_filter, score_filter, int(no_fields>0)]) == 1).all()\n\n # Create dictionary\n entry_dict = {\n 'object1_session' : object1_session,\n 'object2_session' : object2_session,\n 'ovscore' : ovscore,\n 'is_ovc' : int(is_ovc),\n 'no_fields' : no_fields,\n 'mean_dist_to_object' : mean_dist_to_object,\n 'mean_dist_fields' : mean_dist_fields,\n 'mean_angle_to_object' : mean_angle_to_object,\n 'std_angle_to_object' : std_angle_to_object,\n 'field_ids' : field_ids,\n 'angles_to_object' : angles_to_object,\n 'dists_to_object' : dists_to_object,\n 'dists_fields' : dists_fields\n }\n self.insert1({**key,**entry_dict})\n\n\n##############################################################################################################################\n############# OVC SUMMARY TABLES HELPERS\n\ndef __get_session_key(cell_key, session_name=None):\n ''' Strip cell ID and add session name '''\n session_key = {}\n for k,v in cell_key.items():\n if k not in ['cell_id']:\n session_key[k] = v\n if session_name is not None:\n session_key['session_name'] = session_name\n return session_key\n\n#### SPATIAL INFORMATION CONTENT #############################################################################################\n\ndef _get_info_content_session_95(cell_key, session_name):\n '''\n Retrieving from Shuffled.RatemapStats\n with STANDARD cell parameters as written in 
\"CONSTANTS\"\n\n '''\n info_content_key = __get_session_key(cell_key, session_name)\n filtered_cells = get_filtered_cells(Session & info_content_key, verbose=False)\n information_content_shuffles = (Shuffled.RatemapStats & info_content_key & filtered_cells).fetch('information_content_shuffles')\n percentile95 = np.nanpercentile(np.concatenate(information_content_shuffles), 95)\n return percentile95\n\ndef _get_info_content_session_99(cell_key, session_name):\n '''\n Retrieving from Shuffled.RatemapStats\n with STANDARD cell parameters as written in \"CONSTANTS\"\n\n '''\n info_content_key = __get_session_key(cell_key, session_name)\n filtered_cells = get_filtered_cells(Session & info_content_key, verbose=False)\n information_content_shuffles = (Shuffled.RatemapStats & info_content_key & filtered_cells).fetch('information_content_shuffles')\n percentile99 = np.nanpercentile(np.concatenate(information_content_shuffles), 99)\n return percentile99\n\n#### OVSCORE ################################################################################################################\n\n# Deprecated since OVSCore session filtering has been implemented separately (since cutoff retrieval is expensive)\n# def _get_ovscore_session_95(cell_key):\n# ovscore_key = __get_session_key(cell_key)\n# filtered_cells = get_filtered_cells(Session & ovscore_key, verbose=False)\n# # \"base_name\" vs. \"session_name\" in filtered_cells does not matter (meta session level!)\n# ovscore_shuffles = (OVCScores.ShuffledOVScore & ovscore_key & filtered_cells).fetch('shuffled_ovscores')\n# percentile95 = np.nanpercentile(np.concatenate(ovscore_shuffles), 95)\n# return percentile95\n\n# def _get_ovscore_session_99(cell_key):\n# ovscore_key = __get_session_key(cell_key)\n# filtered_cells = get_filtered_cells(Session & ovscore_key, verbose=False)\n# # \"base_name\" vs. 
\"session_name\" in filtered_cells does not matter (meta session level!)\n# ovscore_shuffles = (OVCScores.ShuffledOVScore & ovscore_key & filtered_cells).fetch('shuffled_ovscores')\n# percentile99 = np.nanpercentile(np.concatenate(ovscore_shuffles), 99)\n# return percentile99\n\n\n\n##############################################################################################################################\n##############################################################################################################################\n\n############## HELPERS\n\ndef get_dist_angle_obj(tracking_x, tracking_y, obj_x, obj_y):\n '''\n Calculate object centered distance and angle for\n each tracking point.\n\n '''\n angles = []\n dists = []\n\n for x,y in zip(tracking_x, tracking_y):\n angle = np.arctan2(y-obj_y, x-obj_x)\n angle = (angle + 2 * np.pi) % (2 * np.pi) # Make sure range is [0, 2*pi]\n angles.append(angle)\n\n dist = np.sqrt(np.square(x-obj_x) + np.square(y-obj_y))\n dists.append(dist)\n\n angles = np.array(angles)\n dists = np.array(dists)\n\n return angles, dists\n\ndef find_nearest(array, value):\n ''' Find nearest element to \"value\" in \"array\" and return index of that element '''\n idx = np.searchsorted(array, value, side='left')\n if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):\n return idx-1\n else:\n return idx\n\n\ndef calc_signal_dict(signal_sync, tracking_sync, tracking_data, signal_data, signal_idxs, samples_offset, params):\n '''\n # DUPLICATE FROM MAIN SCHEMA UNDER\n # SPATIAL_SCORES.PY!\n\n Look up tracking signal for every valid calcium event/spike based on sync data.\n\n Parameters\n ----------\n signal_sync : np.array\n Sync pulse number for every event in 'signal_data'\n tracking_sync : np.array\n Sync pulse number for every event in 'tracking_data'\n tracking_data : dict\n Fetched Tracking.OpenField() entry with:\n - x_pos\n - y_pos\n - head_angle\n - speed\n signal_data : np.array\n Calcium or spikes signal of length 'signal_sync'\n signal_idxs : np.array\n Indices of signal to keep (filter)\n samples_offset : int\n Number of samples to add to 'signal_sync' samples before\n lookup of tracking data. This shifts signal in time compared\n to tracking data and is used (1) to compensate for laser fly time\n in FOV and (2) for shuffling purposes\n params: dict\n - speed_cutoff_low: lower speed cutoff in unit 'speed' has in 'tracking_data'\n - speed_cutoff_low: upper speed cutoff in unit 'speed' has in 'tracking_data'\n\n Returns\n -------\n signal_dict : dict\n x_pos_signal\n y_pos_signal\n head_angle_signal\n speed_signal\n signal (amplitudes)\n '''\n\n tracking_keys = {'x_pos', 'y_pos', 'head_angle', 'speed'}\n signal_dict = {}\n\n tracking_indices_filtered = [] # Speed filtered\n signal_indices_filtered = [] # Spikes / Fluorescence\n\n signal_sync = signal_sync.astype(float)\n signal_sync += samples_offset # Shift in time\n signal_sync = signal_sync[signal_idxs]\n\n for idx, sync_pulse in zip(signal_idxs, signal_sync): # signal_idxs either filtered or whole series (spikes vs. 
delta f/f)\n tracking_idx = find_nearest(tracking_sync, sync_pulse)\n if (tracking_data['speed'][tracking_idx] > params['speed_cutoff_low']) and (tracking_data['speed'][tracking_idx] < params['speed_cutoff_high']):\n tracking_indices_filtered.append(tracking_idx)\n signal_indices_filtered.append(idx)\n\n # Build signal tracking dictionary\n for pos_key in tracking_keys:\n signal_dict[pos_key + '_signal'] = tracking_data[pos_key][tracking_indices_filtered]\n\n signal_dict['signal'] = signal_data[signal_indices_filtered].squeeze()\n if not signal_dict[\"signal\"].ndim == 1:\n # Somewhat convoluted logic: I'm not sure what circumstances call for the `.squeeze()`,\n # but where there is only a single value, that compresses it to a 0d array\n # Datajoint later converts that to a non-array floating point number during the\n # conversion to/from a blob. Therefore, enforce that the array still has at least 1 dimension\n signal_dict[\"signal\"] = np.expand_dims(signal_dict[\"signal\"], 0)\n return signal_dict\n\n\n\n\n# DUPLICATE FROM MAIN SCHEMA UNDER\n# SPATIAL_SCORES.PY!\ndef calc_ratemap(occupancy, x_edges, y_edges, signaltracking, params, pad_mode='symmetric'):\n '''\n\n # DUPLICATE FROM MAIN SCHEMA UNDER\n # SPATIAL_SCORES.PY!\n\n\n Calculate ratemap\n Parameters\n ----------\n occupancy : masked np.array\n Smoothed occupancy. Masked where occupancy low\n x_edges : np.array\n Bin edges in x\n y_edges : np.array\n Bin edges in y\n signaltracking : dict\n SignalTracking table entry\n params : dict\n MapParams table entry\n pad_mode : str\n Padding mode:\n - 'symmetric' (normal 2D ratemaps) or\n - 'wrap' (object vector maps)\n\n Returns\n -------\n ratemap_dict : dict\n - binned_raw : np.array: Binned raw (unsmoothed) signal\n - ratemap_raw: np masked array: Unsmoothed ratemap (mask where occupancy low)\n - ratemap : np masked array: Smoothed ratemap (mask where occupancy low)\n - bin_max : tuple : (x,y) coordinate of bin with maximum signal\n - max : float : Max of signal\n\n '''\n ratemap_dict = {}\n sigma_signal = params['sigma_signal']\n\n binned_signal = np.zeros_like(occupancy.data)\n # Add one at end to not miss signal at borders\n x_edges[-1] += 1\n y_edges[-1] += 1\n\n # Look up signal per bin\n for no_x in range(len(x_edges)-1):\n for no_y in range(len(y_edges)-1):\n boolean_x = (signaltracking['x_pos_signal'] >= x_edges[no_x]) & (signaltracking['x_pos_signal'] < x_edges[no_x+1])\n boolean_y = (signaltracking['y_pos_signal'] >= y_edges[no_y]) & (signaltracking['y_pos_signal'] < y_edges[no_y+1])\n extracted_signal = signaltracking['signal'][boolean_x & boolean_y]\n binned_signal[no_y, no_x] = np.nansum(extracted_signal)\n\n ratemap_dict['binned_raw'] = binned_signal\n binned_signal = np.ma.masked_where(occupancy.mask, binned_signal) # Masking. 
This step is probably unnecessary\n # since occupancy is already masked\n ratemap_dict['ratemap_raw'] = binned_signal / occupancy\n\n\n # Use astropy.convolve to smooth padded version of the spikemap\n\n binned_signal = np.ma.filled(binned_signal, np.nan) # First convert masked values to nan\n kernel = Gaussian2DKernel(x_stddev=sigma_signal) # Create astropy gaussian kernel\n\n if pad_mode == 'wrap':\n # For radial maps like object vector maps\n # Logic: Wrap around / pad the \"angular\" edges and leave the \"radial\" edges unchanged\n pad_width = ((0,\n 0),\n (int(5*sigma_signal),\n int(5*sigma_signal))) # Only pad one axis (axis=0: radius, axis=1: angles)\n\n binned_signal_padded = np.pad(binned_signal, pad_width=pad_width, mode='wrap') # 'wrap' for radial maps\n\n # There are some caveats here. For example for \"extended\" regions of nans that are\n # hard / impossible to interpolate. This will lead to edge effects but isn't usually\n # of concern.\n # However it does become problematic for OV (object-vector) map calculations.\n # Therefore, if pad_mode =='wrap', i.e. when radial 2D maps are fed in, convert nans to zeros and proceed.\n with warnings.catch_warnings():\n # Astropy throws a warning when nans are detected after smoothing (usually edge artefacts)\n warnings.filterwarnings(action='ignore', message=r'.*?') # Catch \"any\" warning\n binned_signal_smoothed = convolve(\n np.nan_to_num(binned_signal_padded),\n kernel,\n boundary='extend'\n )\n\n\n elif pad_mode == 'symmetric':\n # i.e. normal 2D ratemap. \"Symmetric\" is taken over from BNT function.\n pad_width = ((int(5*sigma_signal),\n int(5*sigma_signal)),\n (int(5*sigma_signal),\n int(5*sigma_signal))) # Symmetric in all directions\n\n binned_signal_padded = np.pad(binned_signal, pad_width=pad_width, mode='symmetric')\n\n # Everything else but wrap (i.e. usually 'symmetric') for normal 2D ratemaps\n with warnings.catch_warnings():\n # Astropy throws a warning when nans are detected after smoothing (usually edge artefacts)\n warnings.filterwarnings(action='ignore', message=r'.*?') # Catch \"any\" warning\n binned_signal_smoothed = convolve(\n binned_signal_padded,\n kernel,\n boundary='extend'\n )\n else:\n raise NotImplementedError(f'Pad mode \"{pad_mode}\" not implemented')\n\n # Crop out non-padded part\n binned_signal_smoothed = binned_signal_smoothed[pad_width[0][0]:[-pad_width[0][1] if pad_width[0][1] >0 else None][0],\n pad_width[1][0]:[-pad_width[1][1] if pad_width[1][1] >0 else None][0]]\n binned_signal_smoothed = np.ma.masked_where(occupancy.mask, binned_signal_smoothed) # Masking. 
This step is probably unnecessary\n # since occupancy is already masked\n masked_ratemap = binned_signal_smoothed / occupancy\n\n ratemap_dict['ratemap'] = masked_ratemap\n ratemap_dict['bin_max'] = np.unravel_index(masked_ratemap.argmax(), masked_ratemap.shape)\n ratemap_dict['max'] = np.max(masked_ratemap)\n\n return ratemap_dict\n\n# Helper duplicated from the shared helpers module:\ndef split_list(a_list):\n ''' Split a list into two halves; for odd lengths the first half is shorter. '''\n half = len(a_list)//2\n return a_list[:half], a_list[half:]\n","sub_path":"dj_schemas/ovc.py","file_name":"ovc.py","file_ext":"py","file_size_in_byte":52602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
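The ovc.py record above leans on one transform throughout: converting Cartesian tracking points into object-centered polar coordinates (get_dist_angle_obj). Below is a minimal, self-contained sketch of that transform, vectorized with NumPy instead of the per-point loop used above; the function name and the final sanity check are illustrative, not part of the schema.

import numpy as np

def object_centered_polar(x, y, obj_x, obj_y):
    """Vectorized equivalent of the per-point loop in get_dist_angle_obj:
    angles in [0, 2*pi) around the object, plus Euclidean distances."""
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    angles = np.arctan2(y - obj_y, x - obj_x)
    angles = (angles + 2 * np.pi) % (2 * np.pi)  # wrap into [0, 2*pi)
    dists = np.hypot(x - obj_x, y - obj_y)       # sqrt((x-ox)**2 + (y-oy)**2)
    return angles, dists

# Sanity check: a point directly "east" of the object sits at angle 0, distance 1.
angles, dists = object_centered_polar([1.0], [0.0], 0.0, 0.0)
assert np.isclose(angles[0], 0.0) and np.isclose(dists[0], 1.0)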
No user input.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    global gameBoard\n    global gameTurnCounter, gamePlayerTurn\n    global gameP1Score, gameP2Score\n    global gameScoreAmount\n    global gameHoroListP1, gameVertListP1, gameDiagFSListP1, gameDiagBSListP1\n    # FS = Forward slash = \\, BS = Back slash = /\n    global gameHoroListP2, gameVertListP2, gameDiagFSListP2, gameDiagBSListP2\n    global gameWidth, gameHeight\n\n    gameBoard = []\n    gameTurnCounter = 1\n    gamePlayerTurn = random.randint(1,2)\n    gameP1Score, gameP2Score = 0, 0\n    gameScoreAmount = 3\n    gameHoroListP1, gameVertListP1, gameDiagFSListP1 = [], [], []\n    gameDiagBSListP1, gameHoroListP2, gameVertListP2 = [], [], []\n    gameDiagFSListP2, gameDiagBSListP2 = [], []\n    gameWidth, gameHeight = 3, 3\n\n\ndef initGameBoard():\n    \"\"\"Inits a blank game board based on gameHeight and gameWidth\n\n    Board is a 2D array\n    Example of 3x3 board:\n    [ ['B', 'B', 'B'],\n      ['B', 'B', 'B'],\n      ['B', 'B', 'B'] ]\n\n    Returns\n    -------\n    None\n    \"\"\"\n    for rowNum in range(0, gameHeight):\n        row = []\n        for colNum in range(0, gameWidth):\n            row.append('B')\n        gameBoard.append(row)\n\n\ndef printRules():\n    \"\"\"Prints rules. Use at start of program once to print rules.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    print('')\n    print('Rules are as follows: ')\n    print('Player 1 symbol is \"X\", Player 2 symbol is \"O\"')\n    print('To score you need a total of:', gameScoreAmount, end='')\n    print(' ... for every additional, it\\'s +1 point')\n    print('Whoever goes first is randomly decided.')\n    print('Enter \"QUIT\" or \"quit\" to end game prematurely when asked for move')\n    print('')\n    input('Press anything to begin the game!')\n    print('')\n\ndef printGameBoard():\n    \"\"\"Prints game board\n\n    Returns\n    -------\n    None\n    \"\"\"\n    for row in gameBoard:\n        print(row)\n\n\ndef updateScore():\n    \"\"\"Use this to constantly update the scoreboard.\n\n    You want to use this function after each move, after all 4 horo, vert,\n    diagFS, diagBS lists are updated.\n    Then this function will check all those newly updated lists to recalculate\n    the score.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    global gameP1Score, gameP2Score\n    gameP1Score = 0\n    gameP2Score = 0\n\n    for gameHoroScored in gameHoroListP1:\n        gameP1Score += len(gameHoroScored) - (gameScoreAmount - 1)\n    for gameVertScored in gameVertListP1:\n        gameP1Score += len(gameVertScored) - (gameScoreAmount - 1)\n    for gameDiagScoredFS in gameDiagFSListP1:\n        gameP1Score += len(gameDiagScoredFS) - (gameScoreAmount - 1)\n    for gameDiagScoredBS in gameDiagBSListP1:\n        gameP1Score += len(gameDiagScoredBS) - (gameScoreAmount - 1)\n\n    for gameHoroScored in gameHoroListP2:\n        gameP2Score += len(gameHoroScored) - (gameScoreAmount - 1)\n    for gameVertScored in gameVertListP2:\n        gameP2Score += len(gameVertScored) - (gameScoreAmount - 1)\n    for gameDiagScoredFS in gameDiagFSListP2:\n        gameP2Score += len(gameDiagScoredFS) - (gameScoreAmount - 1)\n    for gameDiagScoredBS in gameDiagBSListP2:\n        gameP2Score += len(gameDiagScoredBS) - (gameScoreAmount - 1)\n\ndef isCursorInBounds(c):\n    \"\"\"Returns False if any cursor points to a spot outside the board.\n\n    Parameters\n    ----------\n    c : tuple\n        (int, int) for True path\n        lc, c, rc all valid\n\n    Returns\n    -------\n    bool\n    \"\"\"\n    if (c[0] == None) or (c[1] == None):\n        return False\n    if (c[0] < 0) or (c[0]>(gameHeight-1)) or (c[1] < 0) or (c[1]>(gameWidth-1)):\n        return False\n    return True\n\ndef isCursorMatchSymbol(c):\n    \"\"\"Determines if any cursor is pointing to correct symbol for a point.\n\n    Parameters\n    ----------\n    c : 
tuple\n        (int, int) for True path\n        lc, c, rc all valid\n\n    Returns\n    -------\n    bool\n        False if out of bounds or not this turn's symbol\n    \"\"\"\n    global gameBoard\n\n    if isCursorInBounds(c) == False:\n        return False\n    if(gameBoard[c[0]][c[1]] == 'X') and (gamePlayerTurn == 1):\n        return True\n    if(gameBoard[c[0]][c[1]] == 'O') and (gamePlayerTurn == 2):\n        return True\n    return False\n\ndef moveCursors(lc, rc, direction):\n    \"\"\"Moves left and right cursors 1 unit in direction chosen.\n\n    If None is in lc, rc it leaves it alone.\n\n    Parameters\n    ----------\n    lc : tuple\n        (int, int) for True path\n    rc : tuple\n        (int, int) for True path\n    direction : int\n        1 = Horizontal\n        2 = Vertical\n        3 = Diag forward slash \\\n        4 = Diag back slash /\n\n    Returns\n    -------\n    tuple, tuple\n        (int or None, int or None), (int or None, int or None),\n        lc, rc\n        Represents coordinates\n    \"\"\"\n    if direction == 1:\n        if (None in lc) == False:\n            lc = (lc[0], lc[1]-1)\n        if (None in rc) == False:\n            rc = (rc[0], rc[1]+1)\n    elif direction == 2:\n        if (None in lc) == False:\n            lc = (lc[0]-1, lc[1])\n        if (None in rc) == False:\n            rc = (rc[0]+1, rc[1])\n    elif direction == 3:\n        if (None in lc) == False:\n            lc = (lc[0]-1, lc[1]-1)\n        if (None in rc) == False:\n            rc = (rc[0]+1, rc[1]+1)\n    elif direction == 4:\n        if (None in lc) == False:\n            lc = (lc[0]+1, lc[1]-1)\n        if (None in rc) == False:\n            rc = (rc[0]-1, rc[1]+1)\n    if (None in lc) == False:\n        if isCursorInBounds(lc) == False:\n            lc = (None, None)\n    if (None in rc) == False:\n        if isCursorInBounds(rc) == False:\n            rc = (None, None)\n    return lc, rc\n\ndef initCursors(coord, direction):\n    \"\"\"Inits lc, c, rc based on coord given and direction given\n\n    Left cursor or right cursor out of bounds will be returned as tuple\n    (None, None)\n\n    Parameters\n    ----------\n    coord : tuple\n        (int, int) for return value of (int, int) path\n    direction : int\n        1 = Horizontal\n        2 = Vertical\n        3 = Diag forward slash \\\n        4 = Diag back slash /\n\n    Returns\n    -------\n    tuple, tuple, tuple\n        (int or None, int or None), (int or None, int or None),\n        (int or None, int or None)\n        lc, c, rc\n        Represents coordinates\n    \"\"\"\n    c = coord\n    if direction == 1:\n        lc = (c[0], c[1]-1)\n        rc = (c[0], c[1]+1)\n    elif direction == 2:\n        lc = (c[0]-1, c[1])\n        rc = (c[0]+1, c[1])\n    elif direction == 3:\n        lc = (c[0]-1, c[1]-1)\n        rc = (c[0]+1, c[1]+1)\n    elif direction == 4:\n        lc = (c[0]+1, c[1]-1)\n        rc = (c[0]-1, c[1]+1)\n    if isCursorInBounds(lc) == False:\n        lc = (None, None)\n    if isCursorInBounds(rc) == False:\n        rc = (None, None)\n    return lc, c, rc\n\ndef isInList(c, anyList):\n    \"\"\"Finds if lc, c, rc is in hor, ver, diagFS '\\', diagBS / lists\n\n    Parameters\n    ----------\n    c : tuple\n        (int, int)\n        Can be lc, c, rc\n    anyList : list\n        2D Matrix, inner list represents a scoring line on board\n\n    Returns\n    -------\n    bool\n        True if cursor is pointing at a scoring line\n    indexOfList : int\n        Index of anyList that has cursor coordinates\n    \"\"\"\n    for indexOfList in range(0, len(anyList)):\n        if (c in anyList[indexOfList]) == True:\n            return True, indexOfList\n    return False, None\n\n\ndef updateAnyList(listType, moveCoord):\n    \"\"\"Updates horo, vert, diagFS, diagBS based on last move and player turn\n\n    Also crosses out on the Turtle board when player scores.\n\n    ***** PSEUDOCODE *****\n    BELOW IS 1 WHILE LOOP\n    Lc, c, rc are initialized\n    Initialize checks of lc, rc\n    Init blank lists for lc, rc, what they will gather when moving\n    Check if lc, rc are in bounds\n    Check if lc, rc are a matching symbol for point\n    If lc or rc are not 
matching symbols, toggle stop flag for lc or rc\n    If lc and rc are both flagged to stop, BREAK\n    If lc and rc are both out of bounds, BREAK\n    If lc is not flagged to stop\n        if lc is in bounds and a valid symbol\n            append to gather lc list\n        if lc is already in list\n            pop the whole scoring horo/vert/diagFS/diagBS\n    if rc is not flagged to stop\n        if rc is in bounds and a valid symbol\n            append to gather rc list\n        if rc is already in list\n            pop the whole scoring horo/vert/diagFS/diagBS\n    append lc gather list + list(c) + rc gather list to ...\n        horo/vert/diagFS/diagBS list\n\n    Parameters\n    ----------\n    listType : int\n        1 = Horizontal\n        2 = Vertical\n        3 = Diag forward slash \\\n        4 = Diag back slash /\n    moveCoord : tuple\n        (int, int)\n        Last move\n\n    Returns\n    -------\n    None\n    \"\"\"\n    global gameHoroListP1, gameVertListP1, gameDiagFSListP1, gameDiagBSListP1\n    global gameHoroListP2, gameVertListP2, gameDiagFSListP2, gameDiagBSListP2\n\n    if gamePlayerTurn == 1:\n        if listType == 1:\n            anyList = gameHoroListP1\n        elif listType == 2:\n            anyList = gameVertListP1\n        elif listType == 3:\n            anyList = gameDiagFSListP1\n        elif listType == 4:\n            anyList = gameDiagBSListP1\n    elif gamePlayerTurn == 2:\n        if listType == 1:\n            anyList = gameHoroListP2\n        elif listType == 2:\n            anyList = gameVertListP2\n        elif listType == 3:\n            anyList = gameDiagFSListP2\n        elif listType == 4:\n            anyList = gameDiagBSListP2\n\n    matchListLc = []\n    matchListRc = []\n    matchListCombined = []\n    lc, c, rc = initCursors(moveCoord, listType)\n    lcStop, rcStop = False, False\n    while True:\n        lcInBounds = isCursorInBounds(lc)\n        rcInBounds = isCursorInBounds(rc)\n        lcMatchSymbol = isCursorMatchSymbol(lc)\n        rcMatchSymbol = isCursorMatchSymbol(rc)\n        lcInList, lcIndexInList = isInList(lc, anyList)\n        rcInList, rcIndexInList = isInList(rc, anyList)\n\n        if (lcStop == False) and (lcMatchSymbol == False):\n            lcStop = True\n        if (rcStop == False) and (rcMatchSymbol == False):\n            rcStop = True\n\n        if (lcStop == True) and (rcStop == True):\n            break\n        elif (lcInBounds == False) and (rcInBounds == False): # redundant?\n            break\n        # Is the cursor valid -- symbol match, in bounds\n        if lcStop == False:\n            if (lcInBounds == True) and (lcMatchSymbol == True):\n                # Add cursor to matchlist\n                matchListLc.append(lc)\n                # Is the cursor already in list, pop\n                if lcInList == True:\n                    anyList.pop(lcIndexInList)\n        if rcStop == False:\n            if (rcInBounds == True) and (rcMatchSymbol == True):\n                matchListRc.append(rc)\n                if rcInList == True:\n                    anyList.pop(rcIndexInList)\n        lc, rc = moveCursors(lc, rc, listType)\n\n    matchListCombined = matchListLc\n    matchListCombined.append(c)\n    matchListCombined += matchListRc\n    if len(matchListCombined) >= gameScoreAmount: # player got points\n        anyList.append(matchListCombined)\n        if gamePlayerTurn == 1:\n            updateCrossOutX(penX, coordMapX, anyList)\n        elif gamePlayerTurn == 2:\n            updateCrossOutO(penO, coordMapO, anyList)\n\ndef turn():\n    \"\"\"One turn of the game.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    global gamePlayerTurn\n    global gameTurnCounter\n    global gameP1Score, gameP2Score\n    quitWords = ['quit', 'QUIT', 'q', 'exit', 'EXIT']\n\n    updateScore()\n    print('\\n***** Turn #' + str(gameTurnCounter) + ' *****')\n    print('P1 Score:' + str(gameP1Score))\n    print('P2 Score:' + str(gameP2Score))\n    print('Player ' + str(gamePlayerTurn) + '\\'s Turn')\n    printGameBoard()\n    while True:\n        cRowIndex = input('Enter a row: ')\n        cColIndex = input('Enter a column: ')\n        if cRowIndex in quitWords:\n            closeGame()\n        elif cColIndex in quitWords:\n            closeGame()\n        cRowIndex = int(cRowIndex) - 
1\n        cColIndex = int(cColIndex) - 1\n        c = (cRowIndex, cColIndex)\n        if isCursorInBounds(c) == False:\n            print('ERROR: Cursor out of bounds!')\n        elif gameBoard[c[0]][c[1]] != 'B':\n            print('ERROR: That space is not blank!')\n        else:\n            break\n\n    if gamePlayerTurn == 1:\n        gameBoard[cRowIndex][cColIndex] = 'X'\n        drawX(penX, coordMapX, c)\n    elif gamePlayerTurn == 2:\n        gameBoard[cRowIndex][cColIndex] = 'O'\n        drawO(penO, coordMapO, c)\n\n    updateAnyList(1, c)\n    updateAnyList(2, c)\n    updateAnyList(3, c)\n    updateAnyList(4, c)\n\n    if gamePlayerTurn == 1:\n        gamePlayerTurn = 2\n    elif gamePlayerTurn == 2:\n        gamePlayerTurn = 1\n    gameTurnCounter += 1\n\ndef closeGame():\n    \"\"\"Run this when the entire board is filled.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    updateScore()\n    print('\\n***** GAME RESULTS *****')\n    if gameP1Score > gameP2Score:\n        print('Player 1 has won!')\n    elif gameP2Score > gameP1Score:\n        print('Player 2 has won!')\n    elif gameP2Score == gameP1Score:\n        print('It\\'s a draw!')\n    print('Player 1\\'s total score = ' + str(gameP1Score))\n    print('Player 2\\'s total score = ' + str(gameP2Score))\n    print('Game concluded in ' + str(gameTurnCounter - 1) + ' turns')\n    printGameBoard()\n    time.sleep(5)\n    quit()\n\ndef initPen(penSize, penColor, initX, initY, initAngle):\n    \"\"\"Initializes and returns a pen.\n\n    Parameters\n    ----------\n    penSize : int\n    penColor : string\n    initX : int\n    initY : int\n    initAngle: int\n\n    Returns\n    -------\n    pen : Turtle object\n    \"\"\"\n    pen = turtle.Turtle()\n    pen.speed(1)\n\n    pen.penup()\n    pen.pensize(penSize)\n    pen.pencolor(penColor)\n    pen.setx(initX)\n    pen.sety(initY)\n    pen.seth(initAngle)\n    return pen\n\ndef drawGrid(pen, height, width, symbolSize, spacingSize):\n    \"\"\"Draws the grid of Tic Tac Toe board using pen of Turtle\n\n    Parameters\n    ----------\n    pen : Turtle object\n    height : int\n    width : int\n    symbolSize : int\n    spacingSize : int\n\n    Returns\n    -------\n    None\n    \"\"\"\n    halfHeight = ((height * symbolSize) + (height * (spacingSize * 2))) / 2\n    halfWidth = ((width * symbolSize) + (width * (spacingSize * 2))) / 2\n    topLeft = (-halfWidth, halfHeight)\n    gridSize = symbolSize + (2 * spacingSize)\n    totalWidth = gridSize * width\n    totalHeight = gridSize * height\n\n    horoList = [] # start coord for draw horo lines\n    yCoord = topLeft[1]\n    while len(horoList) < height - 1:\n        yCoord -= gridSize\n        horoList.append((topLeft[0], yCoord))\n    vertList = []\n    xCoord = topLeft[0]\n    while len(vertList) < width - 1:\n        xCoord += gridSize\n        vertList.append((xCoord, topLeft[1]))\n\n    for coord in horoList:\n        pen.penup()\n        pen.setpos(coord)\n        pen.seth(0)\n        pen.pendown()\n        pen.forward(totalWidth)\n    for coord in vertList:\n        pen.penup()\n        pen.setpos(coord)\n        pen.seth(-90)\n        pen.pendown()\n        pen.forward(totalHeight)\n    pen.penup()\n\n\ndef getCoordMapO(height, width, symbolSize, spacingSize):\n    \"\"\"Gets coordinates of points to draw when crossing out a scoring line.\n\n    Parameters\n    ----------\n    height : int\n    width : int\n    symbolSize : int\n    spacingSize : int\n\n    Returns\n    -------\n    None\n    \"\"\"\n    global coordMapO\n    gridSize = symbolSize + (2 * spacingSize)\n    halfHeight = ((height * symbolSize) + (height * (spacingSize * 2))) / 2\n    halfWidth = ((width * symbolSize) + (width * (spacingSize * 2))) / 2\n    halfGridSize = gridSize / 2\n    topLeft = (-halfWidth, halfHeight)\n    coordMapO = []\n    for multHeight in range(0, height):\n        row = []\n        yCoord = (topLeft[1]-(gridSize*multHeight) - spacingSize)\n        for multRow in range(0, width):\n            xCoord = (topLeft[0]+(multRow*gridSize) + (gridSize / 2))\n            
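# each (xCoord, yCoord) pair is the anchor point of one grid cell: column multRow, row multHeight\n            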
row.append((xCoord, yCoord))\n coordMapO.append(row)\n\ndef getCoordMapX(height, width, symbolSize, spacingSize):\n \"\"\"Get coordinates of points to draw when crossing out a scoring line.\n\n Parameters\n ----------\n height : int\n width : int\n symbolSize : int\n spacingSize : int\n\n Returns\n -------\n None\n \"\"\"\n global coordMapX\n gridSize = symbolSize + (2 * spacingSize)\n halfHeight = ((height * symbolSize) + (height * (spacingSize * 2))) / 2\n halfWidth = ((width * symbolSize) + (width * (spacingSize * 2))) / 2\n halfGridSize = gridSize / 2\n topLeft = (-halfWidth, halfHeight)\n coordMapX = []\n for multHeight in range(0, height):\n row = []\n yCoord = (topLeft[1]-(gridSize*multHeight) - spacingSize)\n for multRow in range(0, width):\n xCoord = (topLeft[0]+(multRow*gridSize) + (gridSize / 2))\n row.append((xCoord, yCoord))\n coordMapX.append(row)\n for rowIndex in range(0, len(coordMapX)):\n for colIndex in range(0, len(coordMapX[0])):\n coordMapX[rowIndex][colIndex] = (coordMapX[rowIndex][colIndex][0], coordMapX[rowIndex][colIndex][1] - (symbolSize/2))\n return coordMapX\n\ndef drawO(pen, coordMapO, moveCoord):\n \"\"\"Draws an O.\n\n Parameters\n ----------\n pen : Turtle object\n coordMapO : list\n moveCoord : tuple (int, int)\n\n Returns\n -------\n None\n \"\"\"\n pen.penup()\n pen.setpos(coordMapO[moveCoord[0]][moveCoord[1]])\n pen.seth(180)\n pen.pendown()\n pen.circle(symbolSize/2)\n pen.penup()\n pen.seth(270)\n pen.forward(symbolSize/2)\n\ndef drawX(pen, coordMapX, moveCoord):\n \"\"\"Draws an X.\n\n Parameters\n ----------\n pen : Turtle object\n coordMapX : list\n moveCoord : tuple (int, int)\n\n Returns\n -------\n None\n \"\"\"\n pen.penup()\n pen.setpos(coordMapX[moveCoord[0]][moveCoord[1]])\n pen.seth(135) # NW\n pen.forward(symbolSize/2)\n\n pen.pendown()\n pen.backward(symbolSize)\n\n pen.penup()\n pen.forward(symbolSize/2)\n pen.seth(45)\n pen.forward(symbolSize/2)\n\n pen.pendown()\n pen.backward(symbolSize)\n pen.penup()\n pen.forward(symbolSize/2)\n\ndef updateCrossOutX(penCrossX, coordMapX, anyList):\n \"\"\"Crosses out on the board when player scores.\n\n Parameters\n ----------\n penCrossX : Turtle object\n coordMapX : list\n anyList : list\n a horo, vert, diagFS, diagBS list of P1 or P2\n\n Returns\n -------\n None\n\n See Also\n --------\n updateAnyList function\n \"\"\"\n for anyScoringLine in anyList:\n penCrossX.penup()\n for rowIndex in range(0, gameHeight):\n for colIndex in range(0, gameWidth):\n if (rowIndex, colIndex) in anyScoringLine:\n penCrossX.setpos(coordMapX[rowIndex][colIndex])\n penCrossX.pendown()\n\ndef updateCrossOutO(penCrossO, coordMapO, anyList):\n \"\"\"Crosses out on the board when player scores.\n\n Parameters\n ----------\n penCrossO : Turtle object\n coordMapO : list\n anyList : list\n a horo, vert, diagFS, diagBS list of P1 or P2\n\n Returns\n -------\n None\n\n See Also\n --------\n updateAnyList function\n \"\"\"\n for anyScoringLine in anyList:\n penCrossO.penup()\n for rowIndex in range(0, gameHeight):\n for colIndex in range(0, gameWidth):\n if (rowIndex, colIndex) in anyScoringLine:\n penCrossO.setpos(coordMapO[rowIndex][colIndex])\n penCrossO.pendown()\n\n''' ------------------------ END DEFINE FUNCTIONS -------------------------- '''\n\n\n''' ------------------------------ START MAIN ------------------------------ '''\n\ndef main():\n initGame()\n printRules()\n initGameBoard()\n initDraw()\n drawGrid(penBoard, gameHeight, gameWidth, symbolSize, spacingSize)\n while True:\n turn()\n if gameTurnCounter == 
((gameHeight * gameWidth) + 1):\n closeGame()\n break\n\nmain()\n\n''' --------------------------- END MAIN ------------------------------------'''\n\n\n\n\n''' ---------------------- START TESTING FUNCTIONS ------------------------ '''\n\ntestBoard0 = [ ['B', 'B', 'B', 'B', 'B'],\n ['B', 'B', 'B', 'B', 'B'],\n ['B', 'B', 'B', 'B', 'B'],\n ['B', 'B', 'B', 'B', 'B'],\n ['B', 'B', 'B', 'B', 'B'] ]\n\ntestBoard3 = [ ['X', 'B', 'X', 'B', 'B'],\n ['B', 'B', 'B', 'B', 'X'],\n ['B', 'B', 'X', 'B', 'B'],\n ['B', 'B', 'B', 'B', 'X'],\n ['X', 'B', 'B', 'B', 'X'] ]\n'''\nHor Test\n (0,1)\nVert test\n (2,4)\nDiag FS test\n (3,3)\nDiag BS test\n (4, 1)\n'''\n\n'''\n# TAB = Tested function\n# NO INDENT = Not tested\n def initStdGame():\n def initGameBoard():\n def printRules():\n def printGameBoard():\n def updateScore():\n def isCursorInBounds(c):\n def isCursorMatchSymbol(c):\n def moveCursors(lc, rc, direction):\n def initCursors(coord, direction):\n def isInList(c, anyList):\ndef updateAnyList(listType, moveCoord):\n'''\n\n'''\ndef initStdGame():\ndef initGameBoard():\ndef printRules():\ndef printGameBoard():\ndef updateScore():\n'''\n\n'''\ninitStdGame()\ninitGameBoard()\nprintRules()\nprintGameBoard()\ngameVertListP1.append([(0,0),(1,0),(2,0),(3,0),(4,0)])\nupdateScore()\nprint('P1 Score:', gameP1Score)\ngameVertListP1.append([(0,1),(1,1),(2,1)])\nupdateScore()\nprint('P1 Score:', gameP1Score)\ngameHoroListP2.append([(0,0),(0,1),(0,2),(0,3),(0,4)])\nupdateScore()\nprint('P2 Score:', gameP2Score)\ngameHoroListP2.append([(1,0),(1,1),(1,2)])\nupdateScore()\nprint('P2 Score:', gameP2Score)\n\n# def initCursors(coord, direction):\nprint('')\nlc, c, rc = initCursors((0, 1), 1)\nprint('Cursors:', lc, c, rc)\nlc, c, rc = initCursors((1, 1), 2)\nprint('Cursors:', lc, c, rc)\nlc, c, rc = initCursors((2, 2), 3)\nprint('Cursors:', lc, c, rc)\nlc, c, rc = initCursors((2, 2), 4)\nprint('Cursors:', lc, c, rc)\n'''\n\n'''\n# def moveCursors(lc, rc, direction):\nprint('')\ngameBoard = testBoard0\ngameHeight = 5\ngameWidth = 5\nlc, rc = (2, 1), (2, 3)\nlc, rc = moveCursors(lc, rc, 1)\nprint('Cursors:', lc, rc)\nlc, rc = (1, 2), (3, 2)\nlc, rc = moveCursors(lc, rc, 2)\nprint('Cursors:', lc, rc)\nlc, rc = (1, 1), (3, 3)\nlc, rc = moveCursors(lc, rc, 3)\nprint('Cursors:', lc, rc)\nlc, rc = (3, 1), (1, 3)\nlc, rc = moveCursors(lc, rc, 4)\nprint('Cursors:', lc, rc)\n\n# def isInList(c, anyList):\ngameVertListP1 = [[(0,0), (1,0), (2,0)]]\ninList, matchIndex = isInList((1,0), gameVertListP1)\nprint('Is in list=', inList, ' Match index:', matchIndex)\n\n\n# !!!!!!!!!!!!! 
BOARD is 5x5 all blank at this point !!!!!!!!!!!!\n\n# def isCursorInBounds(c):\nprint('')\nprint('Testing isCursorInBounds')\nc = (-1,0)\nprint('(-1,0)=', isCursorInBounds(c))\nc = (0,-1)\nprint('(0,-1)=', isCursorInBounds(c))\nc = (4,4)\nprint('(4,4)=', isCursorInBounds(c))\nc = (5,4)\nprint('(5,4)=', isCursorInBounds(c))\n\n# def isCursorMatchSymbol(c):\ninitTestGame()\ninitGameBoard()\ngameBoard[4][4]='X'\ngameBoard[3][4]='O'\nprintGameBoard()\n\nprint('')\nprint('Testing isCursorMatchSymbol')\nc = (0,0)\nprint('(0,0)=', isCursorMatchSymbol(c))\nc = (0,-1)\nprint('(0,-1)=', isCursorMatchSymbol(c))\nc = (4,4)\nprint('(4,4)=', isCursorMatchSymbol(c))\nc = (3,4)\nprint('(3,4)=', isCursorMatchSymbol(c))\n\n# Hor Test\n# (0,1)\n# Vert test\n# (2,4)\n# Diag FS test\n# (3,3)\n# Diag BS test\n# (4, 1)\n\n# def updateAnyList(listType, moveCoord):\ngameBoard = testBoard3\nprint('\\nTest updateAnyList')\nupdateAnyList(1, (0,1))\nprint(gameHoroListP1)\nupdateAnyList(2, (2,4))\nprint(gameVertListP1)\nupdateAnyList(3, (3,3))\nprint(gameDiagFSListP1)\nupdateAnyList(4, (3,1))\nprint(gameDiagBSListP1)\n'''\n\n\n''' ----------------------- END TESTING FUNCTIONS -------------------------- '''\n","sub_path":"deluxe_tic_tac_toe_v3.py","file_name":"deluxe_tic_tac_toe_v3.py","file_ext":"py","file_size_in_byte":25702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"314392288","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/philewels/GitHub/MultiQC/multiqc/modules/goleft_indexcov/goleft_indexcov.py\n# Compiled at: 2017-12-07 11:39:51\n# Size of source mod 2**32: 6496 bytes\n\"\"\"MultiQC module to plot output from goleft indexcov\n\nhttps://github.com/brentp/goleft/tree/master/indexcov\n\"\"\"\nfrom __future__ import print_function\nimport collections, logging\nfrom multiqc import config\nfrom multiqc.plots import linegraph, scatter\nfrom multiqc.modules.base_module import BaseMultiqcModule\nlog = logging.getLogger(__name__)\n\nclass MultiqcModule(BaseMultiqcModule):\n\n def __init__(self):\n super(MultiqcModule, self).__init__(name='goleft indexcov', anchor='goleft_indexcov', href='https://github.com/brentp/goleft/tree/master/indexcov',\n info='quickly estimates coverage from a whole-genome bam index.')\n roc_plot = self.roc_plot()\n bin_plot = self.bin_plot()\n if not roc_plot:\n if not bin_plot:\n raise UserWarning\n\n def _short_chrom(self, chrom):\n \"\"\"Plot standard chromosomes + X, sorted numerically.\n\n Allows specification from a list of chromosomes via config\n for non-standard genomes.\n \"\"\"\n default_allowed = set(['X'])\n allowed_chroms = set(getattr(config, 'goleft_indexcov_config', {}).get('chromosomes', []))\n chrom_clean = chrom.replace('chr', '')\n try:\n chrom_clean = int(chrom_clean)\n except ValueError:\n if chrom_clean not in default_allowed:\n if chrom_clean not in allowed_chroms:\n chrom_clean = None\n\n if not allowed_chroms or chrom in allowed_chroms or chrom_clean in allowed_chroms:\n return chrom_clean\n else:\n if isinstance(chrom_clean, int) or chrom_clean in default_allowed:\n return chrom_clean\n\n def roc_plot(self):\n helptext = 'Lower coverage samples have shorter curves where the proportion of regions covered \\n drops off more quickly. 
This indicates a higher fraction of low coverage regions.'\n        max_chroms = 50\n        data = collections.defaultdict(lambda : collections.defaultdict(dict))\n        for fn in self.find_log_files('goleft_indexcov/roc', filehandles=True):\n            header = fn['f'].readline()\n            sample_names = [self.clean_s_name(x, fn['root']) for x in header.strip().split()[2:]]\n            for parts in (l.rstrip().split() for l in fn['f']):\n                if len(parts) > 2:\n                    chrom, cov = parts[:2]\n                    sample_vals = parts[2:]\n                    if self._short_chrom(chrom) is not None:\n                        for val, sample in zip(sample_vals, sample_names):\n                            data[chrom][sample][float(cov)] = float(val)\n\n        for chrom in data:\n            data[chrom] = self.ignore_samples(data[chrom])\n\n        if data:\n\n            def to_padded_str(x):\n                x = self._short_chrom(x)\n                try:\n                    return '%06d' % x\n                except TypeError:\n                    return x\n\n            chroms = sorted((data.keys()), key=to_padded_str)\n            log.info('Found goleft indexcov ROC reports for %s samples' % len(data[chroms[0]]))\n            if len(chroms) > max_chroms:\n                log.info('Too many chromosomes found: %s, limiting to %s' % (len(chroms), max_chroms))\n                chroms = chroms[:max_chroms]\n            pconfig = {'id':'goleft_indexcov-roc-plot',  'title':'goleft indexcov: ROC - genome coverage per scaled depth by chromosome', \n             'xlab':'Scaled coverage', \n             'ylab':'Proportion of regions covered', \n             'ymin':0, \n             'ymax':1.0,  'xmin':0, \n             'xmax':1.5,  'data_labels':[{'name': self._short_chrom(c)} for c in chroms]}\n            self.add_section(name='Scaled coverage ROC plot',\n              anchor='goleft_indexcov-roc',\n              description='Coverage (ROC) plot that shows genome coverage at a given (scaled) depth.',\n              helptext=helptext,\n              plot=(linegraph.plot([data[c] for c in chroms], pconfig)))\n            return True\n        return False\n\n    def bin_plot(self):\n        helptext = 'We expect bins to be around 1, so deviations from this indicate problems. \\n        Low coverage bins (< 0.15) on the x-axis have regions with low or missing coverage. \\n        Higher values indicate truncated BAM files or missing data. \\n        Bins with skewed distributions (<0.85 or >1.15) on the y-axis detect dosage bias. \\n        Large values on the y-axis are likely to impact CNV and structural variant calling. 
\\n        See the \\n        goleft indexcov bin documentation \\n        for more details.'\n        data = {}\n        for fn in self.find_log_files('goleft_indexcov/ped', filehandles=True):\n            header = fn['f'].readline()[1:].strip().split('\\t')\n            for sample_parts in (l.split('\\t') for l in fn['f']):\n                cur = dict(zip(header, sample_parts))\n                cur['sample_id'] = self.clean_s_name(cur['sample_id'], fn['root'])\n                total = float(cur['bins.in']) + float(cur['bins.out'])\n                data[cur['sample_id']] = {'x':float(cur['bins.lo']) / total,  'y':float(cur['bins.out']) / total}\n\n        data = self.ignore_samples(data)\n        if data:\n            log.info('Found goleft indexcov bin reports for %s samples' % len(data))\n            pconfig = {'id':'goleft_indexcov-bin-plot', \n             'title':'goleft indexcov: Problematic low and non-uniform coverage bins', \n             'xlab':'Proportion of bins with depth < 0.15', \n             'ylab':'Proportion of bins with depth outside of (0.85, 1.15)', \n             'yCeiling':1.0, \n             'yFloor':0.0,  'xCeiling':1.0,  'xFloor':0.0}\n            self.add_section(name='Problem coverage bins',\n              anchor='goleft_indexcov-bin',\n              description='This plot identifies problematic samples using binned coverage distributions.',\n              helptext=helptext,\n              plot=(scatter.plot(data, pconfig)))\n            return True\n        return False","sub_path":"pycfiles/multiqc-1.8.tar/goleft_indexcov.cpython-37.py","file_name":"goleft_indexcov.cpython-37.py","file_ext":"py","file_size_in_byte":6385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"451846674","text":"#!flask/bin/python\n# -*- coding: utf-8 -*-\n\n# \n# REST interface in Flask\n# Exposes the end points\n# Responsibilities: preprocesses the request (validation, routing), hands it off to the datalayer and formats the response\n# An incomplete request or one with invalid arguments is answered only with HTTP code 400 (Bad Request)\n# as a strategy to avoid information leakage attacks\n# TODO: add HTTP authentication and standardize error handling\n# \n\nfrom flask import Flask\nfrom flask import Response\nfrom flask import request\nfrom flask import jsonify\nfrom flask import make_response\nfrom flask import abort\n\nimport locale\nimport datalayer\n\napp = Flask(__name__, static_url_path = \"\")\n\nlocale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')\n\n@app.before_request\ndef before_request():\n    datalayer.startDataServices()\n\n@app.after_request\ndef after_request(response):\n    datalayer.stopDataServices()\n    return response\n\n@app.errorhandler(404)\ndef not_found(error):\n    result = jsonify( { 'error': 'Not found' })\n    return make_response(result, 404)\n\n@app.errorhandler(400)\ndef bad_request(error):\n    result = jsonify( { 'error': 'Bad request' })\n    return make_response(result, 400)\n    \n# Requirement 4.\n@app.route('/titulo_tesouro/<titulo_id>', methods = ['GET'])\ndef historico(titulo_id):\n\n    categoria_titulo = datalayer.loadCategoria(titulo_id)\n\n    result = {'id': titulo_id, 'categoria_titulo': categoria_titulo[0]}\n    \n    args = request.args\n    data_inicio_param = args.get('data_inicio')\n    data_fim_param = args.get('data_fim')\n    group_by_param = args.get(\"group_by\")\n\n    group_by = group_by_param is not None\n\n    historico = list()\n\n    if not group_by:\n        historico = datalayer.loadHistorico(titulo_id, data_inicio_param, data_fim_param)\n        result['historico'] = historico\n    else:\n        historico = datalayer.loadHistoricoGroupby(titulo_id, data_inicio_param, data_fim_param)\n        result['historico'] = historico\n\n    for item in historico:\n        if 'valor_venda' in item:\n            item['valor_venda'] = 
locale.currency(item['valor_venda'], grouping = True, symbol=None)\n        if 'valor_resgate' in item:\n            item['valor_resgate'] = locale.currency(item['valor_resgate'], grouping = True, symbol=None)\n\n    resp = jsonify(result)\n    return resp\n    \n# Requirements 6 and 7\n@app.route('/titulo_tesouro/<acao>/<titulo_id>', methods = ['GET'])\ndef historicoAcao(acao, titulo_id):\n\n    categoria_titulo = datalayer.loadCategoria(titulo_id)\n\n    result = {'id': titulo_id, 'categoria_titulo': categoria_titulo[0]}\n    \n    args = request.args\n    data_inicio_param = args.get('data_inicio')\n    data_fim_param = args.get('data_fim')\n    group_by_param = args.get(\"group_by\")\n\n    group_by = group_by_param is not None\n\n    historico = list()\n\n    if not group_by:\n        historico = datalayer.loadHistoricoAcao(titulo_id, data_inicio_param, data_fim_param, acao)\n        result['historico'] = historico\n    else:\n        historico = datalayer.loadHistoricoAcaoGroupby(titulo_id, data_inicio_param, data_fim_param, acao)\n        result['historico'] = historico\n\n    for item in historico:\n        if 'valor' in item:\n            item['valor'] = locale.currency(item['valor'], grouping = True, symbol=None)\n\n    resp = jsonify(result)\n    return resp\n\n# Requirement 1.\n# TODO: In requirement 1, the 'valor' parameter was not mentioned.\n# Check with the requirements team. In this implementation it was made mandatory\n# until this point is clarified.\n@app.route(\"/titulo_tesouro\", methods=['POST'])\ndef add():\n    if not request.json or not 'valor' in request.json:\n        abort(400)\n\n    if not 'mes' in request.json or not 'ano' in request.json:\n        abort(400)\n\n    if not 'acao' in request.json or not 'categoria_titulo' in request.json:\n        abort(400)\n\n    req_json = request.get_json()\n\n    acao = req_json['acao']\n\n    if acao != \"venda\" and acao != \"resgate\":\n        abort(400)\n\n    try:\n        mes_param = req_json['mes'].strip()\n        mes = int(mes_param)\n\n        if mes < 1 or mes > 12:\n            abort(400)\n\n        ano_param = req_json['ano'].strip()\n\n        ano = int(ano_param)\n\n        if ano < 2006 or ano > 2017:\n            abort(400)\n\n        valor_param = req_json['valor'].strip()\n        valor = float(valor_param)\n        \n        if valor <= 0:\n            abort(400)\n\n    except ValueError:\n        abort(400)\n\n    categoria_titulo_param = req_json['categoria_titulo'].strip()\n\n    message = datalayer.addValor(mes, ano, acao, categoria_titulo_param, valor)\n\n    codigo = 400\n    if message == \"sucesso\":\n        codigo = 200\n    elif message == \"falha\":\n        codigo = 500\n    \n    result = jsonify( { 'message': message })\n    return make_response(result, codigo)\n\n# Requirement 2.\n# TODO: The requirement only mentions the bond ID, without clarifying how the history entry will be identified\n# Check with the requirements team. 
In this implementation, the parameters 'mes', 'ano' and 'acao' were made mandatory\n# until this point is clarified.\n@app.route('/titulo_tesouro/<titulo_id>', methods = ['DELETE'])\ndef delete_titulo(titulo_id):\n\n    args = request.args\n    if not 'mes' in args or not 'ano' in args:\n        abort(400)\n\n    if not 'acao' in args:\n        abort(400)\n    \n    acao = args.get('acao').strip()\n\n    if acao != \"venda\" and acao != \"resgate\":\n        abort(400)\n\n    try:\n        mes_param = args.get('mes').strip()\n        mes = int(mes_param)\n\n        if mes < 1 or mes > 12:\n            abort(400)\n\n        ano_param = args.get('ano').strip()\n\n        ano = int(ano_param)\n\n        if ano < 2006 or ano > 2017:\n            abort(400)\n\n    except ValueError:\n        abort(400)\n\n    message = datalayer.removerValor(mes, ano, acao, titulo_id)\n\n    codigo = 400\n    if message == \"sucesso\":\n        codigo = 200\n    elif message == \"falha\":\n        codigo = 500\n    \n    result = jsonify( { 'message': message })\n    return make_response(result, codigo)\n\n# Requirement 3.\n# TODO: The specification suggests that the 'mes', 'ano' and 'acao' parameters would be optional. Only 'valor' would be mandatory.\n# Check with the requirements team. What happens if year, month and action are not provided? \n# Update every entry of a bond with the same value?\n# The current implementation treats these parameters as mandatory until this point is clarified.\n\n@app.route('/titulo_tesouro/<titulo_id>', methods = ['PUT', 'PATCH', 'UPDATE'])\ndef update_titulo(titulo_id):\n\n    args = request.args\n    if not 'mes' in args or not 'ano' in args:\n        abort(400)\n\n    if not 'acao' in args:\n        abort(400)\n    \n    acao = args.get('acao').strip()\n\n    if acao != \"venda\" and acao != \"resgate\":\n        abort(400)\n\n    try:\n        mes_param = args.get('mes').strip()\n        mes = int(mes_param)\n\n        if mes < 1 or mes > 12:\n            abort(400)\n\n        ano_param = args.get('ano').strip()\n\n        ano = int(ano_param)\n\n        if ano < 2006 or ano > 2017:\n            abort(400)\n\n        valor_param = args.get('valor').strip()\n        valor = float(valor_param)\n        \n        if valor <= 0:\n            abort(400)\n\n    except ValueError:\n        abort(400)\n\n    message = datalayer.updateValor(mes, ano, acao, titulo_id, valor)\n\n    codigo = 400\n    if message == \"sucesso\":\n        codigo = 200\n    elif message == \"falha\":\n        codigo = 500\n    \n    result = jsonify( { 'message': message })\n    return make_response(result, codigo)\n\n# Requirement 5.\n# TODO: the output format described in the specification of this feature shows a single value for the year.\n# The same goes for the month. That makes sense when comparing the sale and redemption values of one bond\n# against another for the same month and year. On the other hand, this output structure conflicts with the feature's parameters\n# (data_inicio, data_fim and group_by). What happens if a period spanning more than one month and year is defined? The requested output \n# format does not support that scenario. 
\n# The feature was implemented taking into account the parameters 'id' (a list of bond ids to compare), 'mes' and 'ano'\n# until this point is clarified.\n\n@app.route('/titulo_tesouro/comparar', methods = ['GET'])\ndef comparar():\n\n    args = request.args\n    if not 'id' in args:\n        abort(400)\n\n    ids = request.args.getlist('id')\n\n    if not len(ids):\n        abort(400)\n\n    if not 'mes' in args or not 'ano' in args:\n        abort(400)\n\n    try:\n        mes_param = args.get('mes').strip()\n        mes = int(mes_param)\n\n        if mes < 1 or mes > 12:\n            abort(400)\n\n        ano_param = args.get('ano').strip()\n\n        ano = int(ano_param)\n\n        # TODO: the bounds below were not established in the documentation\n        # check with the requirements team for the valid bounds\n        if ano < 2006 or ano > 2017:\n            abort(400)\n\n    except ValueError:\n        abort(400)\n\n    result = {'ano': ano, 'mes': mes}\n\n    valores = datalayer.loadHistoricoIds(ano, mes, ids)\n    result['valores'] = valores\n\n    for item in valores:\n        if 'valor_venda' in item:\n            item['valor_venda'] = locale.currency(item['valor_venda'], grouping = True, symbol=None)\n        if 'valor_resgate' in item:\n            item['valor_resgate'] = locale.currency(item['valor_resgate'], grouping = True, symbol=None)\n\n    resp = jsonify(result)\n    return resp\n\nif __name__ == '__main__':\n    app.run(debug = True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"574336624","text":"# -*- coding:utf-8 -*-\n\n'''\n1. Strings (str) are an immutable data type\n\nDefinition: characters wrapped in quotes ('' or \"\")\n\nOperators: + *\n\nUse [] to extract a character\na = '123' print(a[0]) outputs 1\n\nSlicing [start:end:step]\na positive step slices front to back, a negative one back to front\n[:] slices the whole string\n'''\n# python built-in functions -- string operations\n# find / rfind locate the first occurrence of a substring in a string, returning its index\na = '1234512'\n# print(a.find('1')) -- 0\n\n# strip() removes characters from both ends, whitespace by default\na.lstrip('1') # removes the leading 1s from string a\n\n# 1.14.5.7 String alignment\n# # center(), ljust() and rjust() center, left-align or right-align a string within a field of the given width.\n# print(a.center(20, '#') ) --> ######1234512####### centered to width 20 with #\n\n# 1.14.5.8 String case\n# capitalize() uppercases the first letter of the string, title() uppercases the first letter of every word, upper() uppercases every letter, and lower() lowercases everything,\nb = 'hello world'\n# print(b.upper())\n# print(b.capitalize())\n# print(b.title()) -- > Hello World\n\n\n# 1.14.5.9 The partition() function\n# partition(str) splits a string into three parts: before str, str itself, and after str.\n#\n# 1.14.5.10 The isalpha() and isdigit() functions\n# isalpha() checks whether a string is all letters, returning True if so and False otherwise.\n# isdigit() checks whether a string is all digits, returning True if so and False otherwise.\n# isalnum() checks whether a string consists of letters and digits.\n# isspace() checks whether a string is all whitespace.\n# 1.14.5.11 The count() function\n# count(str) counts the number of occurrences of substring str.\n# print(a.count('1')) --> 2 means the character 1 appears twice in a\n'''\n2. Lists use order and position to identify elements, especially useful when the order and position of elements change often\nelements of a list can have different types li = [1,'a']\nlooking up an element takes O(n) time\n'''\n\n# use [] to fetch an element\n# a = [1,2,3] a[0] --> 1\n\n\nd = [6, 7, 8, 9]\n# c.append('a')\n# print(c)\n# extend() merges lists, equivalent to c += d\n# c.extend(d)\n# print(c)\n# c += d\n# print(c)\n\n# insert() inserts an element at the given position\n# c.insert(0,'h')\n# print(c)\n\n# remove() deletes a given element (del deletes by index)\n# del c[0]\n# print(c)\n# index() looks up an element -- returns the element's position in the list\n# print(c.index(2))\n# list = ['aa', 'bb', 'cc']\n# str = ','.join(list)\n#\n# print(str)\n\n\"\"\"\n3. Tuples are sequences of elements of any type; once defined, a tuple cannot be appended to, deleted from or modified\ntuples: small memory footprint, immutable\na tuple with a single element is written (1,)\ntuples can be accessed with []\n\"\"\"\n# count() counts occurrences of an element\n\n# index() looks up an element's position, returning its index\n\n\n\"\"\"\n4. Dicts are a mutable type; their elements are unordered and values are accessed by key\n\n\"\"\"\ndict = {'name':'张三', 'age': 24}\n# dict['name'] --> outputs '张三'\n\n# add or modify an element via []\ndict['sex'] = 'm'\nprint(dict) # {'age': 24, 'sex': 'm', 'name': '张三'}\n\n# del dict['sex'] deletes 
the corresponding value and key\n\nprint(dict.keys())\n\n","sub_path":"01_str&list.py","file_name":"01_str&list.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"653894513","text":"#!/usr/bin/env python3\nimport csv\nimport unittest\nfrom atta.cleaner.title import cat_title\n\n\nclass TestTitleCategory(unittest.TestCase):\n\n    def test_mapping(self):\n        with open('./data/title-category.csv') as csvfile:\n            reader = csv.DictReader(csvfile)\n            for row in reader:\n                self.assertEqual(cat_title(row['QUESTION']), row['ANSWER'])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test/test_title.py","file_name":"test_title.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"276742587","text":"import numpy as np\n\n\nclass OnPolicyReplay:\n    \n    def __init__(self):\n        self.data_keys = [\"states\", \"actions\", \"rewards\", \"next_states\", \"dones\"]\n        self.total_experiences = 0\n        self.n_episodes = 0\n        self.reset()\n    \n    def reset(self):\n        for key in self.data_keys:\n            setattr(self, key, [])\n\n        self.current_episode = {key: [] for key in self.data_keys}\n        self.batch_size = 0\n    \n    def update(self, state, action, reward, next_state, done):\n        transition = (state, action, reward, next_state, done)\n        \n        for i, key in enumerate(self.data_keys):\n            self.current_episode[key].append(transition[i])\n        \n        if done:\n            self.n_episodes += 1\n            for key in self.data_keys:\n                getattr(self, key).append(np.array(self.current_episode[key]))\n\n            self.current_episode = {key: [] for key in self.data_keys}\n        \n        self.total_experiences += 1\n        self.batch_size += 1\n    \n    def sample(self):\n        batch = {key: getattr(self, key) for key in self.data_keys}\n        self.reset()\n        return batch\n    \n    \nif __name__ == \"__main__\":\n    import gym\n    import numpy as np\n    \n    env = gym.make(\"MountainCarContinuous-v0\")\n    memory = OnPolicyReplay()\n\n    total_experiences = 0\n    n_episodes = 0\n    total_rewards = []\n\n    for _ in range(5):\n        n_episodes += 1\n        total_reward = 0.0\n        obs = env.reset()\n        while True:\n            action = env.action_space.sample()\n            next_obs, reward, done, _ = env.step(action)\n            memory.update(obs, action, reward, next_obs, done)\n            total_experiences += 1\n            total_reward += reward\n            obs = next_obs\n            if done:\n                total_rewards.append(total_reward)\n                break\n\n    assert memory.total_experiences == total_experiences\n    assert memory.batch_size == total_experiences\n\n    batch = memory.sample()\n\n    assert all(len(batch[key]) == n_episodes for key in memory.data_keys)\n    assert memory.total_experiences == total_experiences\n    assert memory.batch_size == 0\n","sub_path":"examples/utils/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"309369456","text":"class simpulbiner(object):\r\n    def __init__(self, data):\r\n        self.data=data\r\n        self.kiri=None\r\n        self.kanan=None\r\n\r\n    def __str__(self):\r\n        return str(self.data)\r\n\r\nA=simpulbiner('Ambarawa')\r\nB=simpulbiner('Bantul')\r\nC=simpulbiner('Cimahi')\r\nD=simpulbiner('Denpasar')\r\nE=simpulbiner('Enrekang')\r\nF=simpulbiner('Flores')\r\nG=simpulbiner('Garut')\r\nH=simpulbiner('Halmahera Timur')\r\nI=simpulbiner('Indramayu')\r\nJ=simpulbiner('Jakarta')\r\n\r\nA.kiri=B; A.kanan=C\r\nB.kiri=D; B.kanan=E\r\nC.kiri=F; C.kanan=G\r\nE.kiri=H\r\nG.kanan=I\r\n\r\ndatalist=[A.data, B.data, C.data, D.data, E.data, 
F.data, G.data, H.data, I.data, J.data]\r\nlevel=[]\r\n\r\ndef preord(sub):\r\n if sub is not None:\r\n print(sub.data)\r\n preord(sub.kiri)\r\n preord(sub.kanan)\r\ndef inord(sub):\r\n if sub is not None:\r\n inord(sub.kiri)\r\n print(sub.data)\r\n inord(sub.kanan)\r\n\r\ndef postord(sub):\r\n if sub is not None:\r\n postord(sub.kiri)\r\n postord(sub.kanan)\r\n print(sub.data)\r\n\r\ndef size(node):\r\n if node is None: \r\n return 0\r\n else: \r\n return (size(node.kiri)+ 1 + size(node.kanan)) \r\n\r\ndef maxDepth(node): \r\n if node is None: \r\n return 0 ; \r\n \r\n else : \r\n lDepth = maxDepth(node.kiri) \r\n rDepth = maxDepth(node.kanan) \r\n \r\n if (lDepth > rDepth): \r\n return lDepth+1\r\n else: \r\n return rDepth+1\r\n\r\n\r\ndef traverse(root):\r\n lvlist=[]\r\n current_level = [root]\r\n lv=0\r\n while current_level:\r\n #print(' '.join(str(node) for node in current_level))\r\n next_level = list()\r\n for n in current_level:\r\n if n.kiri:\r\n next_level.append(n.kiri)\r\n level.append(lv+1)\r\n if n.kanan:\r\n next_level.append(n.kanan)\r\n level.append(lv+1)\r\n current_level = next_level\r\n \r\n lv+=1\r\n lvlist.append(lv)\r\n return lvlist\r\n \r\ndef cetakdatadanlevel(root):\r\n traverse(A)\r\n print(root.data, ', Level 0')\r\n for i in range(len(level)):\r\n print(datalist[i+1], ', Level', level[i])\r\n \r\n\r\nprint('Ukuran dari Binary Tree adalah', size(A))\r\nprint('')\r\nprint('Tinggi maksimal dari Binary Tree adalah', maxDepth(A))\r\nprint('')\r\ncetakdatadanlevel(A)\r\n\r\n","sub_path":"Modul-9/modul9.py","file_name":"modul9.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"240517945","text":"'''\n@Author: Rashmi\n@Date: 2021-09-24 22:30\n@Last Modified by: Rashmi\n@Last Modified time: 2021-09-24 22:40\n@Title :Write a Python program to create a histogram from a given list of integers.\n'''\n \ndef histogram(items):\n\n '''Description : to take values and print the histogram according to values'''\n for number in items: # each element of item input\n output = ''\n times = number\n while( times > 0 ):\n output += '*' #add * for every number\n times = times - 1 # decrement the number \n print(output)\n\nif __name__ == '__main__':\n histogram([1,2,4,5,1]) ","sub_path":"DataStructures/Basic Python/Histogram.py","file_name":"Histogram.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"346746432","text":"#!C:\\Program Files (x86)\\Microsoft Visual Studio\\Shared\\Python37_64\\python.exe\nimport os\nimport sys\nimport json\nimport modify\n# Read request content\ncontent_length = int(os.environ[\"CONTENT_LENGTH\"])\nrequest_body = sys.stdin.read(content_length)\njson_data = json.loads(request_body)\n\n# Headers\nSLIDESHOW_HEADERS = [\"WorkerID\", \"filePath\", \"onset_call\", \"requested_onset\",\n \"rt \", \"fullscreen\"]\n\nEVENT_MARKING_HEADERS = [\"WorkerID\", \"trial\", \"marker_id\", \"frame\"]\n\n# Check if parameters have been supplied\nif 'turkID' in json_data:\n if 'data_type' in json_data:\n if 'demographics' in json_data:\n pass\n\n elif 'slideshow' in json_data['data_type']:\n f = open('%s_%s.txt' %\n (json_data['turkID'], json_data['data_type']), 'w')\n if json_data['data_type'] == \"slideshow_1\":\n f.write(\" \\t\".join(SLIDESHOW_HEADERS) + \"\\n\")\n else:\n f.write(\" \\t\".join(SLIDESHOW_HEADERS) + \"\\n\")\n for row in json_data['data_content']:\n 
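# one tab-separated line per trial; column order follows SLIDESHOW_HEADERS[:6]\n                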
f.write(\"\\t\".join([str(row[str(c).rstrip()])\n for c in SLIDESHOW_HEADERS[:6]]) + \"\\n\")\n\n f.close()\n\n result = {'success': 'true',\n 'message': 'The command completed successfully', 'json': json_data}\n\n elif 'recollection' in json_data['data_type']:\n f = open('%s_%s.txt' %\n (json_data['turkID'], json_data['data_type']), 'w')\n f.write(str(json_data['data_content']))\n f.close()\n\n result = {'success': 'true',\n 'message': 'The command completed successfully', 'json': json_data}\n\n elif 'event_marking' in json_data['data_type']:\n f = open('%s_%s.txt' %\n (json_data['turkID'], json_data['data_type']), 'w')\n\n f.write(\" \\t\".join(EVENT_MARKING_HEADERS) + \"\\n\")\n for row in json_data['data_content']:\n f.write(\" \\t\".join([str(row[str(c).rstrip()])\n for c in EVENT_MARKING_HEADERS]) + \"\\n\")\n f.close()\n # modify.updateReady(json_data['turkID'])\n result = {'success': 'true',\n 'message': 'The command completed successfully', 'json': json_data}\n\n else:\n result = {'success': 'false',\n 'message': 'Invalid data type', 'json': json_data}\n else:\n result = {'success': 'false',\n 'message': 'No data type detected', 'json': json_data}\nelse:\n result = {'success': 'false',\n 'message': 'Invalid mTurk ID', 'json': json_data}\n\n# print('Content-type: text/plain; charset=UTF-8\\n\\n')\n\nprint('Content-type: application/json; charset=UTF-8\\n\\n')\nprint(json.dumps(result))\n","sub_path":"event_marking/cgi/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"331513628","text":"import requests, sys, os, time, json, warnings, argparse, sys\n\nimport pytz\n\nfrom datetime import datetime\nimport pathos.pools as pp \nfrom crawler_proto import CrawlerProto, ProfileData\n\n#querycounter = 0\n\nclass InstagramCrawler(CrawlerProto):\n\n\tdef __init__(self, results_directory):\n\t\tself.results_directory = results_directory + '/instagram/'\n\n\tdef query(self, query_data):\n\t\t\"\"\"\n\n\t\tArgs:\n\t\t\ttarget: Username to be queried\n\t\tReturns:\n\t\t\tRaw data returned by the crawl of a profile.\n\n\t\t\"\"\"\n\t\ttarget = query_data[1]\n\t\tsince = datetime.strptime(query_data[2], '%y-%m-%d %H:%M:%S')\n\t\tuntil = datetime.strptime(query_data[3], '%y-%m-%d %H:%M:%S')\n\n\t\t#global querycounter\n\t\t#querycounter += 1\n\n\t\trequestcounter = 0\n\t\tfailedcounter = 0\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\trequestcounter += 1\n\t\t\t\tprofile_data = requests.get('https://instagram.com/{target}/?__a=1'.format(target=target), stream=True)\n\t\t\t\t#print('https://instagram.com/{target}/?__a=1'.format(target=target))\n\t\t\t\tbreak\n\t\t\texcept requests.exceptions.ConnectionError:\n\t\t\t\tfailedcounter += 1\n\t\t\t\ttime.sleep(0.1)\n\n\t\tif not (profile_data.status_code is requests.codes.ok):\n\t\t\traise ValueError('Profile {target} was not found on Instagram.'.format(target=target))\n\t\tprofile_json = profile_data.json()\n\n\t\tif profile_json['user']['is_private']:\n\t\t\traise ValueError(\"{target} is a private user, whose posts cannot be read.\".format(target=target))\n\n\t\tfollowerCount = profile_json['user']['followed_by']['count']\n\n\t\tpostList = []\n\n\t\tfor post in profile_json['user']['media']['nodes']:\n\t\t\tpost_date = datetime.fromtimestamp(post['date'])\n\t\t\tif post_date > until:\n\t\t\t\tcontinue\n\t\t\telif post_date < since:\n\t\t\t\tbreak\n\t\t\tpostList.append(post)\n\n\t\thas_next_page = 
profile_json['user']['media']['page_info']['has_next_page']\n\t\tid_next_page = profile_json['user']['media']['page_info']['end_cursor']\n\n\t\twhile True:\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\trequestcounter += 1\n\t\t\t\t\tnew_data = requests.get('https://instagram.com/{target}/?__a=1&max_id={id_next_page}'.format(target=target, id_next_page=id_next_page), stream=True)\n\t\t\t\t\t#print('https://instagram.com/{target}/?__a=1&max_id={id_next_page}'.format(target=target, id_next_page=id_next_page))\n\t\t\t\t\tbreak\n\t\t\t\texcept requests.exceptions.ConnectionError:\n\t\t\t\t\tfailedcounter += 1\n\t\t\t\t\ttime.sleep(0.1)\n\n\t\t\tif not (new_data.status_code is requests.codes.ok):\n\t\t\t\tbreak\n\t\t\tnew_json = new_data.json()\n\t\t\thas_next_page = new_json['user']['media']['page_info']['has_next_page']\n\t\t\tid_next_page = new_json['user']['media']['page_info']['end_cursor']\n\n\t\t\tif not has_next_page:\n\t\t\t\tbreak\n\n\t\t\tfor post in new_json['user']['media']['nodes']:\n\t\t\t\tpost_date = datetime.fromtimestamp(post['date'])\n\t\t\t\tif post_date > until:\n\t\t\t\t\tcontinue\n\t\t\t\telif post_date < since:\n\t\t\t\t\tbreak\n\t\t\t\tpostList.append(post)\n\n\t\t#print(\"Query #\" + str(querycounter) + \" had \" + str(requestcounter) + \" requests.\")\n\t\t#print(str(requestcounter), str(failedcounter))\n\n\t\treturn [query_data, followerCount, postList]\n\n\n\tdef format(self, raw_data):\n\t\t\"\"\"\n\n\t\tArgs:\n\t\t\traw_data: Raw data returned by the crawl of a profile.\n\t\tReturns:\n\t\t\tProfileData tuple of the formatted profile data.\n\n\t\t\"\"\"\n\t\ttarget = raw_data[0]\n\t\tfollowerCount = raw_data[1]\n\t\tpostList = raw_data[2]\n\n\t\treturn ProfileData(Artist_Name=target[0], Artist_Login=target[1], File_create_datetime=str(datetime.now()), Follower_Count=followerCount, Posts=postList)\n\n","sub_path":"crawlers/instagram_crawler.py","file_name":"instagram_crawler.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"341764220","text":"class Student:\n schoolname =\"KV\" # this is a static variable ,class level variable\n\n def __init__(self):\n Student.name = 'sunjay' #these are instance variable (object level variable)\n #self.rno =rno\n @classmethod\n def m1(cls):\n cls.d =40\n\ns=Student()\ns.m1()\nprint(Student.__dict__) #print class level objects","sub_path":"Tutorial/OOP/staticvariable.py","file_name":"staticvariable.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"267935308","text":"#FEM code for 1D 2nd order steady state equation\n#Marcel Frehner, ETH Zurich, 2017\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndef kappa_loc(el,el_tot, kappa_0):\n x = el/el_tot\n\n kappa = kappa_0 * (x-0.5)**2.0+0.5**2.0 \n\n return kappa_0\n\ndef source_loc(el,el_tot, source_0):\n\n return source_0\n\n\ndef T_analytical(t, Tmax, sigma, kappa,x):\n\n T = ((Tmax)/(np.sqrt(1+4*t*kappa/(sigma**2.0))))*np.exp((-x**2.0)/(sigma**2.0+4*t*kappa))\n\n\n return T\n\n#Shape functions and their derivatives\ndef N(xsi,eta,i):\n if i == 1:\n return 0.25*(1-xsi)*(1-eta)\n elif i == 2:\n return 0.25*(1-xsi)*(1+eta)\n elif i == 3:\n return 0.25*(1+xsi)*(1+eta)\n elif i == 4:\n return 0.25*(1+xsi)*(1-eta)\n else:\n raise ValueError('Variable i must be 1 or 2')\n\ndef dNdxsi(xsi,eta,i):\n if i == 1:\n return -0.25*(1-eta)\n elif i == 2:\n return -0.25*(1+eta)\n elif i == 3:\n 
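# derivative of shape function N3 = 0.25*(1+xsi)*(1+eta) with respect to xsi\n        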
return 0.25*(1+eta)\n elif i == 4:\n return 0.25*(1-eta)\n else:\n raise ValueError('Variable i must be 1 or 2')\n\ndef dNdeta(xsi,eta,i):\n if i == 1:\n return -0.25*(1-xsi)\n elif i == 2:\n return 0.25*(1-xsi)\n elif i == 3:\n return 0.25*(1+xsi)\n elif i == 4:\n return -0.25*(1+xsi)\n else:\n raise ValueError('Variable i must be 1 or 2')\n\ndef create_gnum(n_el_x, n_el_y):\n n_el = n_el_x*n_el_y \n gnum = np.empty((4,0),dtype=int)\n\n num = 1\n for i in range(0,n_el):\n if num % (n_el_x+1) == 0: \n num = num + 1\n col = np.array([num, num+n_el_x+1, num+n_el_x+2, num+1]).reshape(4,1)\n gnum = np.append(gnum, col, axis=1)\n num = num + 1\n \n return gnum-1\n\ndef create_nf(n_nodes,dof):\n#Relationship between nodes and equation numbers (dof is degree of freedom at each node)\n nf = np.empty((dof,0))\n num = 1\n for i in range(0,n_nodes):\n col = np.array([[num,num+1]]).reshape(2,1)\n nf = np.append(nf,col,axis=1)\n num = num+2\n\n return nf-1\n\ndef create_gg (gnum, nf):\n n_lines = len(gnum[:,0])*len(nf[:,0])\n gg = np.empty((n_lines,0))\n\n for i in range(0,len(gnum[0,:])):\n col = np.empty((0,1))\n idxs = gnum[:,i] \n for idxs_i in idxs:\n col = np.append(col,nf[:,idxs_i].reshape((dof,1)),axis=0)\n\n gg = np.append(gg,col,axis=1)\n\n return gg\n\n\n\n\n\n#GENERAL STUFF\n\n#GEOMETRICAL PARAMETERS\nxmin = 0\nLx = 20 #length of model in direction x [m]\nLy = 20 #length of model in direction y\n \n#PHYSICAL PARAMETERS\n#kappa\t=\t1.0 # thermal diffusivity [m2/s]\n#source\t=\t1.0 # heat source [K/s]\n \n#NUMERICAL PARAMETERS\n\ndim = 2 #dimension\ndof = 2 #degree of freedom at nodes\nn_el_x = 3#number of elements in a row\nn_el_y = 3 #number of elements in a column\n\nel_tot = n_el_x * n_el_y #total number of elements #elements total\nn_nodes_x = n_el_x + 1 #number of nodes in a row\nn_nodes_y = n_el_y + 1 #number of nodes in a column\nn_per_el\t=\t4 #nodes per element\n\nn_nodes_tot = n_nodes_x*n_nodes_y\n\n\n\n\n'''\n \n#CREATE NUMERICAL GRID\nGCOORD = np.empty((0,n_nodes_tot))\n\nxv,yv = np.meshgrid(np.linspace(0,Lx,n_nodes_x),np.linspace(0,Ly,n_nodes_y))\nxv = np.ndarray.flatten(xv).reshape((1,n_nodes_tot))\nyv = np.ndarray.flatten(yv).reshape((1,n_nodes_tot))\n\nGCOORD = np.append(GCOORD,xv,axis=0)\nGCOORD = np.append(GCOORD,yv,axis=0)\n\n#time paramaters\ndt = 0.5\nt_final = 50\n\t\n#LOCAL-TO-GLOBAL MAPPING\ng_num = create_gnum(n_el_x=n_el_x,n_el_y=n_el_y) #relates local to global node numbers per element\n\n\t\n#BOUNDARY CONDITIONS\nbc_dof = np.ndarray.flatten(np.array([np.linspace(0,n_el_x,n_nodes_x),np.linspace(n_nodes_tot-1-n_el_x,n_nodes_tot-1,n_nodes_x)], dtype=int)) #dof's to which Dirichlet bc's are assigned\nbc_val = 100.0 # value for these dof's\n \n#INITIALIZATION OF ALL KINDS OF STUFF\nLG\t=\tnp.zeros((n_nodes_tot,n_nodes_tot)) #global stiffness matrix\nRG = np.zeros((n_nodes_tot,n_nodes_tot)) #global right hand side matrix\nFG\t=\tnp.zeros((n_nodes_tot,1)) #global force vector\nT = np.zeros((n_nodes_tot,1)) #temperature vector\n\n\n#Integration definition of variables\nn_ip = 4 #Number of integration points\nxsi = np.array([-np.sqrt(1.0/3), np.sqrt(1.0/3)])\neta = np.array([-np.sqrt(1.0/3),np.sqrt(1.0/3)])\n\nweight = np.array([1.0, 1.0])\n\n\n\n############INTEGRATION##################\nfor iel in range(0,el_tot): # ELEMENT LOOP\n\n kappa = kappa_loc(iel,el_tot,1.0)\n source = source_loc(iel,el_tot,0.0)\n\n D = np.array([[kappa,0],[0,kappa]])\n\n \n \n #Initialize the M,K,F matrices\n Mloc = np.zeros((n_per_el,n_per_el))\n Kloc = np.zeros((n_per_el,n_per_el))\n Floc = 
np.zeros((n_per_el,1))\n\n \n #Start a loop over all integration points\n for i, xsi_i in enumerate(xsi):\n for j, eta_i in enumerate(eta):\n N_l = np.array([N(xsi_i,eta_i,1),N(xsi_i,eta_i,2),N(xsi_i,eta_i,3),N(xsi_i,eta_i,4)]).reshape(1,4)\n\n dN_l = np.empty((0,4))\n\n dNdxsi_l = np.array([dNdxsi(xsi_i,eta_i,1),dNdxsi(xsi_i,eta_i,2),dNdxsi(xsi_i,eta_i,3),dNdxsi(xsi_i,eta_i,4)]).reshape(1,4)# derivative of local shape function at this integration point\n dNdeta_l = np.array([dNdeta(xsi_i,eta_i,1),dNdeta(xsi_i,eta_i,2),dNdeta(xsi_i,eta_i,3),dNdeta(xsi_i,eta_i,4)]).reshape(1,4)\n\n dN_l = np.append(dN_l,dNdxsi_l,axis=0)\n dN_l = np.append(dN_l,dNdeta_l,axis=0)\n\n #calculate the Jacobian with equation 5.23 in the script\n n_now = g_num[:,iel] #which nodes are in the current element?\n gcoord_nodes = GCOORD[:,n_now].transpose() #what are the corresponding x and y coordinates of the nodes? transpose to have same form as in script (4x2)\n\n jac = np.matmul(dN_l,gcoord_nodes) #Calculate the Jacobian matrix\n\n dN_g = np.matmul(np.linalg.inv(jac),dN_l) #Convert the derivatives from the local coordinates to the global coordinates\n\n det_jac = np.linalg.det(jac) #calculate the determinate of the jacobian\n\n #perform vector multiplication involving shape functions or derivatives (evaluated at integration points), multiplied by weight and by the det(J)\n #sum with the previous multiplication\n Mloc = Mloc + np.matmul(N_l.transpose(),N_l)*weight[i]*weight[j]*det_jac\n Kloc = Kloc + np.matmul(dN_g.transpose(),np.matmul(D,dN_g))*weight[i]*weight[j]*det_jac\n Floc = Floc + source*N_l.transpose()*weight[i]*weight[j]*det_jac\n\n\n\n\n \n #########################################\n \n Lloc = Mloc/dt + Kloc\n Rloc = Mloc/dt \n\n n_now = g_num[:,iel] #which nodes are in the current element?\n\n LG[np.ix_(n_now,n_now)] = LG[np.ix_(n_now,n_now)] + Lloc \n RG[np.ix_(n_now,n_now)] = RG[np.ix_(n_now,n_now)] + Rloc \n FG[np.ix_(n_now)] = FG[np.ix_(n_now)]+Floc\n\n\n\ntime = 0.0 \n\nwhile time < t_final: #main time step loop starts\n\n#APPLY BOUNDARY CONDITIONS\n\n\n b = np.matmul(RG,T)+FG #forming the right hand side vector b\n\n \n for j,i in enumerate(bc_dof): #boundary conditions for fixed temperature, for Neumann: comment this block as it is the default bc-condition by construction \n LG[i, : ] = 0.0\n LG[i,i] = 1.0\n b[i]= bc_val\n #SOLVER\n\n T = np.linalg.solve(LG,b) #new temperature\n\n time = time + dt\n\n #PLOTTING\n\n #plt.plot(x_ana, T_ana, '-', color='red')\n\n\nxv = xv.reshape(n_nodes_x,n_nodes_y)\nyv = yv.reshape(n_nodes_x,n_nodes_y)\nT = T.reshape(n_nodes_x,n_nodes_y)\n\nprint(xv)\nprint(yv)\nplt.figure()\nplt.contourf(xv,yv,T)\nplt.colorbar()\nplt.show()\n'''\n\n\n \n","sub_path":"fem_elasticity.py","file_name":"fem_elasticity.py","file_ext":"py","file_size_in_byte":7654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"627883126","text":"import urllib.request\nimport os\nimport json\nimport pprint\nfrom bs4 import BeautifulSoup\n\n#self-created modules below\nimport lib.moveMatcher as moveMatcher\n\n#======================================================\n#=======Should probably put this stuff in a json file=====\n#======================================================\ncharaImageLinks = open(\"lib/characterImageList.txt\", 'r')\ncharaImageLines = charaImageLinks.read().splitlines()\n\ncharaDataFile = open(\"lib/characterList.txt\", 'r')\ncharaLines = charaDataFile.read().splitlines()\n\n#declare dictionaries\ncharLocalPageDict = 
dict()\ncharFullUrlDict = dict()\ncharImgurDict = dict()\n\n#Create Dictionaries for referring to locally stored webpages\nfor lines in charaLines:\n charVar = lines.split(\",\")\n charLocalPageDict[charVar[0]] = charVar[0] + '.html'\n charFullUrlDict[charVar[0]] = charVar[1]\n\n#Create dictionary for imgur urls of character images, used in embed thumbnails\nfor lines in charaImageLines:\n charImg = lines.split(\",\")\n charImgurDict[charImg[0]] = charImg[1]\n#======================================================\n#====END Should probably put this stuff in a module====\n#======================================================\ndef does_char_exist(user_Chara_Name):\n doesCharacterExist = 0\n\n for characterName in charLocalPageDict:\n if user_Chara_Name.lower() == characterName:\n print(\"\\n======================\")\n print(\"Chara Found: \" + user_Chara_Name)\n doesCharacterExist = 1\n break\n\n return doesCharacterExist\n\ndef charJsonMassConverter():\n for game_character in charLocalPageDict:\n print (game_character)\n charUrl = game_character + '.html'\n get_charJson(charUrl)\n\n\ndef get_charJson(chara_Name):\n dirStr = os.getcwd()\n charFilePath = 'file:///' + dirStr + '/webpages/' + chara_Name\n name = chara_Name.replace(\".html\", \"\")\n jsonFilePath = dirStr + '/json/' + name + '.json'\n jsonconvert = None #ensure the name is bound even if the file or network operations below fail\n\n try:\n if os.path.isfile(jsonFilePath): #if path exists\n file = open(jsonFilePath, 'r')\n content = file.read()\n jsonconvert = json.loads(content)\n else:\n charSpecific = urllib.request.urlopen(charFilePath).read()\n charPageSoup = BeautifulSoup(charSpecific, \"html.parser\")\n moveAttribute_List_of_Dicts = []\n\n for table_row in charPageSoup.select(\"table tr\"):\n col = table_row.find_all('td')\n\n addmove = {\n \"Command\": col[0].text,\n \"Hit level\": col[1].text,\n \"Damage\": col[2].text,\n \"Start up frame\": col[3].text,\n \"Block frame\": col[4].text,\n \"Hit frame\": col[5].text,\n \"Counter hit frame\": col[6].text,\n \"Notes\": col[7].text\n }\n\n if addmove[\"Command\"] == \"Command\":\n continue\n\n for key in addmove:\n if addmove[key] == \"\":\n addmove[key] = \"-\"\n\n moveAttribute_List_of_Dicts.append(addmove)\n \n file = open(jsonFilePath, 'w')\n json.dump(moveAttribute_List_of_Dicts, file, indent=4)\n\n #There is probably a better way of doing this\n file = open(jsonFilePath, 'r')\n content = file.read()\n jsonconvert = json.loads(content)\n except IOError as e:\n print(e)\n \n return jsonconvert\n\ndef get_Move_Details(chara_Name, chara_Move, is_case_sensitive):\n charMoves_json = get_charJson(charLocalPageDict[chara_Name])\n\n move_Attribute_Dict = moveMatcher.move_Compare_Main(chara_Move, charMoves_json, is_case_sensitive, chara_Name)\n\n if not move_Attribute_Dict:\n print('MOVE NOT FOUND: ' + chara_Move)\n print(\"======================\")\n return move_Attribute_Dict\n\ndef get_Similar_Moves(chara_Name, chara_Move):\n charMoves_json = get_charJson(charLocalPageDict[chara_Name])\n\n similar_Moves_List = moveMatcher.move_Compare_Similar(chara_Move, charMoves_json)\n return similar_Moves_List\n\ndef get_Misc_Chara_Details(chara_Name):\n #misc character details used in embed display\n chara_WebUrl = charFullUrlDict[chara_Name]\n chara_ImgurPortrait = charImgurDict[chara_Name]\n\n misc_chara_details_dict = {'char_url' : chara_WebUrl,\n 'char_imgur' : chara_ImgurPortrait\n }\n\n return 
misc_chara_details_dict","sub_path":"lib/tekkenFinder.py","file_name":"tekkenFinder.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"336505336","text":"from django.db import models\nfrom address.models import AddressField\n#from event_manager.models import Event\nfrom racelog.storage_backends import PrivateMediaStorage\nfrom django.contrib.auth.models import User\n\nclass RacemanProfile(models.Model):\n types = (('of','Race Official'),\n ('dr','Driver'))\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n licence_no = models.CharField(primary_key=True,max_length=100)\n licence_expiry = models.DateField()\n address = models.CharField(max_length=500)\n photo = models.ImageField(storage=PrivateMediaStorage())\n role = models.CharField(max_length=2,choices=types)\n\n def __str__(self):\n return '{0} - {1} {2}'.format(self.licence_no,self.user.first_name,self.user.last_name)\n\n\n","sub_path":"driver_manager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"137431528","text":"import os\nimport re\nimport sys\nimport shutil\nimport glob\nfrom distutils.dir_util import copy_tree\n\ndef create_var_file(path,var_list):\n with open(path+'/inpt/variables.dat', mode='w') as f:\n n=var_list[4]\n f.write(str(n)+'\\n')\n if var_list[2] == 'number_density':\n rate = (var_list[3]+100)/100\n f.write(str(rate)+'D0\\n')\n f.write('1.D0\\n')\n elif var_list[2] == 'packing_fraction':\n rate = (var_list[3]+100)/100\n f.write('1.D0\\n')\n f.write(str(rate)+'D0\\n')\n else:\n print('error#1')\n sys.exit()\n \n with open(path+'/inpt/variables2.dat',mode='w')as f:\n f.write('TEMP={}\\n'.format(var_list[0]))\n f.write('DENS={}\\n'.format(var_list[1]))\n\ndef adjust_arfor(path, var_list):\n with open(path+'/inpt/ar.for',mode='r') as f:\n data_lines = f.read()\n data_lines = re.sub(r'np=\\d+',r'np='+var_list[4],data_lines)\n with open(path+'/inpt/ar.for',mode='w') as f:\n f.write(data_lines)\n\n with open(path+'/inpt/cmmn.for',mode='r') as f:\n data_lines = f.read()\n data_lines = re.sub(r'np=\\d+',r'np='+var_list[4],data_lines)\n with open(path+'/inpt/cmmn.for',mode='w') as f:\n f.write(data_lines)\n\ndef adjust_jobsh(path, var_list):\n with open(path+'/job.sh',mode='r',encoding='utf-8')as f:\n data_lines = f.read()\n # print(re.findall(r'INPUTTEMP .*',data_lines),'INPUTTEMP = '+var_list[0])\n data_lines = re.sub(r'INPUTTEMP .+','INPUTTEMP = '+var_list[0],data_lines)\n data_lines = re.sub(r'INPUTDENS .+','INPUTDENS = '+var_list[1],data_lines)\n with open(path+'/job.sh',mode='w')as f:\n f.write(data_lines)\n \ndef adjust_vfor(path, var_list):\n with open(path+'/vis/v.for', mode='r')as f:\n data = f.read()\n data = re.sub(r' temp=.+',' temp='+var_list[0],data)\n data = re.sub(r' dens=.+',' dens='+var_list[1],data)\n with open(path+'/vis/v.for', mode='w')as f:\n f.write(data)\n\n\n\n\ndef make_dir_tree(project_name, temps, variables):\n dens = {'124k': '1.1249D+3', \n '100k': '1.3110D+3', \n '90k': '1.3876D+3'}\n n=256\n sl = '/'\n path_list = []\n for temp in temps:\n for variable in variables:\n for irate in range(-10,11,5):\n rate = re.sub('-','m',str(irate))\n ftemp = re.sub('k','.D0', temp)\n path = project_name+sl+temp+sl+variable+sl+rate\n path_list.append(path)\n print(path)\n os.makedirs(path, exist_ok=True)\n copy_tree('./standard',path)\n var_list = 
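get_charJson in the tekkenFinder snippet above parses a character's HTML move table once and caches it as JSON, reloading the cache on later calls. A trimmed sketch of that parse-once-then-cache pattern; the toy HTML, the cache path, and the two-column subset of the move fields are assumptions for brevity:

```python
import json
import os
from bs4 import BeautifulSoup  # pip install beautifulsoup4

def table_rows_to_dicts(html, cache_path):
    """Parse an HTML table into a list of dicts, caching the result as JSON."""
    if os.path.isfile(cache_path):          # reuse the cached parse
        with open(cache_path, 'r') as f:
            return json.load(f)

    soup = BeautifulSoup(html, 'html.parser')
    moves = []
    for row in soup.select('table tr'):
        cols = [td.text for td in row.find_all('td')]
        if len(cols) < 2 or cols[0] == 'Command':   # skip header/short rows
            continue
        # Blank cells become "-" as in the snippet.
        moves.append({'Command': cols[0] or '-', 'Damage': cols[1] or '-'})

    with open(cache_path, 'w') as f:
        json.dump(moves, f, indent=4)
    return moves

html = "<table><tr><td>1,2</td><td>14</td></tr></table>"  # toy input
print(table_rows_to_dicts(html, '/tmp/moves.json'))
```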
[ftemp,dens[temp],variable,irate,str(n)]\n create_var_file(path,var_list)\n adjust_arfor(path,var_list)\n adjust_jobsh(path,var_list)\n adjust_vfor(path,var_list)\n \n return path_list\n\ndef main():\n project_name = 'kadai_500_90k'\n temps = ['90k']\n variables = ['number_density', 'packing_fraction']\n path_list = make_dir_tree(project_name, temps,variables)\n \n\n\n\n\nif __name__ == '__main__':\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n main()","sub_path":"create_project.py","file_name":"create_project.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"483986432","text":"'''\n5. Property Tax\n A county collects property taxes on the assessment value of property, which is 60 percent\n of the property’s actual value. If an acre of land is valued at $10,000, its assessment value\n is $6,000. The property tax is then $0.64 for each $100 of the assessment value. The tax\n for the acre assessed at $6,000 will be $38.40. Write a GUI program that displays the\n assessment value and property tax when a user enters the actual value of a property.\n'''\n\nimport tkinter as tk\n\ndef main():\n # instantiate the window\n window = tk.Tk()\n \n # Set the window title and geometry (as desired)\n window.title('Property Tax Calculator')\n # window.geometry('400x400')\n\n # define the property tax calculation function\n def calc_property_tax():\n # get the entered property value\n value_property = prop_var.get()\n # calculate the assessed value to be 60% of the property value\n value_assessed = value_property * 0.6\n # calculate the property tax to be assessed value/100 * 0.64 ($0.64 per $100 of the assessed value)\n tax_property = (value_assessed/100) * 0.64\n # build the return labels\n label_assessed = tk.Label(text=f'${value_assessed:,.2f}', justify='center')\n label_assessed.grid(row=1, column=1, padx=5, pady=5)\n label_tax = tk.Label(text=f'${tax_property:,.2f}', justify='center')\n label_tax.grid(row=1, column=2, padx=5, pady=5)\n\n # Create the Label headers\n label_head_property_val = tk.Label(text='Actual Property Value:')\n label_head_property_val.grid(row=0, column=0, padx=5, pady=15)\n label_head_assessed_val = tk.Label(text='Assessed Property Value:')\n label_head_assessed_val.grid(row=0, column=1, padx=5, pady=15)\n label_head_property_tax = tk.Label(text='Property Tax:')\n label_head_property_tax.grid(row=0, column=2, padx=5, pady=15)\n\n # create the property value variable and entry fields\n prop_var = tk.IntVar()\n entry_property_val = tk.Entry(textvariable=prop_var, justify='center')\n entry_property_val.grid(row=1, column=0, padx=5, pady=5)\n\n # create the button to call the calc_property_tax function when clicked\n button_calc = tk.Button(text='Calculate Property Tax', command=calc_property_tax)\n button_calc.grid(row=2, column=1, pady=15)\n\n # call the window mainloop\n window.mainloop()\n# Call the main function\nmain()","sub_path":"studentCode/GUI/gui_property_tax.py","file_name":"gui_property_tax.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"206029896","text":"#!/usr/bin/env python\n\nfrom __future__ import division\nimport argparse\nimport pickle\n\n\n\"\"\"\n===============================================================================\n\tPlease complete the following 
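The tax rule in the gui_property_tax docstring reduces to two multiplications: the assessment is 60% of the actual value, and the tax is $0.64 per $100 of the assessment. The same arithmetic as a pure function, checked against the docstring's own worked example:

```python
def assessment_and_tax(actual_value):
    """Return (assessment value, property tax): the assessment is 60% of the
    actual value, and the tax is $0.64 per $100 of the assessment."""
    assessment = actual_value * 0.60
    tax = (assessment / 100) * 0.64
    return assessment, tax

# The docstring's example: a $10,000 acre -> $6,000 assessed -> $38.40 tax.
assessment, tax = assessment_and_tax(10000)
assert assessment == 6000.0 and round(tax, 2) == 38.40
```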
function.\n===============================================================================\n\"\"\"\n\ndef solve(P, M, N, C, items, constraints, mode, input_file):\n\t\"\"\"\n\tWrite your amazing algorithm here.\n\n\tReturn: a list of strings, corresponding to item names.\n\t\"\"\"\n\t#P = pounds\n\t#M = money\n\t#N = num items\n\t#C = num constraints\n\tinput_file_digits = [s for s in list(input_file) if s.isdigit()]\n\tfile_num = \"\".join(input_file_digits)\n\n\t#all items are now sorted by value density, and all useless items have been removed\n\titems = [item for item in items if item[4]-item[3] > 0 and item[2] < P and item[3] < M]\n\t\n\n\tif mode == \"full_solve\":\n\t\tadjacency_matrixer(P, M, N, C, items, constraints, file_num)\n\t\theuristic_num_constraints(P, M, N, C, items, constraints, file_num)\n\t\treturn heuristic_average_value_density(P, M, N, C, items, constraints, file_num)\n\telif mode == \"choose_classes\":\n\t\theuristic_num_constraints(P, M, N, C, items, constraints, file_num)\n\t\treturn heuristic_average_value_density(P, M, N, C, items, constraints, file_num)\n\telse:\n\t\t'''\n\t\tmatrix_file = \"matrices/matrix\" + file_num + \".p\"\n\t\tadjacency_matrix = pickle.load( open( matrix_file, \"rb\" ) )\n\n\t\tclasses = make_list_of_classes(items, file_num, adjacency_matrix)\n\n\t\tclass_values_profit_density = class_heuristic_average_profit_density(classes)\n\n\t\tchosen_classes = choose_classes(adjacency_matrix, class_values_profit_density)\n\n\t\treturn choose_items_greedy(P, M, N, C, items, chosen_classes, file_num)\n\t\t'''\n\t\treturn heuristic_decider(P, M, N, C, items, file_num)\n\ndef heuristic_decider(P, M, N, C, items, file_num):\n\titems = [item for item in items if item[4]-item[3] > 0 and item[2] < P and item[3] < M]\n\n\tmatrix_file = \"matrices/matrix\" + file_num + \".p\"\n\tadjacency_matrix = pickle.load( open( matrix_file, \"rb\" ) )\n\n\tclasses = make_list_of_classes(items, file_num, adjacency_matrix)\n\n\ttot_prof_sort, avg_prof_sort, tot_prof_density_sort, avg_prof_density_sort, tot_perc_sort, avg_perc_sort = god_class_sorting_heuristic(classes)\n\n\tleast_constraints_file = \"picked_classes/picked_classes\" + file_num + \".p\"\n\tleast_constraints_classes = pickle.load( open( least_constraints_file, \"rb\" ) )\n\n\titems_by_pd = sorted(items, key=lambda x: (x[2]/(x[4]-x[3])) )\n\titems_by_p = sorted(items, key=lambda x: (x[4]-x[3]), reverse=True )\n\t#items_by_percent_profit\n\titems_by_pp = sorted(items, key=lambda x: (x[4]/(x[3]+0.000001)), reverse=True )\n\n\n\n\ttot_prof_classes = choose_classes(adjacency_matrix, tot_prof_sort)\n\tavg_prof_classes = choose_classes(adjacency_matrix, avg_prof_sort)\n\ttot_prof_density_classes = choose_classes(adjacency_matrix, tot_prof_density_sort)\n\tavg_prof_density_classes = choose_classes(adjacency_matrix, avg_prof_density_sort)\n\ttot_perc_classes = choose_classes(adjacency_matrix, tot_perc_sort)\n\tavg_perc_classes = choose_classes(adjacency_matrix, avg_perc_sort)\n\n\n\t#choose items by profit density\n\ttot_prof_items1, tot_prof_money1 = choose_items_greedy(P, M, N, C, items_by_pd, tot_prof_classes, file_num)\n\tavg_prof_items1, avg_prof_money1 = choose_items_greedy(P, M, N, C, items_by_pd, avg_prof_classes, file_num)\n\ttot_prof_density_items1, tot_prof_density_money1 = choose_items_greedy(P, M, N, C, items_by_pd, tot_prof_density_classes, file_num)\n\tavg_prof_density_items1, avg_prof_density_money1 = choose_items_greedy(P, M, N, C, items_by_pd, avg_prof_density_classes, file_num)\n\ttot_perc_items1, 
tot_perc_money1 = choose_items_greedy(P, M, N, C, items_by_pd, tot_perc_classes, file_num)\n\tavg_perc_items1, avg_perc_money1 = choose_items_greedy(P, M, N, C, items_by_pd, avg_perc_classes, file_num)\n\n\t#choose items by total profit\n\ttot_prof_items2, tot_prof_money2 = choose_items_greedy(P, M, N, C, items_by_p, tot_prof_classes, file_num)\n\tavg_prof_items2, avg_prof_money2 = choose_items_greedy(P, M, N, C, items_by_p, avg_prof_classes, file_num)\n\ttot_prof_density_items2, tot_prof_density_money2 = choose_items_greedy(P, M, N, C, items_by_p, tot_prof_density_classes, file_num)\n\tavg_prof_density_items2, avg_prof_density_money2 = choose_items_greedy(P, M, N, C, items_by_p, avg_prof_density_classes, file_num)\n\ttot_perc_items2, tot_perc_money2 = choose_items_greedy(P, M, N, C, items_by_p, tot_perc_classes, file_num)\n\tavg_perc_items2, avg_perc_money2 = choose_items_greedy(P, M, N, C, items_by_p, avg_perc_classes, file_num)\n\n\t#choose items by percent profit\n\ttot_prof_items3, tot_prof_money3 = choose_items_greedy(P, M, N, C, items_by_pp, tot_prof_classes, file_num)\n\tavg_prof_items3, avg_prof_money3 = choose_items_greedy(P, M, N, C, items_by_pp, avg_prof_classes, file_num)\n\ttot_prof_density_items3, tot_prof_density_money3 = choose_items_greedy(P, M, N, C, items_by_pp, tot_prof_density_classes, file_num)\n\tavg_prof_density_items3, avg_prof_density_money3 = choose_items_greedy(P, M, N, C, items_by_pp, avg_prof_density_classes, file_num)\n\ttot_perc_items3, tot_perc_money3 = choose_items_greedy(P, M, N, C, items_by_pp, tot_perc_classes, file_num)\n\tavg_perc_items3, avg_perc_money3 = choose_items_greedy(P, M, N, C, items_by_pp, avg_perc_classes, file_num)\n\n\n\n\n\tleast_constraints_items1, least_constraints_money1 = choose_items_greedy(P, M, N, C, items_by_pd, least_constraints_classes, file_num)\n\n\tleast_constraints_items2, least_constraints_money2 = choose_items_greedy(P, M, N, C, items_by_p, least_constraints_classes, file_num)\n\n\tleast_constraints_items3, least_constraints_money3 = choose_items_greedy(P, M, N, C, items_by_pp, least_constraints_classes, file_num)\n\n\twinner = max(tot_prof_money1,\n\t\ttot_prof_money2,\n\t\ttot_prof_money3,\n\t\tavg_prof_money1,\n\t\tavg_prof_money2,\n\t\tavg_prof_money3,\n\t\ttot_prof_density_money1,\n\t\ttot_prof_density_money2,\n\t\ttot_prof_density_money3, \n\t\tavg_prof_density_money1,\n\t\tavg_prof_density_money2,\n\t\tavg_prof_density_money3, \n\t\ttot_perc_money1,\n\t\ttot_perc_money2,\n\t\ttot_perc_money3,\n\t\tavg_perc_money1,\n\t\tavg_perc_money2,\n\t\tavg_perc_money3,\n\t\tleast_constraints_money1,\n\t\tleast_constraints_money2,\n\t\tleast_constraints_money3)\n\n\tif winner == tot_prof_money1:\n\t\tprint(\"Problem \" + file_num + \": tot_prof classes prof_density items with \" + str(tot_prof_money1))\n\t\treturn tot_prof_items1\n\telif winner == avg_prof_money1:\n\t\tprint(\"Problem \" + file_num + \": avg_prof classes prof_density items with \" + str(avg_prof_money1))\n\t\treturn avg_prof_items1\n\telif winner == tot_prof_density_money1:\n\t\tprint(\"Problem \" + file_num + \" tot_prof_density classes prof_density items with \" + str(tot_prof_density_money1))\n\t\treturn tot_prof_density_items1\n\telif winner == avg_prof_density_money1:\n\t\tprint(\"Problem \" + file_num + \" avg_prof_density classes prof_density items with \" + str(avg_prof_density_money1))\n\t\treturn avg_prof_density_items1\n\telif winner == least_constraints_money1:\n\t\tprint(\"Problem \" + file_num + \" least_constraints classes prof_density items 
with \" + str(least_constraints_money1))\n\t\treturn least_constraints_items1\n\telif winner == tot_perc_money1:\n\t\tprint(\"Problem \" + file_num + \" tot_perc classes prof_density items with \" + str(tot_perc_money1))\n\t\treturn tot_perc_items1\n\telif winner == avg_perc_money1:\n\t\tprint(\"Problem \" + file_num + \" avg_perc classes prof_density items with \" + str(avg_perc_money1))\n\t\treturn avg_perc_items1\n\t#TOTAL PROFIT WINNERS START HERE\n\telif winner == tot_prof_money2:\n\t\tprint(\"Problem \" + file_num + \" tot_prof classes tot_prof items with \" + str(tot_prof_money2))\n\t\treturn tot_prof_items2\n\telif winner == avg_prof_money2:\n\t\tprint(\"Problem \" + file_num + \" avg_prof classes tot_prof items with \" + str(avg_prof_money2))\n\t\treturn avg_prof_items2\n\telif winner == tot_prof_density_money2:\n\t\tprint(\"Problem \" + file_num + \" tot_prof_density classes tot_prof items with \" + str(tot_prof_density_money2))\n\t\treturn tot_prof_density_items2\n\telif winner == avg_prof_density_money2:\n\t\tprint(\"Problem \" + file_num + \" avg_prof_density classes tot_prof items with \" + str(avg_prof_density_money2))\n\t\treturn avg_prof_density_items2\n\telif winner == tot_perc_money2:\n\t\tprint(\"Problem \" + file_num + \" tot_perc classes tot_prof items with \" + str(tot_perc_money2))\n\t\treturn tot_perc_items2\n\telif winner == avg_perc_money2:\n\t\tprint(\"Problem \" + file_num + \" avg_perc classes tot_prof items with \" + str(avg_perc_money2))\n\t\treturn avg_perc_items2\n\telif winner == least_constraints_money2:\n\t\tprint(\"Problem \" + file_num + \" least_constraints classes tot_prof items with \" + str(least_constraints_money2))\n\t\treturn least_constraints_items2\n\t#PERCENT PROFIT WINNERS START HERE\n\telif winner == tot_prof_money3:\n\t\tprint(\"Problem \" + file_num + \" tot_prof classes percent items with \" + str(tot_prof_money3))\n\t\treturn tot_prof_items3\n\telif winner == avg_prof_money3:\n\t\tprint(\"Problem \" + file_num + \" avg_prof classes percent items with \" + str(avg_prof_money3))\n\t\treturn avg_prof_items3\n\telif winner == tot_prof_density_money3:\n\t\tprint(\"Problem \" + file_num + \" tot_prof_density classes percent items with \" + str(tot_prof_density_money3))\n\t\treturn tot_prof_density_items3\n\telif winner == avg_prof_density_money3:\n\t\tprint(\"Problem \" + file_num + \" avg_prof_density classes percent items with \" + str(avg_prof_density_money3))\n\t\treturn avg_prof_density_items3\n\telif winner == tot_perc_money3:\n\t\tprint(\"Problem \" + file_num + \" tot_perc classes percent items with \" + str(tot_perc_money3))\n\t\treturn tot_perc_items3\n\telif winner == avg_perc_money3:\n\t\tprint(\"Problem \" + file_num + \" avg_perc classes percent items with \" + str(avg_perc_money3))\n\t\treturn avg_perc_items3\n\t#elif winner = least_constraints_money3:\n\telse:\n\t\tprint(\"Problem \" + file_num + \" least_constraints classes percent items with \" + str(least_constraints_money3))\n\t\treturn least_constraints_items3\n\n\ndef adjacency_matrixer(P, M, N, C, items, constraints, file_num):\n\tprint(\"hang0\")\n\tlist_of_all_classes = []\n\tfor i in items:\n\t\t#[item_name]; [class]; [weight]; [cost]; [resale value]\n\t\tif i[1] not in list_of_all_classes:\n\t\t\tlist_of_all_classes += [i[1]]\n\t#list_of_all_classes = list(filter(None, list_of_all_classes))\n\t#list_of_all_classes.sort()\n\tnum_classes = max(list_of_all_classes)\n\n\tprint(\"hang1\")\n\n\tadjacency_matrix = [[] for _ in range(num_classes+1)]\n\tfor current_class in 
list_of_all_classes:\n\t\tfor constraint in constraints:\n\t\t\tif current_class in constraint:\n\t\t\t\tadjacency_matrix[current_class] += constraint\n\n\tprint(\"hang2\")\n\n\t#gets rid of all instances of class_i in the adjacency list of node i\n\t#also sorts in ascending order, and removes duplicates\n\tfor class_index in range(num_classes+1):\n\t\tconstraint_i = adjacency_matrix[class_index]\n\t\tconstraint_i = [class_i for class_i in constraint_i if class_i != class_index]\n\t\tconstraint_i = list(set(constraint_i))\n\t\tconstraint_i.sort()\n\t\t#put the class as the first thing in its own adjacency list so the file is easier to read\n\t\tconstraint_i = [class_index] + constraint_i\n\t\tadjacency_matrix[class_index] = constraint_i\n\n\tmatrix_file = \"matrices/matrix\" + file_num + \".p\"\n\tpickle.dump( adjacency_matrix, open( matrix_file, \"wb\" ) )\n\treturn adjacency_matrix\n\ndef heuristic_num_constraints(P, M, N, C, items, constraints, file_num, copy_of_adjacency_matrix=None):\n\t#choose class with the least # of constraints until you can no longer choose classes\n\t#TRY MOST NUMBER OF CONSTRAINTS NEXT??\n\t#adjacency_matrix, num_classes = get_adjacency_matrix(P, M, N, C, items, constraints)\n\tif copy_of_adjacency_matrix is None:\n\t\t#solve() calls this without a matrix argument, so fall back to the matrix pickled by adjacency_matrixer\n\t\tmatrix_file = \"matrices/matrix\" + file_num + \".p\"\n\t\tcopy_of_adjacency_matrix = pickle.load( open( matrix_file, \"rb\" ) )\n\tadjacency_matrix = copy_of_adjacency_matrix\n\t#find the list in the adjacency matrix with the smallest length (the least number of constraints)\n\t#remove all classes that it conflicts with from the total classes list, put the class index into list of used classes\n\tclasses_picked = []\n\twhile len(adjacency_matrix) > 0:\n\t\t#go through adjacency matrix, looking for the list with smallest length that isn't 0\n\t\tsmallest_len = 100000000000\n\t\tsmallest_index = 0\n\t\tfor class_index in range(len(adjacency_matrix)):\n\t\t\tcurrent_list = adjacency_matrix[class_index]\n\t\t\tif len(current_list) < smallest_len and len(current_list) > 0:\n\t\t\t\tsmallest_len = len(current_list)\n\t\t\t\tsmallest_index = class_index\n\n\t\t#pick that list\n\t\tpicked_list = adjacency_matrix[smallest_index]\n\n\t\t\n\t\t#put first item into classes_picked\n\t\tclasses_picked += [adjacency_matrix[smallest_index][0]]\n\n\t\t#the rest cannot be used\n\t\t#delete each list in adjacency_matrix that starts with one of the numbers in the adjacency list that you picked\n\t\t#do this by first turning it into a blank list, since deleting it outright will mess up the iterating of the for loop\n\t\tfor class_index in range(len(adjacency_matrix)):\n\t\t\tif adjacency_matrix[class_index][0] in picked_list:\n\t\t\t\tadjacency_matrix[class_index] = []\n\n\t\t#get rid of the empty lists you made\n\t\tadjacency_matrix = [adj_list for adj_list in adjacency_matrix if adj_list != []]\n\n\tpicked_classes_file = \"picked_classes/picked_classes\" + file_num + \".p\"\n\tpickle.dump( classes_picked, open( picked_classes_file, \"wb\" ) )\n\n\n#problem 7 2075348129.0\n#problem 10 869796.4500000004\n\ndef make_list_of_classes(items, file_num, adjacency_matrix):\n\thighest_num_class = 0\n\tfor item in items:\n\t\tif item[1] > highest_num_class:\n\t\t\thighest_num_class = item[1]\n\n\tclasses = [[] for _ in range(highest_num_class+1)]\n\tfor item in items:\n\t\tclasses[item[1]] += [item]\n\n\t#delete empty classes\n\tclasses = [class_i for class_i in classes if class_i != []]\n\treturn classes\n\n#used to be (P, M, N, C, items, constraints, file_num, adjacency_matrix, classes_sorted_by_heuristic_value)\ndef 
choose_classes(adjacency_matrix, classes_sorted_by_heuristic_value):\n\tchosen_classes = []\n\twhile len(classes_sorted_by_heuristic_value) > 0:\n\t\tchosen_class = classes_sorted_by_heuristic_value[0]\n\t\tchosen_classes += [chosen_class]\n\t\tconstraint = adjacency_matrix[chosen_class]\n\t\tclasses_sorted_by_heuristic_value = [class_i for class_i in classes_sorted_by_heuristic_value if class_i not in constraint]\n\n\treturn chosen_classes\n\ndef god_class_sorting_heuristic(classes):\n\t#C is class, V is values, t it total, a is average, p is profit, d is density, pp is percent profit\n\tCV_tp = []\n\tCV_ap = []\n\tCV_tpd = []\n\tCV_apd = []\n\tCV_tpp = []\n\tCV_app = []\n\tfor class_i in classes:\n\t\tclass_i_total_profit = sum([(item[4]-item[3]) for item in class_i])\n\t\tclass_i_average_profit = class_i_total_profit / len(class_i)\n\n\t\tclass_i_total_profit_density = sum([(item[4]-item[3])/(item[2] + 0.001) for item in class_i])\n\t\tclass_i_average_profit_density = class_i_total_profit_density / len(class_i)\n\n\t\tclass_i_total_percent_profit = sum([(item[4]/(item[3] + 0.001)) for item in class_i])\n\t\tclass_i_average_percent_profit = class_i_total_percent_profit / len(class_i)\n\n\n\n\t\t#class_i contains all items of class i, so in order to get the name of class i, have to do class_i[0][1]\n\t\tclass_name = class_i[0][1]\n\t\tCV_tp += [(class_name, class_i_total_profit)]\n\t\tCV_ap += [(class_name, class_i_average_profit)]\n\t\tCV_tpd += [(class_name, class_i_total_profit_density)]\n\t\tCV_apd += [(class_name, class_i_average_profit_density)]\n\t\tCV_tpp += [(class_name, class_i_total_percent_profit)]\n\t\tCV_app += [(class_name, class_i_average_percent_profit)]\n\n\tCV_tp = sorted(CV_tp, key=lambda x: x[1], reverse=True )\n\tCV_ap = sorted(CV_ap, key=lambda x: x[1], reverse=True )\n\tCV_tpd = sorted(CV_tpd, key=lambda x: x[1], reverse=True )\n\tCV_apd = sorted(CV_apd, key=lambda x: x[1], reverse=True )\n\tCV_tpp = sorted(CV_tpp, key=lambda x: x[1], reverse=True )\n\tCV_app = sorted(CV_app, key=lambda x: x[1], reverse=True )\n\n\tCV_tp = [class_value[0] for class_value in CV_tp]\n\tCV_ap = [class_value[0] for class_value in CV_ap]\n\tCV_tpd = [class_value[0] for class_value in CV_tpd]\n\tCV_apd = [class_value[0] for class_value in CV_apd]\n\tCV_tpp = [class_value[0] for class_value in CV_tpp]\n\tCV_app = [class_value[0] for class_value in CV_app]\n\treturn CV_tp, CV_ap, CV_tpd, CV_apd, CV_tpp, CV_app\n\n\n#assumes items are already sorted in the correct order\ndef choose_items_greedy(P, M, N, C, items, classes_to_choose_from, file_num):\n\titems_to_choose_from = [item for item in items if item[1] in classes_to_choose_from]\n\titems_chosen = []\n\ttotal_profit = 0\n\tfor item in items_to_choose_from:\n\t\tp = item[2]\n\t\tm = item[3]\n\t\tif P >= p and M >= m:\n\t\t\t#only need the names of the items\n\t\t\titems_chosen += [item[0]]\n\t\t\t#TOTAL PROFIT DOESNT HAVE COST SUBTRACTED BECAUSE COST IS ALREADY BEING SUBTRACTED FROM M\n\t\t\ttotal_profit += item[4]\n\t\t\tP -= p\n\t\t\tM -= m\n\n\t#print(\"Problem \" + file_num + \" total money: \" + str(M + total_profit))\n\t#print(\"Problem \" + file_num + \" leftover weight: \" + str(P))\n\n\treturn (items_chosen, M + total_profit)\n\n#could maybe apply this to ALL of your classes?\n'''\ndef choose_items_hybrid(P, M, N, C, items, classes_to_choose_from, file_num):\n\t#list items by total profit\n\t#pick until you can no longer pick\n\t#if you reached your money limit first, it doesn't help\n\t#however, if you reach your weight limit 
first, you can replace some heavy items with some more economic choices\n\t#you have 2 lists of items, the large profit items you chose, and the unused items that could have better profit density\n\t#\n\n\n\t#reorganize remainder of items to be \n\titems_to_choose_from = [item for item in items if item[1] in classes_to_choose_from]\n\titems_chosen = []\n\ttotal_profit = 0\n\tfor item in items_to_choose_from:\n\t\tp = item[2]\n\t\tm = item[3]\n\t\tif P >= p and M >= m:\n\t\t\t#only need the names of the items\n\t\t\titems_chosen += [item[0]]\n\t\t\t#TOTAL PROFIT DOESNT HAVE COST SUBTRACTED BECAUSE COST IS ALREADY BEING SUBTRACTED FROM M\n\t\t\ttotal_profit += item[4]\n\t\t\tP -= p\n\t\t\tM -= m\n\n\t#print(\"Problem \" + file_num + \" total money: \" + str(M + total_profit))\n\t#print(\"Problem \" + file_num + \" leftover weight: \" + str(P))\n\n\treturn (items_chosen, M + total_profit)\n'''\n\ndef heuristic_average_value_density(P, M, N, C, items, constraints, file_num):\n\tpicked_classes_file = \"picked_classes/picked_classes\" + file_num + \".p\"\n\tclasses_picked = pickle.load( open( picked_classes_file, \"rb\" ) )\n\n\t#dont think you need sorted items just yet\n\t#first just choose valid items from the regular list (items), only including them if their class is in classes_picked\n\t#if statement is to get rid of items that would cause division by 0 error. Doesn't matter since they cost more than they're worth\n\titems_to_choose_from = [item for item in items if item[1] in classes_picked if item[4]-item[3] > 0]\n\n\titems_to_choose_from_sorted = sorted(items_to_choose_from, key=lambda x: (x[2]/(x[4]-x[3])) )\n\n\titems_chosen = []\n\ttotal_resale = 0\n\tfor item_index in range(len(items_to_choose_from_sorted)):\n\t\tpossible_item = items_to_choose_from_sorted[item_index]\n\t\tp = possible_item[2]\n\t\tm = possible_item[3]\n\t\tif P >= p and M >= m:\n\t\t\t#only need the names of the items\n\t\t\titems_chosen += [possible_item[0]]\n\t\t\ttotal_resale += possible_item[4]\n\t\t\tP -= p\n\t\t\tM -= m\n\n\ttotal_profit_file = \"output/total_profit\" + file_num + \".p\"\n\tpickle.dump( total_resale, open( total_profit_file, \"wb\" ) )\n\n\treturn items_chosen\n\n\"\"\"\n===============================================================================\n\tNo need to change any code below this line.\n===============================================================================\n\"\"\"\n\ndef read_input(filename):\n\t\"\"\"\n\tP: float pounds\n\tM: float money\n\tN: integer num items\n\tC: integer num constraints\n\titems: list of tuples\n\tconstraints: list of sets\n\t\"\"\"\n\twith open(filename) as f:\n\t\tP = float(f.readline())\n\t\tM = float(f.readline())\n\t\tN = int(f.readline())\n\t\tC = int(f.readline())\n\t\titems = []\n\t\tconstraints = []\n\t\tfor i in range(N):\n\t\t\tname, cls, weight, cost, val = f.readline().split(\";\")\n\t\t\titems.append((name, int(cls), float(weight), float(cost), float(val)))\n\t\tfor i in range(C):\n\t\t\tconstraint = set(eval(f.readline()))\n\t\t\tconstraints.append(constraint)\n\treturn P, M, N, C, items, constraints\n\ndef write_output(filename, items_chosen):\n\twith open(filename, \"w\") as f:\n\t\tfor i in items_chosen:\n\t\t\tf.write(\"{0}\\n\".format(i))\n\nif __name__ == \"__main__\":\n\n\tparser = argparse.ArgumentParser(description=\"PickItems solver.\")\n\tparser.add_argument(\"input_file\", type=str, help=\"____.in\")\n\tparser.add_argument(\"output_file\", 
type=str, help=\"____.out\")\n\tparser.add_argument(\"mode\", type=str, help=\"____.dongus\")\n\targs = parser.parse_args()\n\n\tP, M, N, C, items, constraints = read_input(args.input_file)\n\tmode = args.mode\n\titems_chosen = solve(P, M, N, C, items, constraints, mode, args.input_file)\n\twrite_output(args.output_file, items_chosen)","sub_path":"CS170Project_Greedy_Round4/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":20588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"593985032","text":"__author__: \"Bai Neng\"\nimport es_stocks_cash as es\nimport to_Json_Yaml as tf\nimport urllib.request\nimport tushare as ts\nimport re,json,sys,os\nimport datetime,time\n\ndef sinaStockUrl( max_pg ):\n \n #print( 'pageNum : ' + str( pageNum ))\n #http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/MoneyFlow.ssl_bkzj_ssggzj?page=38\n max_pg = max_pg + 1\n rows = 100\n url_list = []\n pageNum = 0\n for pageNum in range(max_pg) :\n url = 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/MoneyFlow.ssl_bkzj_ssggzj?'\n url += 'page=' + str( pageNum )\n #url += '&num=' + str( rows )\n #url += '&sort=symbol&asc=1&node=hs_a&symbol=&_s_r_a=init'\n pageNum = pageNum + 1 \n url_list.append(url)\n return url_list\n\ndef sinaStockData(url_list):\n \n # http://www.cnblogs.com/sysu-blackbear/p/3629420.html\n stockData = []\n for url in range(0,len(url_list)):\n #print(url_list[url])\n histData = urllib.request.urlopen(url_list[url]).read()\n '''\n On April 23: Change histData.decode('gbk') to histData.decode('unicode_escape')\n '''\n histData = histData.decode('unicode_escape')\n histData = str(histData).split('[')[1]\n histData = histData[1:len(histData) - 4].split('},{')\n for i in range(0, len(histData)):\n column = {}\n dayData = histData[i].split(',')\n for j in range(0, len(dayData)):\n field = dayData[j].split(':')\n try:\n field[0] = field[0].replace('\"', '')\n column[field[0]] = field[1].replace('\"', '')\n except:\n continue\n stockData.append(column) \n return stockData\n \n\nif __name__ == '__main__':\n \n if (len(sys.argv) <2):\n print('Please input paramente:trade_date,format [yyyy.mm.dd] and load_time ,format [yyyy.mm.dd hh:mm:ss]')\n exit(\"sorry,please input correct parameter\")\n else:\n trade_date = sys.argv[1]\n load_time = sys.argv[2]\n '''\n \n trade_date = '2020.03.11'\n load_time = '2020-03-11 12:00:00'\n '''\n \n '''\n 2. get stock raw data from sina url link and append items: load_time and trade_date\n '''\n url = sinaStockUrl(66)\n str_stocks = sinaStockData(url)\n print(str_stocks)\n \n list = []\n for i in range(0,len(str_stocks)):\n li = str_stocks[i]\n li['insert_time'] = load_time\n li['trade_date'] = trade_date\n list.append(li)\n \n # 3. read from yaml config \n current_path = os.path.abspath(\".\")\n yml_file = os.path.join(current_path, \"config-sina-cash.yaml\")\n file = open(yml_file, 'r', encoding=\"utf-8\")\n file_data = file.read()\n file.close()\n \n '''\n 4. Ready for index mapping setting\n '''\n \n data_list = tf.yaml_toJson(file_data)\n for i in range(0,len(data_list)):\n if i == 0:\n index_map = data_list[0]\n print(index_map)\n else:\n file_list = json.loads(data_list[1])\n index_name = file_list['index']['index_name']\n index_name = index_name + trade_date\n index_type = file_list['index']['index_type']\n '''\n 5. 
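choose_items_greedy in the solver above walks a pre-sorted item list and takes anything that still fits both the weight and money budgets. A condensed sketch of that greedy pass, folding in the profit-density sort and the pre-filter from solve(); the toy item tuples are made up:

```python
def greedy_pick(P, M, items):
    """Greedy pass over items sorted by profit density.

    items: (name, cls, weight, cost, resale) tuples; returns chosen names.
    """
    # Drop items that cannot help: non-positive profit or over either budget.
    usable = [it for it in items if it[4] - it[3] > 0 and it[2] <= P and it[3] <= M]
    # Cheapest weight per unit of profit first (the snippet's x[2]/(x[4]-x[3]) key).
    usable.sort(key=lambda it: it[2] / (it[4] - it[3]))
    chosen = []
    for name, _cls, weight, cost, _resale in usable:
        if weight <= P and cost <= M:
            chosen.append(name)
            P -= weight   # spend the weight budget
            M -= cost     # spend the money budget
    return chosen

items = [("a", 0, 5.0, 2.0, 10.0), ("b", 0, 9.0, 1.0, 4.0), ("c", 0, 1.0, 8.0, 9.0)]
print(greedy_pick(10.0, 10.0, items))  # toy data -> ['a', 'c']
```

The solver tries this pass under several different sort keys (profit density, total profit, percent profit) and keeps whichever yields the most money, which is exactly what the long winner-selection chain above implements.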
Ready to load stock data to ES\n '''\n \n obj = es.Search(index_name, index_type)\n obj.create_index(index_name,index_type,index_map)\n obj.bulk_Index_Data(list)\n ","sub_path":"sina_cash_import_to_es.py","file_name":"sina_cash_import_to_es.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"394014215","text":"class Book:\r\n def __init__(self, author=\"\", title=\"\"):\r\n self.author = author\r\n self.title = title\r\n\r\ndef display(bookname):\r\n print(bookname.title + \" written by \" + bookname.author)\r\n\r\nif __name__ == '__main__':\r\n a = Book(\"John Steinbeck\", \"Of Mice and Men\")\r\n b = Book(\"Harper Lee\", \"Kill a Mockingbird\")\r\n display(a)\r\n display(b)","sub_path":"assignment1_part2.py","file_name":"assignment1_part2.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"617518073","text":"\"\"\"\nNote: Running scriptJob(listEvents = True) identified some animLayer events\nnot listed in the documentation that we use in this class. They are:\n\t- animLayerRebuild\n\t- animLayerRefresh\t\t\t\t# Any changes that affect the animLayer panel like selection.\n\t- animLayerAnimationChanged\t\t# When an animLayer attribute like solo, mute, lock, etc changes.\n\t- animLayerLockChanged\n\t- animLayerBaseLockChanged\n\t- animLayerGhostChanged\n\"\"\"\n\nimport maya.cmds as MCmds\nimport U4_AnimLayerUtils\nimport U4_AnimTakeUtils\nfrom functools import partial\nfrom U4_AnimTakeWidget import U4_AnimTakeWidget\nfrom U4_MenuItem_BlockingCheckBox import U4_MenuItem_BlockingCheckBox\nfrom U4_Widgets import *\n\nclass U4_AnimTakesPanel(U4_WorkspaceControl):\n\t\"\"\"\n\tLists animation takes with controls for exporting, mirroring, time ranges, etc.\n\t\"\"\"\n\n\tdef buildContents(self):\n\t\tmenuBar = U4_MenuBarLayout()\n\n\t\tviewMenu = U4_Menu().setLabelText(\"View\")\n\t\tU4_MenuItem().setLabelText(\"Refresh\").onClicked(self._onRefreshClicked)\n\t\tU4_MenuItem_BlockingCheckBox()\n\n\t\tself._exportMenu = U4_Menu().setLabelText(\"Export\")\n\t\tself._mirrorMenu = U4_Menu().setLabelText(\"Mirror\")\n\n\t\tformLayout = U4_FormLayout()\n\t\tself._searchField = U4_TextField(search = True).onTextChanged(self._onSearchTextChanged)\n\t\tself._takesLayout = U4_ScrollLayout()\n\n\t\tformLayout.attachWidgetToWidget(self._takesLayout, topEdge, self._searchField)\n\t\tformLayout.attachWidgetToForm(self._takesLayout, leftEdge | bottomEdge | rightEdge)\n\t\tformLayout.attachWidgetToForm(self._searchField, leftEdge)\n\n\t\tself.refreshTakes()\n\n\t\tself.attachScriptJob(event = [\"playbackRangeChanged\", self._onPlaybackRangeChanged])\n\t\tself.attachScriptJob(event = [\"playbackRangeSliderChanged\", self._onPlaybackRangeSliderChanged])\n\t\tself.attachScriptJob(event = [\"animLayerRefresh\", self._onAnimLayerRefresh])\n\n\tdef getDefaultLabelText(self):\n\t\treturn \"Anim Takes\"\n\n\tdef refreshTakes(self):\n\t\tself._exportMenu.deleteAllItems()\n\t\tself._exportMenu.makeActiveParent()\n\t\tU4_MenuItem().setLabelText(\"All\").onClicked(self._onExportAllClicked)\n\t\tU4_MenuItemDivider()\n\n\t\tself._mirrorMenu.deleteAllItems()\n\t\tself._mirrorMenu.makeActiveParent()\n\t\tU4_MenuItem().setLabelText(\"All\").onClicked(self._onBakeAllMirrorsClicked)\n\t\tU4_MenuItemDivider()\n\n\t\tself._takesLayout.makeActiveParent()\n\t\tself._takesLayout.deleteChildren()\n\n\t\tself.widgets = []\n\t\tallAnimTakes = 
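sinaStockData earlier in this file tokenizes Sina's JS-style response (unquoted keys, not real JSON) by slicing on '[', '},{' and ':'. A compact sketch of the same manual tokenizing; the toy payload is an assumption modeled on that format:

```python
def parse_js_records(payload):
    """Split a JS-style array of objects ('[{k:"v",...},{...}]') into dicts,
    mirroring the manual split('[') / split('},{') approach in the snippet."""
    body = payload.split('[', 1)[1]          # drop everything before the array
    body = body[:body.rfind(']')].strip('{}')  # drop the tail and outer braces
    records = []
    for chunk in body.split('},{'):          # one chunk per object
        row = {}
        for pair in chunk.split(','):
            if ':' not in pair:
                continue
            key, value = pair.split(':', 1)
            row[key.strip('"')] = value.strip('"')
        records.append(row)
    return records

toy = '([{symbol:"sh600000",name:"PF Bank",r0_net:"1.2"},{symbol:"sz000001",name:"PA Bank",r0_net:"-0.4"}])'
print(parse_js_records(toy))
```

Splitting on commas breaks down if a value itself contains a comma; the original code has the same limitation, which is why it wraps the field assignment in a try/except.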
U4_AnimTakeUtils.getAllAnimTakes()\n\t\thasTakes = len(allAnimTakes) > 0\n\n\t\tif hasTakes:\n\t\t\tU4_RadioCollection()\n\n\t\t\tU4_RowColumnLayout(numColumns = 5).setColumnWidth(0, 20).setColumnWidth(1, 150).setColumnWidth(2, 300).setColumnWidth(3, 170).setColumnWidth(4, 200)\n\t\t\tU4_TextBlock()\n\t\t\tU4_TextBlock().setText(\"Take\").setFont(font_boldLabel)\n\t\t\tU4_TextBlock().setText(\"Path\").setFont(font_boldLabel)\n\t\t\tU4_TextBlock().setText(\"Range\").setFont(font_boldLabel)\n\t\t\tU4_TextBlock().setText(\"Actions\").setFont(font_boldLabel)\n\n\t\t\tfor take in allAnimTakes:\n\t\t\t\tself._takesLayout.makeActiveParent()\n\t\t\t\twidget = U4_AnimTakeWidget(take)\n\t\t\t\tself.widgets.append(widget)\n\n\t\t\t\tself._exportMenu.makeActiveParent()\n\t\t\t\tU4_MenuItem().setLabelText(take).onClicked(widget._onExportClicked)\n\t\t\t\tself._mirrorMenu.makeActiveParent()\n\t\t\t\tU4_MenuItem().setLabelText(take).onClicked(widget._onMirrorClicked)\n\n\t\tself._exportMenu.setIsEnabled(hasTakes)\n\t\tself._mirrorMenu.setIsEnabled(hasTakes)\n\n\t\tself._refreshActiveTakeWidget()\n\n\tdef getTakeWidget(self, take):\n\t\t\"\"\"\n\t\tFind widget associated with a given anim take.\n\t\t\"\"\"\n\t\tfor widget in self.widgets:\n\t\t\tif widget.getTake() == take:\n\t\t\t\treturn widget\n\t\treturn None\n\n\tdef _onTimeValuesChanged(self, times):\n\t\tprint(times)\n\n\tdef _onRefreshClicked(self, *args):\n\t\tself.refreshTakes()\n\n\tdef _onBlockingCheckBoxChanged(self, *args):\n\t\t\"\"\"\n\t\tCalled when the blocking check box is ticked or unticked.\n\t\t\"\"\"\n\t\tisTicked = args[0]\n\t\tMCmds.playbackOptions(edit = True, blockingAnim = isTicked)\n\n\tdef _onPlaybackRangeChanged(self):\n\t\t\"\"\"\n\t\tCalled from scriptJob when the preview playback range changes.\n\t\t\"\"\"\n\t\tself._onTimeSliderChanged()\n\n\tdef _onPlaybackRangeSliderChanged(self):\n\t\t\"\"\"\n\t\tCalled from scriptJob when the total animation range changes.\n\t\t\"\"\"\n\t\tself._onTimeSliderChanged()\n\n\tdef _onAnimLayerRefresh(self, *args):\n\t\t\"\"\"\n\t\tCalled from scriptJob when an animLayer attribute changes.\n\t\tWe use it to detect when the active anim take changes.\n\t\t\"\"\"\n\t\tself._refreshActiveTakeWidget()\n\n\tdef _refreshActiveTakeWidget(self):\n\t\t\"\"\"\n\t\tSync radio button with selected anim take.\n\t\tCalled during refreshTakes and onAnimLayerRefresh.\n\t\t\"\"\"\n\t\tactiveTake = U4_AnimTakeUtils.getActiveAnimTake()\n\t\tif not activeTake:\n\t\t\treturn\n\n\t\twidget = self.getTakeWidget(activeTake)\n\t\tif not widget:\n\t\t\treturn\n\n\t\twidget._activeRadioButton.setValue(True)\n\n\tdef _onTimeSliderChanged(self):\n\t\tactiveTake = U4_AnimTakeUtils.syncActiveTakeWithTimeSlider()\n\t\twidget = self.getTakeWidget(activeTake)\n\t\tif widget:\n\t\t\twidget.refreshTimeValues()\n\n\tdef _onExportAllClicked(self, *args):\n\t\tU4_AnimTakeUtils.exportAllTakes()\n\n\tdef _onBakeAllMirrorsClicked(self, *args):\n\t\tU4_AnimTakeUtils.bakeMirrorTakes()\n\n\tdef _onSearchTextChanged(self, text):\n\t\tfor widget in self.widgets:\n\t\t\tmatches = text in widget.getTake()\n\t\t\twidget.setIsVisible(matches)\n","sub_path":"scripts/U4_Anim/U4_AnimTakesPanel.py","file_name":"U4_AnimTakesPanel.py","file_ext":"py","file_size_in_byte":5188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"360093129","text":"\"\"\"Integration test module.\"\"\"\nimport subprocess\nimport logging\nimport os\n\n\nclass IntegrationTest(object):\n \"\"\"Base class for 
Integration Tests.\"\"\"\n\n WORKING_DIR = os.path.abspath(os.path.dirname(__file__))\n LOGGER = logging.getLogger('testsuite')\n\n def __init__(self, options=None):\n \"\"\"Initialize base class.\"\"\"\n if options is None:\n self.options = {}\n else:\n self.options = options\n\n @staticmethod\n def run_command(cmd_list, env_vars=None):\n \"\"\"Shell out to provisioner command.\"\"\"\n try:\n subprocess.check_call(cmd_list, env=env_vars)\n except subprocess.CalledProcessError as shelloutexc:\n return shelloutexc.returncode\n return 0\n\n def init(self):\n \"\"\"Implement dummy method (set in consuming classes).\"\"\"\n raise NotImplementedError('You must implement the init() method '\n 'yourself!')\n\n def run(self):\n \"\"\"Implement dummy method (set in consuming classes).\"\"\"\n raise NotImplementedError('You must implement the run() method '\n 'yourself!')\n\n def teardown(self):\n \"\"\"Implement dummy method (set in consuming classes).\"\"\"\n raise NotImplementedError('You must implement the teardown() method '\n 'yourself!')\n","sub_path":"integration_tests/integration_test.py","file_name":"integration_test.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"442674406","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pygame\r\nimport random\r\nimport math\r\nimport os\r\n\r\nos.system('cls')\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\n\r\nold_v = tf.logging.get_verbosity()\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\n\r\nlearningRate = 0.006\r\nepochs = 2000\r\nbatchSize = 1\r\n\r\ndf = pd.read_csv(\"C:\\\\Users\\\\Brian\\\\Desktop\\\\data.csv\")\r\nX = df[df.columns[0]].values\r\nY = df[df.columns[1]].values\r\n\r\ndef getBatch(batchSize, inputs, outputs):\r\n idx = np.arange(0,len(inputs))\r\n np.random.shuffle(idx)\r\n idx = idx[:batchSize]\r\n xBatch = [inputs[i] for i in idx]\r\n yBatch = [outputs[i] for i in idx]\r\n xBatch = np.reshape(xBatch, (batchSize,1))\r\n return np.asarray(xBatch), np.asarray(yBatch)\r\n\r\nw = tf.Variable(0.0, tf.float32)\r\nb = tf.Variable(0.0, tf.float32)\r\n\r\nx = tf.placeholder(tf.float32)\r\ny = tf.placeholder(tf.float32)\r\n\r\nprediction = tf.add(tf.multiply(x,w), b)\r\n\r\ncost = tf.reduce_sum(tf.square(prediction-y))\r\n\r\noptimizer = tf.train.AdamOptimizer(learningRate).minimize(cost)\r\n\r\ninit = tf.global_variables_initializer()\r\n\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n for epoch in range(epochs):\r\n xBatch, yBatch = getBatch(batchSize,X,Y)\r\n sess.run(optimizer, feed_dict={x: xBatch, y: yBatch})\r\n\r\n if(epoch+1) % 50 == 0:\r\n c = sess.run(cost, feed_dict={x: X, y: Y})\r\n print(\"Epoch:\", (epoch+1), \"cost=\", \"{:.4f}\".format(c), \"w=\", sess.run(w), \"b=\", sess.run(b))\r\n \r\n print(\"Optimization Finished\")\r\n trainingCost = sess.run(cost, feed_dict={x: X, y:Y})\r\n print(\"Training cost=\", trainingCost, \"w=\", sess.run(w), \"b=\", sess.run(b))\r\n filename = \"values.txt\"\r\n file = open(filename, 'w+')\r\n file.write(str(sess.run(w)) + \",\" + str(sess.run(b)))\r\n\r\n\r\n \r\ntf.logging.set_verbosity(old_v)\r\n","sub_path":"tensor.py","file_name":"tensor.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"262422845","text":"#This is a number-guessing game\r\n\r\nimport random\r\n\r\nIntentos=0\r\nprint(\"Hi, what is your name? 
\")\r\nnombre=input()\r\n\r\nnumero= random.randint(1,20)\r\nprint(\"Well,\", nombre, \"I am thinking of a number between 1 and 20\")\r\n\r\nfor Intentos in range (6):\r\n print(\"guess\") #four spaces in front of \"print\"\r\n intento=input()\r\n intento=int(intento)\r\n \r\n if intento < numero:\r\n print(\"your number is lower than the answer\") #8 spaces in front of \"print\"\r\n \r\n if intento > numero:\r\n print(\"your number is higher than the answer\")\r\n \r\n if intento==numero:\r\n break\r\n \r\nif intento == numero:\r\n Intentos = str(Intentos +1)\r\n print(\"Well done\", nombre, \", you guessed the number in\", Intentos, \"tries\")\r\n\r\nif intento != numero:\r\n numero=str(numero)\r\n print(\"No, the number I was thinking of was\", numero,\".\")\r\n ","sub_path":"Lección 3 de python.py","file_name":"Lección 3 de python.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"183680002","text":"def conj(*sentencas):\n retorno = \"\"\n retorno += sentencas[0]\n for i in sentencas[1:]:\n retorno += \" ! \" + i\n\n retorno = \"((\" + retorno + \")\" + \" ! \" + \"(\" + retorno + \"))\" \n return retorno\n \ndef disj(*sentencas):\n retorno = \"\"\n retorno += \"(\" + sentencas[0] + \" ! \" + sentencas[0] + \")\"\n for i in sentencas[1:]:\n retorno += \" ! \" + \"(\" + i + \" ! \" + i + \")\"\n\n return \"(\" + retorno + \")\"\n\ndef neg(sentenca):\n retorno = \"(\" + sentenca + \")\" + \"!\" + \"(\" + sentenca + \")\"\n return \"(\" + retorno + \")\"\n\ndef cond(sentenca1, sentenca2):\n sentenca1 = neg(sentenca1)\n return disj(sentenca1, sentenca2)\n\ndef bicon(sentenca1, sentenca2):\n return conj(cond(sentenca1, sentenca2), cond(sentenca2, sentenca1))\n\n\ndef gerar_sentencas_atomicas(lista_tokens):\n atoms = []\n for i in lista_tokens:\n if i[2] == \"ATOM\" and i[1] not in atoms:\n atoms.append(i[1])\n\n return atoms\n\ndef substituir_conectivos(expressao, lista_tokens):\n atoms = gerar_sentencas_atomicas(lista_tokens)\n for i in atoms:\n a = i\n exec(f\"{i} = '{str(i)}'\")\n\n return eval(expressao)\n\n\nif __name__ == \"__main__\":\n from analisador_lexico import *\n from analisador_sintatico import *\n from criador_expressao import *\n\n string = input(\"Enter a logical sentence: \")\n string = \"(\" + string + \")\"\n\n valido, lista_simbolos = identificar_tokens(string, padrao_tokens)\n\n posicoes = identificar_sentencas(lista_simbolos)\n expressao = criar_expressao(lista_simbolos,posicoes)\n print(expressao)\n print(substituir_conectivos(expressao,lista_simbolos))","sub_path":"conectivo_sheffer.py","file_name":"conectivo_sheffer.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"627362988","text":"import os\nimport re\nimport json\nimport time\nimport urllib2\nimport logging\nfrom vncdotool import api\nfrom selenium import webdriver\nfrom fabric.api import run, settings, put, local, get, env\nfrom utils.htmlparser import MyHTMLParser\nfrom pages.hosted_engine_page import HePage\nfrom cases.helpers import CheckBase\nfrom utils.constants import PROJECT_ROOT\nfrom utils.rhvmapi import RhevmAction\n\n\nlog = logging.getLogger(\"bender\")\n\n\nclass TestHostedEngine(CheckBase):\n page = None\n\n def set_page(self):\n self.page = HePage(self._driver)\n\n def _get_latest_rhvm_appliance(self, appliance_path):\n \"\"\"\n Purpose:\n Get the latest 
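conectivo_sheffer.py rebuilds the usual connectives from the Sheffer stroke alone, reading `!` as NAND: negation is (a ! a), conjunction is the negated NAND ((a ! b) ! (a ! b)), and disjunction is the NAND of the negations ((a ! a) ! (b ! b)). A standalone truth-table check of those NAND identities; the Python helper names are mine, not from the snippet:

```python
from itertools import product

def nand(a, b):
    return not (a and b)

# Connectives built from NAND alone, mirroring the string constructions above.
def neg(a):
    return nand(a, a)                         # (a ! a)

def conj(a, b):
    x = nand(a, b)
    return nand(x, x)                         # ((a ! b) ! (a ! b))

def disj(a, b):
    return nand(nand(a, a), nand(b, b))       # ((a ! a) ! (b ! b))

# Exhaustively verify against the built-in boolean operators.
for a, b in product([False, True], repeat=2):
    assert conj(a, b) == (a and b)
    assert disj(a, b) == (a or b)
    assert neg(a) == (not a)
print("all NAND constructions verified")
```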
rhvm appliance from appliance parent path\n \"\"\"\n log.info(\"Getting the latest rhvm4.2 appliance...\")\n req = urllib2.Request(appliance_path)\n rhvm_appliance_html = urllib2.urlopen(req).read()\n\n mp = MyHTMLParser()\n mp.feed(rhvm_appliance_html)\n mp.close()\n mp.a_texts.sort()\n\n link_42 = []\n all_link = mp.a_texts\n for link in all_link:\n if \"4.2\" in link:\n link_42.append(link)\n\n latest_rhvm_appliance_name = link_42[-1]\n latest_rhvm_appliance = appliance_path + latest_rhvm_appliance_name\n return latest_rhvm_appliance\n\n def _install_rhvm_appliance(self):\n log.info(\"Getting and installing the latest rhvm appliance ...\")\n rhvm_appliance_link = self._get_latest_rhvm_appliance(self._config['rhvm_appliance_path'])\n local_rhvm_appliance = \"/root/%s\" % rhvm_appliance_link.split('/')[-1]\n output = self.run_cmd(\"curl -o %s %s\" % (local_rhvm_appliance, rhvm_appliance_link))\n if output[0]==\"False\":\n log.error(\"Failed to download the latest rhvm appliance...\")\n \n self.run_cmd(\"rpm -ivh %s --force\" % local_rhvm_appliance)\n self.run_cmd(\"rm -rf %s\" % local_rhvm_appliance)\n\n def _add_to_etc_host(self):\n log.info(\"Adding the host to /etc/hosts...\")\n host_name = self.run_cmd(\"hostname\")[1]\n self.run_cmd(\"echo '%s %s' >> /etc/hosts\" % (self.host_string, host_name))\n self.run_cmd(\"echo '%s' > /etc/hostname\" % host_name)\n self.run_cmd(\"hostname %s\" % host_name)\n self.run_cmd(\"echo '%s %s' >> /etc/hosts\" % (self._config['he_vm_ip'], self._config['he_vm_fqdn']))\n\n def _clean_nfs_storage(self, nfs_path):\n log.info(\"Cleaning the he nfs storage...\")\n with settings(\n host_string=self._config['nfs_ip'],\n user=\"root\",\n password=self._config['nfs_password'],\n disable_known_hosts=True,\n connection_attempts=60):\n run(\"rm -rf %s/*\" % nfs_path)\n\n def _move_failed_setup_log(self):\n log.info(\"Moving the failed ovirt-hosted-engine-setup.log to the old dir...\")\n ret = self.run_cmd(\"find /var/log -type f |grep ovirt-hosted-engine-setup-.*.log\")\n if ret[0] == True:\n if os.path.exists(\"/var/old_failed_setup_log\") == False:\n self.run_cmd(\"mkdir -p /var/old_failed_setup_log\")\n self.run_cmd(\"mv /var/log/ovirt-hosted-engine-setup/*.log \\\n /var/old_failed_setup_log/\")\n else:\n pass\n\n def _wait_host_status(self, rhvm_ins, host_name, expect_status):\n log.info(\"Waitting for the host %s\" % expect_status)\n i = 0\n host_status = \"unknown\"\n while True:\n if i > 50:\n raise RuntimeError(\n \"Timeout waitting for host %s as current host status is: %s\"\n % (expect_status, host_status))\n host_status = rhvm_ins.list_host(\"name\", host_name)['status']\n log.info(\"HOST: %s\" % host_status)\n if host_status == expect_status:\n break\n elif host_status == 'install_failed':\n raise RuntimeError(\"Host is not %s as current status is: %s\" %\n (expect_status, host_status))\n elif host_status == 'non_operational':\n raise RuntimeError(\"Host is not %s as current status is: %s\" %\n (expect_status, host_status))\n time.sleep(10)\n i += 1\n\n def _set_up(self):\n #super(TestHostedEngine, self).setup()\n try:\n self._move_failed_setup_log()\n self._install_rhvm_appliance()\n self._add_to_etc_host()\n self._clean_nfs_storage(self._config['he_install_nfs'])\n except Exception as e:\n log.info(\"Failed to init the HostedEngine ENV...\")\n return False, e\n return True\n\n def _he_install(self):\n # Setup the HostedEngine ENV\n self._set_up()\n try:\n # VM page\n with self.page.switch_to_frame(self.page.frame_right_name):\n log.info(\"Starting to 
deploy HostedEngine...\")\n self.page.deploy_icon.click()\n self.page.wait(10)\n # MAC Address\n log.info(\"Input the VM MAC Address...\")\n self.page.mac_address.clear()\n self.page.wait(2)\n self.page.mac_address.send_keys(self._config['he_vm_mac'])\n self.page.wait(1)\n # VM hostname\n log.info(\"input the Engine hostname...\")\n self.page.engine_hostname.clear()\n self.page.wait(1)\n self.page.engine_hostname.send_keys(self._config['he_vm_fqdn'].split(\".\")[0])\n self.page.wait(1)\n # Domain name\n log.info(\"Input the domain name...\") \n self.page.domain_name.clear()\n self.page.wait(1)\n self.page.domain_name.send_keys(self._config['he_vm_domain'])\n self.page.wait(1)\n # VM root password\n log.info(\"Input the VM root password...\")\n self.page.passwd[0].send_keys(self._config['he_vm_password'])\n self.page.wait(1)\n self.page.passwd[1].send_keys(self._config['he_vm_password'])\n self.page.wait(1)\n self.page.next_button.click()\n self.page.wait(5)\n\n # Engine page\n log.info(\"Input the Engine admin password...\")\n self.page.passwd[2].send_keys(self._config['engine_password'])\n self.page.wait(1)\n self.page.passwd[3].send_keys(self._config['engine_password'])\n self.page.wait(1)\n self.page.next_button.click()\n self.page.wait(5)\n\n # Storage page\n log.info(\"Input the HE-VM Storage path\")\n nfs_path = self._config['nfs_ip'] + ':' +self._config['he_install_nfs']\n self.page.nfs_path.send_keys(nfs_path)\n self.page.wait(1)\n self.page.next_button.click()\n self.page.wait(5)\n\n # Network page\n log.info(\"Configure the Network page...\")\n self.page.next_button.click()\n self.page.wait(5)\n\n # Review page\n log.info(\"Review all the configure about HostedEngine...\")\n self.page.deploy_button.click()\n self.page.wait(50)\n log.info(\"Select the appliance...\")\n self.page.default_button[2].click()\n\n self.page.wait(1400)\n except Exception as e:\n log.exception(e)\n return False\n return True\n\n def check_he_install(self):\n true, false = True, False\n vm_status = {'detail': 'Up', 'health': 'good', 'vm': 'up'}\n log.info(\"Checking HostedEngine install...\")\n try:\n self._he_install()\n ret = self.run_cmd(\"hosted-engine --check-deployed\")\n ret_st = self.run_cmd(\"hosted-engine --vm-status --json\")\n \n if ret[0] == True:\n if json.loads(ret_st[1])['1']['engine-status'] == vm_status:\n log.info(\"HE is deployed on %s and HE-VM is up\" % self.host_string)\n else:\n log.info(\"HE is deployed on %s but HE-VM is not up\" % self.host_string)\n return False\n else:\n log.error(\"HE is not deployed on %s\" % self.host_string)\n return False\n\n ret_log = self.run_cmd(\"find /var/log -type f |grep ovirt-hosted-engine-setup-.*.log\")\n if ret_log[0] == True:\n log.info(\"Hosted Engine setup log found\")\n else:\n log.error(\"No hosted engine setup log found\")\n return False\n\n he_res = self.run_cmd(\"grep 'Hosted Engine successfully deployed' %s\" % ret_log[1])\n if he_res[0] == True:\n log.info(\"Found the successfully message in the setup log %s\" % he_res[1])\n else:\n log.error(\"Not found the succeddfully message in the setup log %s\" % he_res[1])\n return False\n\n except Exception as e:\n log.exception(e)\n return False\n return True\n\n def check_he_hint(self):\n log.info(\"Check the local maintenance on one host hint...\")\n try:\n with self.page.switch_to_frame(self.page.frame_right_name):\n self.page.check_local_maintenance_hint()\n self.page.wait(5)\n except Exception as e:\n log.exception(e)\n return False\n return True\n\n def check_engine_status(self):\n 
log.info(\"Check the engine status\")\n try:\n with self.page.switch_to_frame(self.page.frame_right_name):\n self.page.check_engine_status()\n self.page.wait(5)\n except Exception as e:\n log.exception(e)\n return False\n return True\n\n def check_vm_status(self):\n try:\n with self.page.switch_to_frame(self.page.frame_right_name):\n log.info(\"Check he running on the host...\")\n self.page.check_he_running_on_host(self.run_cmd(\"hostname\")[1])\n self.page.wait(2)\n log.info(\"Check HE-VM status...\")\n self.page.check_vm_status()\n self.page.wait(5)\n except Exception as e:\n log.exception(e)\n return False\n return True\n\n def check_no_password_saved(self):\n log.info(\"Checking no password saved in the log file...\")\n try:\n ret_log = self.run_cmd(\"find /var/log -type f |grep ovirt-hosted-engine-setup-.*.log\")\n output_engine_passwd = self.run_cmd(\"grep 'adminPassword=str' %s |awk -F ':' '{printf $5}'\" % ret_log[1])\n output_root_passwd = self.run_cmd(\"grep 'cloudinitRootPwd=str' %s |awk -F ':' '{printf $5}'\" % ret_log[1])\n if self._config['engine_password'] not in output_engine_passwd[1] and self._config['he_vm_password'] not in output_root_passwd:\n log.info(\"There is no engine admin or root password saved in the log file...\")\n return True\n else:\n log.info(\"Found the password text saved in the log file...\")\n return False\n except Exception as e:\n log.exception(e)\n return False\n return True\n\n def check_no_large_messages(self):\n log.info(\"Check if there are a large number of redundant log generation in /var/log/messages.\")\n size1 = self.run_cmd(\"ls -lnt /var/log/messages | awk '{print $5}'\")\n time.sleep(10)\n size2 = self.run_cmd(\"ls -lnt /var/log/messages | awk '{print $5}'\")\n if int(size2[1]) - int(size1[1]) > 200:\n log.info(\"There are a large redundant log generation in /var/log/messages\")\n return False\n else:\n log.info(\"There are no large redundant log generation in /var/log/messages\")\n return True\n\n def check_additional_host(self):\n log.info(\"Check the additional host in the cluster as HostedEngine deployment...\")\n \n self._clean_nfs_storage(self._config['he_data_nfs'])\n rhvm_fqdn = self._config['he_vm_fqdn']\n host_name = self.run_cmd(\"hostname\")[1]\n rhvm = RhevmAction(rhvm_fqdn, \"admin\", self._config['engine_password'])\n\n try:\n log.info(\"Add the HE first data domain... 
\")\n            rhvm.add_plain_storage_domain(self._config['sd_name'], \"data\", \"nfs\", self._config['nfs_ip'], self._config['he_data_nfs'], host_name)\n            time.sleep(100)\n\n            log.info(\"Attach the data storage to the datacenter...\")\n            rhvm.attach_sd_to_datacenter(self._config['sd_name'], \"Default\")\n            time.sleep(15)\n            \n            log.info(\"Add the additional host to the cluster...\")\n            rhvm.add_host(self._config['second_host'], self._config['second_vm_fqdn'], self._config['second_password'], \"Default\", True)\n            self._wait_host_status(rhvm, self._config['second_vm_fqdn'], 'up')\n            time.sleep(10)\n        except Exception as e:\n            log.exception(e)\n            return False\n        return True\n\n    def check_put_local_maintenance(self):\n        log.info(\"Check putting the host into local maintenance...\")\n        try:\n            \n            self.page.check_three_buttons()\n            self.page.wait(5)\n            self.page.put_host_to_local_maintenance()\n            self.page.wait(10)\n\n            self.page.check_host_in_local_maintenance()\n\n        except Exception as e:\n            log.exception(e)\n            return False\n        return True\n\n    def check_migrate_he(self):\n        log.info(\"Check whether HE migrated to the additional host successfully...\")\n        try:\n            if self.page.check_host_not_in_local_maintenance() == True:\n                self.check_put_local_maintenance()\n\n            time.sleep(10)\n            self.page.check_vm_on_additional_host()\n        except Exception as e:\n            log.exception(e)\n            return False\n        return True\n\n    def check_remove_from_maintenance(self):\n        log.info(\"Check removing this host from maintenance...\")\n        try:\n            self.page.wait(2)\n            self.page.remove_host_from_local_maintenance()\n            self.page.wait(5)\n            self.page.check_host_not_in_local_maintenance()\n            self.page.check_cluster_not_in_global_maintenance()\n            self.page.wait(2)\n        except Exception as e:\n            log.exception(e)\n            return False\n        return True\n\n    def check_put_global_maintenance(self):\n        log.info(\"Check putting the cluster into global maintenance...\")\n        try:\n            self.page.put_cluster_to_global_maintenance()\n            self.page.wait(6)\n            self.page.check_cluster_in_global_maintenance()\n            self.page.wait(5)\n        except Exception as e:\n            log.exception(e)\n            return False\n        return True\n\n    def check_he_clean(self):\n        log.info(\"Check that cleaning the HE env works, so that the HE can be redeployed...\")\n        try:\n            local_path = os.path.join(PROJECT_ROOT, 'utils', 'clean_he_env.py')\n            self.put_remote_file(local_path, \"/root/clean_he_env.py\")\n            self.run_cmd(\"python /root/clean_he_env.py\")\n            time.sleep(10)\n            state = \"You must run deploy first\"\n            if self.run_cmd(\"hosted-engine --vm-status\")[1] == state:\n                log.info(\"Cleaning the HE env succeeded...\")\n                return True\n            else:\n                return False\n\n        except Exception as e:\n            log.exception(e)\n            return False\n        return True\n\n    def check_he_redeploy(self):\n        log.info(\"Check trying to redeploy HostedEngine on the host...\")\n        try:\n            self._driver.refresh()\n            self.page.wait(5)\n            self.check_he_install()\n        except Exception as e:\n            log.exception(e)\n            return False\n        return True\n","sub_path":"cases/checks/test_he_deployment.py","file_name":"test_he_deployment.py","file_ext":"py","file_size_in_byte":16167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"351628591","text":"import nltk\r\nfrom nltk.corpus import senseval\r\nfrom nltk.corpus import wordnet as wn\r\nimport numpy as np\r\nimport pandas as pd\r\nimport re\r\nfrom nltk.wsd import lesk\r\nfrom nltk.stem.porter import PorterStemmer\r\nfrom nltk.corpus import stopwords\r\nfrom sklearn.metrics import precision_score, recall_score, f1_score\r\nfrom gensim.models import Word2Vec\r\nfrom gensim.test.utils 
import common_texts, get_tmpfile\r\nfrom gensim.scripts.glove2word2vec import glove2word2vec\r\nfrom gensim.models.keyedvectors import KeyedVectors\r\n\r\nSV_SENSE_MAP = {\r\n\t\"HARD1\": [\"difficult.a.01\"], # not easy, requiring great physical or mental\r\n\t\"HARD2\": [\"hard.a.02\"], # dispassionate\r\n\t\"HARD3\": [\"hard.a.03\"], # resisting weight or pressure\r\n\t\"interest_1\": [\"interest.n.01\"], # readiness to give attention\r\n\t\"interest_2\": [\"interest.n.03\"], # quality of causing attention to be given to\r\n\t\"interest_3\": [\"pastime.n.01\"], # activity, etc. that one gives attention to\r\n\t\"interest_4\": [\"sake.n.01\"], # advantage, advancement or favor\r\n\t\"interest_5\": [\"interest.n.05\"], # a share in a company or business\r\n\t\"interest_6\": [\"interest.n.04\"], # money paid for the use of money\r\n\t\"cord\": [\"line.n.18\"], # something (as a cord or rope) that is long and thin and flexible\r\n\t\"formation\": [\"line.n.01\"], # a formation of people or things one beside another\r\n\t\"text\": [\"line.n.05\"], # text consisting of a row of words written across a page or computer screen\r\n\t\"phone\": [\"telephone_line.n.02\"], # a telephone connection\r\n\t\"product\": [\"line.n.22\"], # a particular kind of product or merchandise\r\n\t\"division\": [\"line.n.29\"], # a conceptual separation or distinction\r\n\t\"SERVE12\": [\"serve.v.02\"], # do duty or hold offices; serve in a specific function\r\n\t\"SERVE10\": [\"serve.v.06\"], # provide (usually but not necessarily food)\r\n\t\"SERVE2\": [\"serve.v.01\"], # serve a purpose, role, or function\r\n\t\"SERVE6\": [\"service.v.01\"] # be used by; as of a utility\r\n}\r\n#https://stackoverflow.com/questions/16381218/how-do-i-get-the-definition-for-a-sense-in-nltks-senseval-module\r\n\r\n\r\n#glove2word2vec(glove_input_file=\"glove.6B/glove.6B.300d.txt\", word2vec_output_file=\"gensim_glove_vectors.txt\")\r\n#model = KeyedVectors.load_word2vec_format(\"gensim_glove_vectors.txt\", binary=False)\r\n\r\n\r\ndef _get_name(sense):\r\n\t\"\"\"Using regular expressions extract the name from sense.\r\n\tE.g. 
'difficult.a.01' -> 'difficult' \"\"\"\r\n\treturn re.match(r'[a-z]+', sense._name).group()\r\n\r\n\r\ndef _create_features(target_word):\r\n\tfeatures = {}\r\n\tfor sense in wn.synsets(target_word):\r\n\t\tsense_name = _get_name(sense)\r\n\t\tfeatures[sense._name] = []\r\n\t\tfeatures[sense._name].extend(sense.definition().split())\r\n\r\n\r\n\t\tfor definition_word in sense.definition().split():\r\n\t\t\tfor definition_sense in wn.synsets(definition_word):\r\n\t\t\t\tfor definition_sense_hypernym in definition_sense.hypernyms():\r\n\t\t\t\t\tfeatures[sense._name].append(_get_name(definition_sense_hypernym))\r\n\t\t\t\tfor definition_sense_hyponym in definition_sense.hyponyms():\r\n\t\t\t\t\tfeatures[sense._name].append(_get_name(definition_sense_hyponym))\r\n\r\n\t\t\t\tfor definition_sense_member_holonym in definition_sense.member_holonyms():\r\n\t\t\t\t\tfeatures[sense._name].append(_get_name(definition_sense_member_holonym))\r\n\r\n\t\t\t\tfor definition_sense_member_meronym in definition_sense.member_meronyms():\r\n\t\t\t\t\tfeatures[sense._name].append(_get_name(definition_sense_member_meronym))\r\n\r\n\t\t\t\tfor definition_sense_part_holonym in definition_sense.part_meronyms():\r\n\t\t\t\t\tfeatures[sense._name].append(_get_name(definition_sense_part_holonym))\r\n\r\n\r\n\t\tfor example_sentence in sense.examples():\r\n\t\t\tfeatures[sense._name].extend(example_sentence.split())\r\n\t\t\tfor word in example_sentence.split():\r\n\t\t\t\tfor example_sense in wn.synsets(word):\r\n\r\n\t\t\t\t\tfor example_sense_hypernym in example_sense.hypernyms():\r\n\t\t\t\t\t\tfeatures[sense._name].append(_get_name(example_sense_hypernym))\r\n\t\t\t\t\tfor example_sense_hyponym in example_sense.hyponyms():\r\n\t\t\t\t\t\tfeatures[sense._name].append(_get_name(example_sense_hyponym))\r\n\t\t\t\t\tfor example_sense_member_holonym in example_sense.member_holonyms():\r\n\t\t\t\t\t\tfeatures[sense._name].append(_get_name(example_sense_member_holonym))\r\n\r\n\t\t\t\t\tfor example_sense_member_meronym in example_sense.member_meronyms():\r\n\t\t\t\t\t\tfeatures[sense._name].append(_get_name(example_sense_member_meronym))\r\n\r\n\t\t\t\t\tfor example_sense_part_holonym in example_sense.part_meronyms():\r\n\t\t\t\t\t\tfeatures[sense._name].append(_get_name(example_sense_part_holonym))\r\n\r\n\r\n\t\tfeatures[sense._name] = set(features[sense._name])\r\n\r\n\treturn features\r\n\r\n\r\ndef _sense_for_sentence(sent, features_of_possible_senses, senses_unique, model):\r\n\t\"\"\"\r\n\tCalculate the cosine similarity between the given sentence and the different senses\r\n\tAnd return the sense with the highest number of intersections\r\n\t\"\"\"\r\n\tsimilarities = []\r\n\tfor feature_set in features_of_possible_senses:\r\n\t\tsimilarity = []\r\n\t\tfor w1 in feature_set:\r\n\t\t\ttemp = np.array([model.similarity(w1, w2) for w2 in sent])\r\n\t\t\t#similarity.append(temp.mean())\r\n\t\t\ttemp = np.where(temp < 0.85, 0, 1)\r\n\t\t\tsimilarity.append(temp.sum())\r\n\t\tsimilarities.append(np.mean(similarity))\r\n\treturn senses_unique[np.argmax(similarities)]\r\n\r\n\r\ndef word_embeddings(target_word, remove_stop_words=True):\r\n\tinstances = senseval.instances(target_word + \".pos\")\r\n\r\n\ttext = [[re.sub(r'[^\\w\\s]', '', w[0]) for w in instance.context] for instance in instances]\r\n\tmodel = Word2Vec(text, size=50, window=4, min_count=1, workers=4)\r\n\r\n\tstop_words = set(stopwords.words('english'))\r\n\r\n\tfeatures = _create_features(target_word)\r\n\r\n\ty = np.array([instance.senses[0] for 
instance in instances])\r\n\tsenses_unique = pd.Series(y).unique().tolist()\r\n\r\n\tporter_stemmer = PorterStemmer()\r\n\t#Use only the features from the senses in the dataset\r\n\tfeatures_of_possible_senses = [list(features[SV_SENSE_MAP[sense][0]]) for sense in senses_unique]\r\n\t#Stem the feature words\r\n\t#features_of_possible_senses = [[porter_stemmer.stem(w) for w in feature_set] for feature_set in features_of_possible_senses]\r\n\t#Remove the words not in the model and stop words\r\n\tif remove_stop_words:\r\n\t\tfeatures_of_possible_senses = [[w for w in feature_set if w not in stop_words and w in model.wv.vocab] for feature_set in features_of_possible_senses]\r\n\telse:\r\n\t\tfeatures_of_possible_senses = [[w for w in feature_set if w in model.wv.vocab] for feature_set in features_of_possible_senses]\r\n\r\n\tresults = []\r\n\tfor i, instance in enumerate(instances):\r\n\t\t#sent = [porter_stemmer.stem(w[0]) for w in instance.context]\r\n\t\t#Remove punctuation\r\n\t\tsent = (re.sub(r'[^\\w\\s]', '', w[0]) for w in instance.context)\r\n\t\t#Remove stop words and words not in the model\r\n\t\tif remove_stop_words:\r\n\t\t\tsent = [w for w in sent if w not in stop_words and w in model.wv.vocab]\r\n\t\telse:\r\n\t\t\tsent = [w for w in sent if w in model.wv.vocab]\r\n\t\tresults.append(_sense_for_sentence(sent, features_of_possible_senses, senses_unique, model))\r\n\t\t\r\n\t\tif i % 1000 == 0: print(target_word, i)\r\n\r\n\tX = np.array(results)\r\n\r\n\r\n\taccuracy_improved_lesk = (X == y).mean()\r\n\tprint(accuracy_improved_lesk)\r\n\r\n\tfrom sklearn.metrics import confusion_matrix\r\n\tprint(confusion_matrix(X, y))\r\n\r\n\tprint(\"Precision {}\\nRecall {}\".format(precision_score(y, X, average=\"macro\"),\r\n\t\trecall_score(y, X, average=\"macro\")))\r\n\r\n\treturn [accuracy_improved_lesk, precision_score(y, X, average=\"macro\"), recall_score(y, X, average=\"macro\"),\r\n\t\t\tf1_score(y, X, average=\"macro\")]\r\n\r\n\r\nfinal_results = []\r\nfor word in [\"hard\", \"interest\", \"line\", \"serve\"]:\r\n\tfinal_results.append(word_embeddings(word))\r\n\r\nfinal_results = np.array(final_results)\r\n# column 3 of each result row holds the macro F1 score\r\nprint(\"Final F1: {}\".format(final_results[:, 3].mean()))","sub_path":"task5_our_model.py","file_name":"task5_our_model.py","file_ext":"py","file_size_in_byte":7658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"314127094","text":"\n\nfrom xai.brain.wordbase.nouns._december import _DECEMBER\n\n#class header\nclass _DECEMBERS(_DECEMBER, ):\n\tdef __init__(self,): \n\t\t_DECEMBER.__init__(self)\n\t\tself.name = \"DECEMBERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"december\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_decembers.py","file_name":"_decembers.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"342720512","text":"from tweepy import OAuthHandler\nimport tweepy\nimport time\nimport couchdb\nimport sys\nfrom process_tweet import get_sentiment\nimport csv\nimport ast\nfrom geopy.distance import vincenty\n\ns = 0\n\ndef get_geos():\n    geos = []\n    with open('postcodes.csv', newline='') as csvfile:\n        areas = csv.DictReader(csvfile, delimiter=',', quotechar='\\\"')\n        for area in areas:\n            coords = ast.literal_eval(area['boundedBy'])\n            center = ((coords[0] + coords[2]) / 2, (coords[1] + coords[3]) / 2)\n            dist = vincenty(center, (coords[0], coords[1])).kilometers\n            geo = \"%s,%s,%skm\" % 
(center[1], center[0], dist)\n geos.append(geo)\n return geos\n\ngeos = get_geos()\n\nserver1 = {\n # ds_amores credentials \"Second Australian Cities\"\n 'credentials': {\n 'ckey': \"CzmVaiTpCs7UdqeH28rnzDaZu\",\n 'csecret': \"6q3Isi4pm8Y1NyPuW0sGKIp0HiTu1KYYACpUeedgBeUsFrtL52\",\n 'atoken': \"218523248-buX14uhApOb7Ei8rqPvicb6wdZd9wpM7w7suutcQ\",\n 'asecret': \"ZDlkKSAOIbWipSzLpEAFXbllBavJyKis9hjwrctehMPwP\"\n },\n 'geos': geos # left melb\n}\n\nserver2 = {\n # Angela's credentials\n 'credentials': {\n 'ckey': \"IswkdNrhcsauRsBK8esfX1ko3\",\n 'csecret': \"8lYmGMCaHA9rRZI6t81v5dcCjIswrqYyIo2nwIXiHlUdky9EoT\",\n 'atoken': \"3186106424-ivw3HRIQRMHHgPMbkkVmqdwlaNdGWTyF1wRWz2y\",\n 'asecret': \"bUU6bwSpqIxPHdrSs235s74FJp7jzWqNQpOv1gYe6wAEu\"\n },\n 'geos': geos # center melb\n}\n\nserver3 = {\n # Janice's credentials\n 'credentials': {\n 'ckey': \"H9TfC3iITRDlxCNUY5wqvpPNy\",\n 'csecret': \"QHSQ00xnWURf0lKNPyZ0gIEimjI3eWigYG6yid3r4ML5V4ZEbQ\",\n 'atoken': \"2843502054-TmJMKXmMAttbaAwkEPpM2WQ9Blsv8vylwe8Quu9\",\n 'asecret': \"0Bt1sddXkhNH727EUUj94OsOrCnuwNS9F8FIJWKtRaeRB\"\n },\n 'geos': geos # right melb\n}\n\nservers = [\n server1,\n server2,\n server3\n]\n\nserver = servers[s]\n\ndb_name = 'aus_analytics'\n\ncouch = couchdb.Server('http://115.146.93.17:5984')\ndb = couch[db_name]\n\nclass TweetProcessor:\n\n def __init__(self, keyword=None):\n self.keyword = keyword\n\n def process_tweet(self, status):\n json_tweet = status._json\n json_tweet[\"_id\"] = json_tweet[\"id_str\"]\n json_tweet[\"sentiment\"] = get_sentiment(json_tweet[\"text\"])\n if self.keyword == '':\n json_tweet[\"type\"] = 'tweet'\n else:\n json_tweet[\"type\"] = 'tweet_search'\n json_tweet[\"matched\"] = self.keyword\n while True:\n try:\n db.save(json_tweet)\n return True\n except couchdb.http.ResourceConflict:\n return False\n except couchdb.http.ServerError as e:\n print(\"Server error %s. Sleeping for 60 seconds\" % e)\n sys.stdout.flush()\n time.sleep(60)\n\n def process_tweets(self, results):\n last_id = int(results[0].id_str)\n processed = 0\n for res in results:\n if tp.process_tweet(res):\n processed += 1\n if int(res.id_str) < last_id:\n last_id = int(res.id_str)\n return last_id, processed\n\n\nauth = OAuthHandler(server['credentials']['ckey'], server['credentials']['csecret'])\nauth.set_access_token(server['credentials']['atoken'], server['credentials']['asecret'])\napi = tweepy.API(auth)\n\nprint(\"start\")\nsys.stdout.flush()\n\nterms_files = ['terms1.txt', 'terms2.txt', 'terms3.txt', 'terms4.txt', 'terms5.txt', 'terms6.txt']\n\ni = s\n\nwhile True:\n\n file_name = terms_files[i % len(terms_files)]\n\n f = open(file_name, 'r').read()\n\n keywords = []\n\n for kw in f.split('\\n'):\n keywords.append(kw.strip())\n\n for kw in keywords:\n tp = TweetProcessor(kw)\n for geo in server['geos']:\n count = 0\n while True:\n try:\n results = api.search(kw, geocode=geo, lang='en', count=100)\n break\n except tweepy.error.RateLimitError:\n print(\"1. Rate limit error: sleeping for 1000 seconds\")\n sys.stdout.flush()\n time.sleep(1000)\n except KeyboardInterrupt:\n exit()\n except Exception as e:\n print(\"1. Unknown Error %s - Sleeping for 1000 seconds \" % e)\n sys.stdout.flush()\n time.sleep(1000)\n\n while len(results) > 0:\n last_id, processed = tp.process_tweets(results)\n count += processed\n sys.stdout.flush()\n while True:\n try:\n results = api.search(kw, geocode=geo, lang='en', count=100, max_id=last_id)\n break\n except tweepy.error.RateLimitError:\n print(\"2. 
Rate limit error: sleeping for 1000 seconds\")\n                        sys.stdout.flush()\n                        time.sleep(1000)\n                    except KeyboardInterrupt:\n                        exit()\n                    except Exception as e:\n                        print(\"2. Unknown Error %s - Sleeping for 1000 seconds\" % e)\n                        sys.stdout.flush()\n                        time.sleep(1000)\n            results.pop(0)\n            print(\"Collected %d for word '%s' in geo '%s'\" % (count, kw, geo))\n            sys.stdout.flush()\n    print(\"Collected for every keyword in %s, sleeping for 4000 secs\" % file_name)\n    sys.stdout.flush()\n    time.sleep(4000)\n    print(\"Woke up! keep collecting...\")\n    sys.stdout.flush()\n    i += 1","sub_path":"存档/欣然/past example/tweet_harv_search.py","file_name":"tweet_harv_search.py","file_ext":"py","file_size_in_byte":5595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"422574325","text":"#!/usr/bin/python\n\n\"\"\"\nModified hack to use an external script to process the images in a C++/CUDA tool\nfor faster image processing, as the current implementation of KSM and Meng+Sat has been\nslow and unreliable when executed in the threaded environment.\n\"\"\"\n\n\nimport os\nimport sys\nimport time\nimport argparse\nfrom shutil import copyfile,rmtree\nimport json\nimport subprocess\nimport pathlib\nimport hashlib\nimport tempfile\n\n# from multiprocessing import sharedctypes\nfrom multiprocessing import Process, current_process, Queue\n\nimport numpy as np\nimport colour\n\nimport colorspaces\nimport saturation\nimport plotting\n\n\nparser = argparse.ArgumentParser(description='Process images and create a dataset for the html saturation survey viewer')\nparser.add_argument('--image-filelist', '-I', dest='image_filelist', help='Input image file list (json) - see image-set.json as an example')\nparser.add_argument('--output-folder', '-O', dest='output_folder', help='Output folder for the full survey')\nparser.add_argument('--plot-colors', '-P', dest='plot_colors', help='File with the primaries to calculate')\nparser.add_argument('--saturation-models', '-S', dest='sat_model', help='Saturation models to calculate')\n# parser.add_argument('--saturation-models', '-S', dest='sat_model', help='Saturation models to calculate')\nparser.add_argument('--force', '-f', dest='force_overwrite', action='store_true', help='Force overwrite')\nparser.add_argument('--no-ref', '-r', dest='no_ref', action='store_true', help='Don\\'t create reference images')\n# parser.add_argument('--no-data-js', '-j', dest='no_data_js', action='store_true', help='Don\\'t create data.js overview file')\nparser.add_argument('--batch-file', '-b', dest='create_batch_file', action='store_true', help='Create batch file')\nparser.add_argument('--single-thread', '-s', dest='single_thread', action='store_true', help='Single threaded execution')\nparser.add_argument('--threads', '-T', dest='threads', type=int, default=-1, help='Number of threads (-1 == auto)')\nparser.add_argument('--ignore-skeleton', dest='ignore_skeleton', action='store_true', help='Don\\'t create skeleton')\nparser.set_defaults(no_primaries=False)\n\nargs = vars(parser.parse_args())\n\nif not args['output_folder'] or (os.path.exists(args['output_folder']) and not args['force_overwrite']):\n    print('Invalid output folder', args['output_folder'])\n    print('Folder exists or not given')\n    sys.exit(1)\n\noutput_folder = args['output_folder']\n\n\nsaturation_steps = np.linspace(0.0, 2.0, num=9)\n# saturation_steps = np.array([0.25])\nmax_num_scatter = 200000\n# per image every np.ceil(np.sqrt(pixel / max_num_scatter)) th pixel per line/row\n\ntransfer_curves = 
[ ('sRGB - pure OETF', 'srgb_oetf'),\n ('sRGB - tonemapped', 'srgb_hermite'),\n ('sRGB - tonemapped (RGB)', 'srgb_hermite_rgb'),\n ('Display P3 - pure OETF', 'disp3_oetf'),\n ('Display P3 - tonemapped', 'disp3_hermite'),\n ('Display P3 - tonemapped (RGB)', 'disp3_hermite_rgb')\n ]\nsat_models = [ ('YCbCr ITU-R BT.709', 'bt709'),\n ('YCbCr ITU-R BT.2020', 'bt2020'),\n ('YCbCr ITU-R BT.2100 (const luminance / ICtCp)', 'bt2100const'),\n ('JzAzBz', 'jzazbz'),\n ('ASC-CDL (Rec.709, no clamp for values > 1.0)', 'asccdl')\n ]\nsat_models = [ ('KSM (Luma preserve 0.00)', 'ksm0.00'),\n# ('KSM (Luma preserve 0.25)', 'ksm0.25'),\n ('KSM (Luma preserve 0.50)', 'ksm0.50'),\n# ('KSM (Luma preserve 0.75)', 'ksm0.75'),\n ('KSM (Luma preserve 1.00)', 'ksm1.00') ]\nsat_models += [ ('Meng+Sat (Luma preserve 0.00)', 'meng0.00'),\n ('Meng+Sat (Luma preserve 0.50)', 'meng0.50'),\n ('Meng+Sat (Luma preserve 1.00)', 'meng1.00') ]\n\nif args['sat_model']:\n sat_models = []\n if os.path.exists(args['sat_model']):\n fp = open(args['sat_model'], 'r')\n sat_models = json.loads(fp.read())\n fp.close()\n else:\n sat_models = json.loads(args['sat_model'])\n\n\nimages = []\nif args['image_filelist'] and os.path.exists(args['image_filelist']):\n fp = open(args['image_filelist'], 'r')\n images = json.loads(fp.read())\n fp.close()\nelif args['image_filelist']:\n images = json.loads(args['image_filelist'])\n\n\ngraphs = []\nif args['plot_colors'] and os.path.exists(args['plot_colors']):\n fp = open(args['plot_colors'], 'r')\n graphs = json.loads(fp.read())\n fp.close()\nelif args['plot_colors']:\n graphs = json.loads(args['plot_colors'])\n\n\nbasepath = os.path.dirname(os.path.realpath(__file__))\nbasepath = os.path.abspath(os.path.join(basepath, '..'))\n\np3_icc_path = os.path.join(basepath, 'external/compact-icc-profiles/profiles/DisplayP3Compat-v2-magic.icc')\n\noutput_json = {}\n\noutput_json['color_spaces'] = []\nfor tc in transfer_curves:\n output_json['color_spaces'].append({'text': tc[0], 'mode': 'images', 'path': tc[1]})\noutput_json['color_spaces'].append({'text': 'Graphs', 'mode': 'graphs', 'path': 'graphs'})\n\noutput_json['saturation'] = list(saturation_steps)\n\noutput_json['color_models'] = []\nfor sm in sat_models:\n output_json['color_models'].append({'text': sm[0], 'path': sm[1]})\n\noutput_json['image_sets'] = {}\noutput_json['image_sets']['default'] = []\noutput_json['image_sets']['graphs'] = []\n\n\nif args['create_batch_file']:\n print('Batch file -------')\n for img in images:\n ref = False\n for sat in sat_models:\n ref_str = '--no-ref'\n if not ref:\n ref_str = ''\n ref = True\n print('python src/create-image-set.py -s --ignore-skeleton -f -O \\'' + args['output_folder']\n + '\\' -I \\'', json.dumps([img]), '\\' -S \\'', json.dumps([sat]), '\\'', ref_str)\n for graph in graphs:\n print('python src/create-image-set.py -s --ignore-skeleton -f -O \\'' + args['output_folder']\n + '\\' -P \\'', json.dumps([graph]), '\\' -S \\'', json.dumps(sat_models), '\\'')\n print('------')\n sys.exit(0)\n\n\nif args['force_overwrite'] and os.path.exists(args['output_folder']) and not args['ignore_skeleton']:\n print('Delete existing folder ...')\n rmtree(args['output_folder'])\nif not args['ignore_skeleton']:\n copyfiles = ['css/base.css', 'js/vue.js', 'js/vue.min.js', 'index.html']\n pathlib.Path(os.path.join(output_folder, 'js')).mkdir(parents=True, exist_ok=True)\n pathlib.Path(os.path.join(output_folder, 'css')).mkdir(parents=True, exist_ok=True)\n pathlib.Path(os.path.join(output_folder, 
'data')).mkdir(parents=True, exist_ok=True)\n    for fname in copyfiles:\n        copyfile(os.path.join(basepath, 'html', fname), os.path.join(output_folder, fname))\n\n\n\ndef tonemap(img_data, base_path):\n    for tc in transfer_curves:\n        opath = base_path.replace('$oetf$', tc[1])\n\n        folder = os.path.dirname(opath)\n        pathlib.Path(folder).mkdir(parents=True, exist_ok=True)\n\n        data = colorspaces.outputColor(img_data, tc[1])\n\n        write_path = opath\n        p3_profile = False\n        depth='uint8'\n        if 'disp3' in tc[1].lower():\n            p3_profile = True\n            write_path = tempfile.mktemp() + '.png'\n            depth='uint16'\n\n        colour.write_image(data, write_path, bit_depth=depth, method='OpenImageIO')\n\n        if p3_profile:\n            # only add profile, no conversion. Therefore in and out profile are the same\n            cmd = ['convert', write_path, '-profile', p3_icc_path, '-profile', p3_icc_path, opath]\n            imagick = subprocess.run(cmd)\n            if imagick.returncode != 0:\n                print('Failed to convert ' + opath + ' ', cmd)\n            os.remove(write_path)\n\ndef saturate(img_data, base_path, proc_sat_models = sat_models):\n    # for model in proc_sat_models:\n    #     model_path = base_path.replace('$model$', model[1])\n    #     for sat in saturation_steps:\n    #         opath = model_path.replace('$sat$', '%.3f' % (sat))\n    #         data = saturation.saturate(img_data, sat, model[1])\n    #         tonemap(data, opath)\n    #         data = None\n    modes = []\n    vals = []\n    for sat in proc_sat_models:\n        modes.append(sat[1])\n    for sat in saturation_steps:\n        vals.append(str(sat))\n\n    tpath = tempfile.mktemp() + '.npy'\n    respath = tempfile.mktemp()\n    np.save(tpath, img_data)\n\n    cmd = [basepath + '/process_ict.bash', tpath, ':'.join(modes), ':'.join(vals), respath]\n    ext_proc = subprocess.run(cmd, cwd=basepath)\n    os.remove(tpath)\n    if ext_proc.returncode != 0:\n        print('Failed to externally process...')\n        print('')\n        foo = input('Kill process or press enter to continue...\\n')\n\n    # foo = input('Halting for user input ...\\n')\n\n    for model in modes:\n        model_path = base_path.replace('$model$', model)\n        for sat in vals:\n            img_path = respath + '_' + model + '_' + sat + '.npy'\n            processed_img = np.load(img_path)\n            os.remove(img_path)\n            sat_f = float(sat)\n            opath = model_path.replace('$sat$', '%.3f' % (sat_f))\n            tonemap(processed_img, opath)\n\ndef processImages(proc_images = images, proc_sat_models = sat_models, thread_mode=False):\n    output_filename = os.path.join(output_folder, 'data/img_$image_hash$/$model$/$oetf$/img_$sat$.png')\n    for img in proc_images:\n        print('Working on image', img[0], '...')\n\n        md5hash = hashlib.md5(img[0].encode('utf-8')).hexdigest()\n        base_path = output_filename.replace('$image_hash$', md5hash)\n\n        if not thread_mode:\n            output_json['image_sets']['default'].append({ 'text': img[1],\n                                                          'path': 'img_' + md5hash })\n\n        color_space = ''\n        exposure = 0\n        if len(img) > 2:\n            color_space = img[2]\n        if len(img) > 3:\n            exposure = img[3]\n        data = colorspaces.importImage(img[0], color_space)\n        if data is None:\n            print('Image ', img[0], ' invalid!')\n            continue\n        data = data * np.power(2.0, exposure)\n\n        if len(proc_sat_models) > 0:\n            saturate(data, base_path, proc_sat_models)\n\n        if len(proc_sat_models) == 0 or len(proc_sat_models) == len(sat_models):\n            if not args['no_ref']:\n                ref_path = base_path.replace('$model$', 'ref').replace('$sat$', 'ref')\n                tonemap(data, ref_path)\n\n        if thread_mode:\n            return ('default', { 'text': img[1], 'path': 'img_' + md5hash })\n    return (None, )\n\n\ndef createGraphs(data_set, base_path):\n    for model in sat_models:\n        model_path = base_path.replace('$model$', model[1])\n        
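# one output sub-folder per saturation model; graphs for every saturation step are rendered into it below\n        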
pathlib.Path(model_path).mkdir(parents=True, exist_ok=True)\n\n        lines = []\n        for i in range(data_set.shape[0]):\n            saturated = np.zeros((saturation_steps.shape[0],) + data_set.shape[1:])\n            for j in range(saturation_steps.shape[0]):\n                saturated[j, ...] = saturation.saturate(data_set[i,...], saturation_steps[j], model[1])\n            lines.append(saturated)\n\n        bpath = os.path.join(model_path, 'graph_{}.png')\n        plotting.plotAll(lines=lines, hue_lines='hung', base_filename=bpath, save_only=True)\n\ndef processGraphs(proc_graphs = graphs, thread_mode=False):\n    output_filepath = os.path.join(output_folder, 'data/graph_$graph_path$/$model$/')\n    \n    for graph in proc_graphs:\n        print('Working on graph', graph[0], '...')\n\n        md5hash = hashlib.md5(graph[0].encode('utf-8')).hexdigest()\n        if not thread_mode:\n            output_json['image_sets']['graphs'].append({ 'text': graph[0],\n                                                         'path': 'graph_' + md5hash })\n        opath = output_filepath.replace('$graph_path$', md5hash)\n\n        data = graph[1]\n        if isinstance(data, str):\n            # check if file exists and open as image\n            nth_pixel = max_num_scatter\n            color_space = ''\n            if len(graph) > 2:\n                color_space = graph[2]\n            if len(graph) > 3:\n                nth_pixel = graph[3]\n            img = colorspaces.importImage(graph[1], color_space)\n            if img is None:\n                print('Image ', graph[1], ' invalid!')\n                continue\n            else:\n                data = img.reshape(img.shape[0] * img.shape[1], img.shape[2])[::nth_pixel, :]\n        else:\n            data = np.array(data)\n            if len(graph) > 2:\n                data = colorspaces.convertColor(data, graph[2])\n\n        createGraphs(data, opath)\n        if thread_mode:\n            return ('graphs', { 'text': graph[0], 'path': 'graph_' + md5hash })\n\n    return (None, )\n\nif args['single_thread']:\n    if images:\n        processImages()\n    if graphs:\n        processGraphs()\nelse:\n    def worker(wnum, input_queue, result_queue):\n        os.sched_setaffinity(0, [wnum])\n        # print('Start worker:', wnum)\n        while True:\n            # try:\n            if True:\n                value = input_queue.get(block=True)\n                # print('got data', value)\n                if value == 'STOP':\n                    # print('Stopping worker', wnum)\n                    break\n                res = (None, )\n                if value[0] == 'image':\n                    timages = [value[1]]\n                    tsat_models = [value[2]]\n                    if value[2] is None:\n                        tsat_models = []\n                    # print('Working on image', timages, tsat_models)\n                    res = processImages(proc_images=timages, proc_sat_models=tsat_models, thread_mode=True)\n                elif value[0] == 'graph':\n                    res = processGraphs([value[1]], thread_mode=True)\n                else:\n                    print('Failed: ', value, '\\n')\n                # print('Worker done with current task', wnum)\n                result_queue.put(res)\n\n            # except:\n            #     pass\n            os.sched_yield()\n\n    num_procs = args['threads']\n    if num_procs <= 0:\n        num_procs = os.cpu_count() - 3\n    task_queue = Queue(3*num_procs)\n    done_queue = Queue(3*num_procs)\n\n    print('Running {} workers ...'.format(num_procs))\n    processes = []\n    for i in range(num_procs):\n        processes.append(Process(target = worker,\n                                 args = (i, task_queue, done_queue),\n                                 name = 'worker {}'.format(i),\n                                 daemon = True))\n        processes[-1].start()\n\n    todo = []\n    for img in images:\n        for model in sat_models:\n            todo.append(('image', img, model))\n        todo.append(('image', img, None))\n    for graph in graphs:\n        todo.append(('graph', graph))\n\n    # print(todo)\n    # for item in todo:\n    #     print(item)\n    # sys.exit(1)\n\n    num_sent = 0\n    num_done = 0\n    num_todo = len(todo)\n    perc = 0\n    iterator = iter(todo)\n\n    # Push work items to process and keep count. 
When done, send the stop signal\n    def print_progress(msg=None):\n        msg_str = ''\n        if msg is not None:\n            msg_str = '['+msg+']'\n        print('\\033[2K\\r{} sent, {} done, {} total ({} %) {}'.format(num_sent,\n              num_done, num_todo, perc, msg_str), end='')\n\n    while num_done < num_todo:\n        print_progress('sending work')\n\n        while num_sent < num_todo and not task_queue.full():\n            nextval = next(iterator)\n            task_queue.put(nextval)\n            num_sent += 1\n            os.sched_yield()\n\n        while True:\n            try:\n                item = done_queue.get(block=False)\n                if len(item) > 0 and (item[0] is not None):\n                    output_json['image_sets'][item[0]].append(item[1])\n                num_done += 1\n                perc = int(num_done / num_todo * 100)\n            except:\n                break\n            time.sleep(0)\n\n        print_progress()\n        time.sleep(10)\n\n    # Terminate workers.\n    for i in range(num_procs):\n        task_queue.put('STOP')\n\n    for p in processes:\n        p.join()\n\n    print('\\n ... done')\n\n\nif not args['ignore_skeleton']:\n    with open(os.path.join(output_folder, 'data/dataset.js'), 'w') as outfile:\n        outfile.write('survey_data_set = ')\n        outfile.write(json.dumps(output_json, indent=2))","sub_path":"src/create-image-set-external.py","file_name":"create-image-set-external.py","file_ext":"py","file_size_in_byte":16282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"18870382","text":"import torch\nimport cv2\nimport glob, os\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nnp.random.seed(130)\n\n#Import from AK\nfrom data_aug.data_aug import *\nfrom data_aug.bbox_util import *\nimport cv2\nimport pickle as pkl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom copy import deepcopy\nimport random\nfrom util_digits import *\nfrom rotate_display.util_interpret_digits import *\n\nclass Net(nn.Module):\n    def __init__(self):\n        super(Net, self).__init__()\n        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n        self.conv2_drop = nn.Dropout2d()\n        self.fc1 = nn.Linear(24500, 50) \n        self.fc2 = nn.Linear(50, 10)\n\n    def forward(self, x):\n        x = F.relu(F.max_pool2d(self.conv1(x), 2))\n        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n        x = x.view(-1, 24500)\n        x = F.relu(self.fc1(x))\n        x = F.dropout(x, training=self.training)\n        x = self.fc2(x)\n        return F.log_softmax(x)\n\n\ndef digit_classifier(image):\n    net = Net()\n    # READ IMAGES\n    #filename_a = \"./digit_img/digit_a.png\"\n    #img_a = cv2.imread(filename_a,0)\n\n    #filename_b = \"./digit_img/digit_b.png\"\n    #img_b = cv2.imread(filename_b,0)\n\n    #filename_sign = \"./digit_img/digit_sign.png\"\n    #img_sign = cv2.imread(filename_sign,0)\n\n    #filename_d = \"./digit_img/digit_d.png\"\n    #img_d = cv2.imread(filename_d,0)\n    #Run AK code to detect the display and cut the digit into four regions \n\n    # READ CLASSIFIERS\n    classifier_digit_a = Net()\n    classifier_digit_a.load_state_dict(torch.load('./classifiers/classifier_digits_a.pth'))\n    classifier_digit_a.eval()\n\n    classifier_digit_b = Net()\n    classifier_digit_b.load_state_dict(torch.load('./classifiers/classifier_digits_b.pth'))\n    classifier_digit_b.eval()\n\n\n    classifier_digit_sign = Net()\n    classifier_digit_sign.load_state_dict(torch.load('./classifiers/classifier_digits_sign.pth'))\n    classifier_digit_sign.eval()\n\n    classifier_digit_d = Net()\n    classifier_digit_d.load_state_dict(torch.load('./classifiers/classifier_digits_d.pth'))\n    classifier_digit_d.eval()\n\n\n    images_tensors = torch.tensor(image[1])\n    images_exp = images_tensors.unsqueeze_(0)\n    
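# unsqueeze_ is in-place, so this second call stacks another axis onto the same tensor,\n    # taking the digit image from (H, W) to (1, 1, H, W) -- the (batch, channel) layout the conv net expects\n    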
images_exp = images_tensors.unsqueeze_(0)\n\n output_a = classifier_digit_a(images_exp.float())\n pred_a = output_a.data.max(1, keepdim=True)[1]\n\n #print('digit_superior_esquerdo: ', pred_a.item())\n\n\n images_tensors = torch.tensor(image[2])\n images_exp = images_tensors.unsqueeze_(0)\n images_exp = images_tensors.unsqueeze_(0)\n\n output_b = classifier_digit_b(images_exp.float())\n pred_b = output_b.data.max(1, keepdim=True)[1]\n #print('digit_superior_direito: ', pred_b.item())\n\n digit_upper = pred_a.item()*10 + pred_b.item()\n \n\n\n images_tensors = torch.tensor(image[3])\n images_exp = images_tensors.unsqueeze_(0)\n images_exp = images_tensors.unsqueeze_(0)\n\n output_sign = classifier_digit_sign(images_exp.float())\n pred_sign = output_sign.data.max(1, keepdim=True)[1]\n #print('digito_inferior_esquerdo: ', pred_sign.item())\n\n\n images_tensors = torch.tensor(image[4])\n images_exp = images_tensors.unsqueeze_(0)\n images_exp = images_tensors.unsqueeze_(0)\n\n output_d = classifier_digit_d(images_exp.float())\n pred_d = output_d.data.max(1, keepdim=True)[1]\n\n\n if pred_sign == 0:\n digit_down = 0*10 + pred_d.item()\n elif pred_sign == 1:\n digit_down = pred_sign.item()*10 + pred_d.item()\n elif pred_sign == 2:\n digit_down = (1*10 + pred_d.item())*(-1)\n elif pred_sign == 3:\n digit_down = -pred_d.item()\n\n\n #print('digito_inferior_direito: ', pred_d.item())\n #print(\"digit upper: %d\"% digit_upper)\n #print(\"digit down: %d\"% digit_down)\n #print(\"=========================\")\n return digit_upper, digit_down\n\n\n\n\n\ndebug_flavio_part = False # From flavio script. If true, it will generate several plots written by flavio\ndef mnist_classifier(final_image):\n\n\n #From AK Script\n template_file = './rotate_display/template_1.png' #template_1.png is the largest\n percent_template = cv2.imread(template_file,0) #keep it in memory to speedup\n #These are the important digits for training the MNIST DNNs\n num_pixels_output_image = 290 #this image will not be used, has all display\n num_pixels_each_digit = 154 #this image is the input image to the NNs\n\n\n\n #Final image will be drone image\n cropped_binary_image, cropped_image = step_all_from_camera_to_rotated_display(final_image, percent_template)\n\n all_images = extract_digits_from_display(cropped_binary_image,num_pixels_output_image,num_pixels_each_digit)\n\n if debug_flavio_part:\n cv2.imshow('all_images_1',all_images[1])\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n cv2.imshow('all_images_2',all_images[2])\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n cv2.imshow('all_images_3',all_images[3])\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n cv2.imshow('all_images_4',all_images[4])\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n\n cv2.imshow('cropped_binary_image',cropped_binary_image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n cv2.imshow('cropped_image',cropped_image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n cv2.imshow('final_image',final_image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n upper_digit, down_digit = digit_classifier(all_images) \n return upper_digit, down_digit\n\n\ndef mnist_classifier_by_voting(drone_images):\n upper_digits = [] #To store all the upper digits predictions\n down_digits = [] #To store all the down digits predictions\n for image in drone_images:\n upper_digit, down_digit = mnist_classifier(image) # Do the prediction in the image\n upper_digits.append(upper_digit) #store the upper digits predictions\n down_digits.append(down_digit) #store the down predictions\n #After 
all the predictions, we just need to select the value that repeats most\n    #Convert to numpy because it is easier to work on\n\n    upper_digits_numpy = np.array(upper_digits) #Convert the list to numpy because it's easier to work on\n    down_digits_numpy = np.array(down_digits) #Convert the list to numpy because it's easier to work on\n\n\n    #Get all the unique values (if we have a list with [52,52,32,32], the output will be only [52,32])\n    upper_digits_unique_numpy = np.unique(np.array(upper_digits)) \n    down_digits_unique_numpy = np.unique(np.array(down_digits))\n    \n    upper_digits_votes = []\n    down_digits_votes = []\n    #Count the votes for each unique digit\n    for votes in upper_digits_unique_numpy:\n        upper_digits_votes.append(upper_digits_numpy[upper_digits_numpy == votes].shape[0])\n    \n    for votes in down_digits_unique_numpy:\n        down_digits_votes.append(down_digits_numpy[down_digits_numpy == votes].shape[0])\n    \n    #Select the most voted digits\n    indice_of_the_most_voted_upper_digit = np.argmax(upper_digits_votes)\n    indice_of_the_most_voted_down_digit = np.argmax(down_digits_votes)\n    upper_digit = upper_digits_unique_numpy[indice_of_the_most_voted_upper_digit]\n    down_digit = down_digits_unique_numpy[indice_of_the_most_voted_down_digit]\n    return upper_digit, down_digit","sub_path":"phase_3_display_classification/aux_functions_for_classifier.py","file_name":"aux_functions_for_classifier.py","file_ext":"py","file_size_in_byte":7400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"219041366","text":"from Models import User, Message\nfrom DAO import UserDAO, MessageDAO\nfrom threading import Thread, Lock, current_thread, local\nfrom queue import Queue\nimport server_host\nimport time\nimport json\nimport socket\nimport mysql.connector as mysql\n\n\nclass Server:\n    def __init__(self):\n        self.database_connection = mysql.connect(\n            host='localhost',\n            user='root',\n            password='',\n            database='chitchatdb'\n        )\n        self.cursor = self.database_connection.cursor()\n        self.userDAO = UserDAO(self.database_connection)\n        self.messageDAO = MessageDAO(self.database_connection)\n        self.host = server_host.host\n        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.socket.bind(self.host)\n        self.socket.listen(5)\n        self.connected_clients = {}\n        self.lock = Lock()\n        self.messages_queue = Queue()\n\n    def run(self):\n        while True:\n            print('Waiting for connection')\n            client_connection, client_address = self.socket.accept()\n\n            print('Client at {} connected'.format(client_address))\n            Thread(target=self.handle_connection, args=(client_connection,)).start()\n\n    def handle_connection(self, client_connection: socket.socket):\n        thread_name = current_thread().getName()\n        print('\\t{}: Started'.format(thread_name))\n\n        try:\n            local_data = local()\n            while True:\n                data = bytes()\n                received = bytes()\n\n                while True:\n                    received += client_connection.recv(4096)\n                    if not received:\n                        break\n                    elif len(received) < 4096:\n                        data += received\n                        break\n                    else:\n                        data += received\n\n                request = json.loads(data.decode())\n\n                if request['request'] == 'login' or request['request'] == 'register':\n                    user_data = request['user']\n                    user = User(user_data['username'], user_data['password'])\n\n                    if request['request'] == 'login':\n                        print('\\t{}: Checking if user is registered'.format(thread_name))\n                        user.id = self.userDAO.get_user_id(user)\n\n                        if user.id == 0:\n                            print('\\t{}: User is not registered'.format(thread_name))\n                            response = {'info': 'Invalid username or password'}\n                        else:\n                            print('\\t{}: User logged in'.format(thread_name))\n                            
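# cache the authenticated user in this thread's local storage and register the socket\n                            # so other handler threads can push messages to this client (see send_message below)\n                            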
local_data.user = user\n                            self.connected_clients[local_data.user.id] = client_connection\n                            response = {'info': 'Logged'}\n                    else:\n                        print('\\t{}: Trying to add user to the database'.format(thread_name))\n                        user = self.userDAO.insert(user)\n\n                        if user is None:\n                            print('\\t{}: User already exists'.format(thread_name))\n                            response = {'info': 'User already exists'}\n                        else:\n                            print('\\t{}: User registered'.format(thread_name))\n                            user.id = self.userDAO.get_id_by_username(user.username)\n                            local_data.user = user\n                            response = {'info': 'Successfully registered'}\n\n                    client_connection.sendall(json.dumps(response).encode())\n\n                    if response['info'] == 'Invalid username or password' \\\n                            or response['info'] == 'User already exists':\n                        break\n                elif request['request'] == 'search_for':\n                    print('\\t{}: Searching for user'.format(thread_name))\n                    other_id = self.userDAO.get_id_by_username(request['username'])\n\n                    if other_id == 0:\n                        response = {'search_result': {'info': '{} does not exist'.format(request['username'])}}\n                        client_connection.sendall(json.dumps(response).encode())\n                    else:\n                        json_user = self.userDAO.get_user_by_id(other_id)\n                        local_data.other_client = User(json_user['username'], json_user['password'], _id=other_id)\n                        response = {'search_result': {'info': '{} found'.format(request['username'])}}\n                        client_connection.sendall(json.dumps(response).encode())\n                elif request['request'] == 'prefetch_messages':\n                    print('\\t{}: Fetching user messages'.format(thread_name))\n                    messages = self.messageDAO.prefetch(local_data.user.id, local_data.other_client.id, 20)\n\n                    for message in messages:\n                        if message.sender_id == local_data.user.id:\n                            sender_username = local_data.user.username\n                        else:\n                            sender_username = local_data.other_client.username\n\n                        json_message = {\n                            'fetched_message': {\n                                'sender': sender_username,\n                                'content': message.content\n                            }\n                        }\n                        client_connection.sendall(json.dumps(json_message).encode())\n                        time.sleep(0.08)\n                elif request['request'] == 'send_message':\n                    json_message = request['message']\n                    message = Message(local_data.user.id,\n                                      local_data.other_client.id,\n                                      content=json_message['content'],\n                                      send_time=json_message['send_time'],\n                                      status=1)\n                    self.messageDAO.insert(message)\n                    message = {\n                        'message': {\n                            'sender': local_data.user.username,\n                            'content': json_message['content']\n                        }\n                    }\n                    client_connection.sendall(json.dumps(message).encode())\n\n                    if local_data.other_client.id in self.connected_clients.keys():\n                        connection = self.connected_clients[local_data.other_client.id]\n                        connection.sendall(json.dumps(message).encode())\n\n        except (ConnectionError, Exception):\n            print('\\t{}: Some error happened, connection will be closed'.format(thread_name))\n            client_connection.close()\n\n            for _id, connection in self.connected_clients.items():\n                if connection == client_connection:\n                    with self.lock:\n                        self.connected_clients.pop(_id)\n                    break\n\n        finally:\n            print('\\t{}: Connection closed'.format(thread_name))\n            client_connection.close()\n\n\nif __name__ == '__main__':\n    server = Server()\n    server.run()\n\n","sub_path":"src/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":7238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"306316259","text":"import os\nimport re\nimport locale\nimport random\nimport argparse\nimport ipaddress\nimport subprocess\nfrom select import select\nfrom struct import pack, unpack\nfrom socket import socket, AF_INET, SOCK_DGRAM\n\n\ndef reverse_mapping(mapping):\n    result = {}\n    for 
key in mapping:\n result[mapping[key]] = key\n return result\n\n\nDNS_DEFAULT_PORT = 53\nDEFAULT_BUFFER_SIZE = 64 * 1024\n\nCLASSES = {'IN': 1, 'ANY': 255, '*': 255}\nOPCODES = {'QUERY': 0, 'IQUERY': 1, 'STATUS': 2}\nMESSAGE_TYPE = {'QUERY': 0, 'RESPONSE': 1}\nTYPES = {'A': 1, 'NS': 2, 'CNAME': 5, 'SOA': 6, 'PTR': 12,\n 'MX': 15, 'AAAA': 28, 'ANY': 255, '*': 255}\nRCODES = {'No error': 0, 'Format error': 1, 'Server failure': 2,\n 'Name Error': 3, 'Not Implemented': 4, 'Refused': 5}\n\n\ndef deserialize_enum(mapping, value):\n for k, v in mapping.items():\n if v == value:\n return k\n return 'Unknown'\n\n\ndef deserialize_domain(data, offset):\n domain = ''\n offset_to_return = offset\n shortened = False\n while True:\n length = data[offset]\n offset += 1\n if length & 0xC0 == 0xC0:\n if not shortened:\n offset_to_return = offset + 1\n offset = ((length & (~0xC0)) << 8) + data[offset]\n shortened = True\n elif length & 0xC0 == 0 and length > 0:\n domain += data[offset: offset + length].decode('utf-8') + '.'\n offset += length\n else:\n return domain, offset_to_return if shortened else offset\n\n\ndef serialize_domain(domain):\n return b''.join(\n map(lambda label: pack(\">B\", len(label)) + label.encode('utf-8'),\n domain.split('.')))\n\n\ndef decode_rdata(data, offset, length, dns_type):\n if dns_type in ['A']:\n return str(ipaddress.IPv4Address(data[offset:offset + length]))\n if dns_type in ['AAAA']:\n return str(ipaddress.IPv6Address(data[offset:offset + length]))\n if dns_type in ['PTR', 'NS', 'CNAME']:\n return deserialize_domain(data, offset)[0]\n if dns_type in ['MX']:\n return [('Preference', unpack(\">H\", data[offset:offset + 2])[0]),\n ('Exchange', deserialize_domain(data, offset + 2)[0])]\n if dns_type == 'SOA':\n mname, offset = deserialize_domain(data, offset)\n rname, offset = deserialize_domain(data, offset)\n serial, refresh, retry, expire, minimum = unpack(\n \">5I\", data[offset:offset + 20])\n return [('MNAME', mname), ('RNAME', rname), ('SERIAL', serial),\n ('REFRESH', refresh), ('RETRY', retry),\n ('EXPIRE', expire), ('MINIMUM', minimum)]\n return data[offset:offset + length]\n\n\ndef encode_rdata(rdata, dns_type):\n if dns_type == 'A':\n return b''.join(map(lambda x: int(x).to_bytes(1, 'big'),\n rdata.split('.')))\n if dns_type == 'AAAA':\n return b''.join(map(bytes.fromhex, rdata.split(':')))\n if dns_type in ['PTR', 'NS', 'CNAME']:\n return serialize_domain(rdata)\n\n\ndef get_domain_name(target):\n if re.match(r'\\d+\\.\\d+\\.\\d+\\.\\d+', target):\n return 'PTR', '.'.join(reversed(target.split('.'))) + \".IN-ADDR.ARPA.\"\n return 'A', target if target[-1] == '.' 
else target + '.'\n\n\nclass QuestionEntry:\n def __init__(self, domain, dns_type, dns_class):\n self.domain = domain\n self.dns_type = dns_type\n self.dns_class = dns_class\n\n @classmethod\n def deserialize(cls, data, offset):\n domain, offset = deserialize_domain(data, offset)\n dns_type, dns_class = unpack(\">HH\", data[offset:offset + 4])\n return QuestionEntry(domain, dns_type, dns_class), offset + 4\n\n def serialize(self):\n return serialize_domain(self.domain) + \\\n pack(\">HH\", self.dns_type, self.dns_class)\n\n def to_plain_object(self):\n deser_type = deserialize_enum(TYPES, self.dns_type)\n deser_class = deserialize_enum(CLASSES, self.dns_class)\n return [(\"Domain\", self.domain),\n (\"TYPE\", \"%s (%d)\" % (deser_type, self.dns_type)),\n (\"CLASS\", \"%s (%d)\" % (deser_class, self.dns_class))]\n\n\nclass ResourceRecord:\n def __init__(self, domain, dns_type, dns_class, ttl, rdlength, rdata):\n self.domain = domain\n self.dns_type = dns_type\n self.dns_class = dns_class\n self.ttl = ttl\n self.rdata = rdata\n self.rdlength = rdlength\n\n @classmethod\n def deserialize(cls, data, offset):\n domain, offset = deserialize_domain(data, offset)\n dns_type, dns_class, ttl, rdlen = unpack(\">HHIH\",\n data[offset:offset + 10])\n rdata = decode_rdata(data, offset + 10, rdlen,\n deserialize_enum(TYPES, dns_type))\n return ResourceRecord(domain, dns_type, dns_class, ttl, rdlen, rdata),\\\n offset + 10 + rdlen\n\n def serialize(self):\n return serialize_domain(self.domain) \\\n + pack(\">HHIH\", self.dns_type, self.dns_class,\n self.ttl, self.rdlength) \\\n + encode_rdata(self.rdata,\n deserialize_enum(TYPES, self.dns_type))\n\n def to_plain_object(self):\n deser_type = deserialize_enum(TYPES, self.dns_type)\n deser_class = deserialize_enum(CLASSES, self.dns_class)\n return [(\"Domain\", self.domain),\n (\"TYPE\", \"%s (%d)\" % (deser_type, self.dns_type)),\n (\"CLASS\", \"%s (%d)\" % (deser_class, self.dns_class)),\n (\"TTL\", self.ttl),\n (\"RDLENGTH\", self.rdlength),\n (\"RDATA\", self.rdata)]\n\n\ndef deserialize_list(cls, data, offset, count):\n result = []\n for i in range(count):\n rr, offset = cls.deserialize(data, offset)\n result.append(rr)\n return result, offset\n\n\ndef deserialize_resource_records(data, offset, count):\n return deserialize_list(ResourceRecord, data, offset, count)\n\n\ndef list_to_plain_object(prefix, list):\n return [(\"%s %d\" % (prefix, i), list[i].to_plain_object())\n for i in range(len(list))]\n\n\ndef resource_records_to_plain_object(list):\n return list_to_plain_object(\"Resource record\", list)\n\n\nclass Packet:\n def __init__(self, id, qr, opcode, aa, tc, rd, ra,\n rcode, questions, answers, authority, additional):\n self.id = id\n self.qr = qr\n self.opcode = opcode\n self.aa = aa\n self.tc = tc\n self.rd = rd\n self.ra = ra\n self.rcode = rcode\n self.questions = questions\n self.answers = answers\n self.authority = authority\n self.additional = additional\n\n @classmethod\n def form_request(cls, target, recursion=True, dns_type=None, dns_cls='IN'):\n default_type, domain = get_domain_name(target)\n question = QuestionEntry(\n domain,\n TYPES[default_type] if dns_type is None else TYPES[dns_type],\n CLASSES[dns_cls])\n return Packet(random.randint(0, 1 << 16), MESSAGE_TYPE['QUERY'],\n OPCODES['QUERY'], 0, 0,\n 1 if recursion else 0,\n 1 if recursion else 0,\n RCODES['No error'],\n [question], [], [], [])\n\n @classmethod\n def deserialize(cls, data):\n id, options, questions_count, answers_count, \\\n authority_count, additional_count = 
unpack(\">HHHHHH\", data[:12])\n        qr = options >> 15\n        opcode = (options >> 11) & 0xF\n        aa = options >> 10 & 0x1\n        tc = options >> 9 & 0x1\n        rd = options >> 8 & 0x1\n        ra = options >> 7 & 0x1\n        rcode = options & 0xF\n        offset = 12\n        questions, offset = deserialize_list(\n            QuestionEntry, data, offset, questions_count)\n        answers, offset = deserialize_resource_records(\n            data, offset, answers_count)\n        authority, offset = deserialize_resource_records(\n            data, offset, authority_count)\n        additional, offset = deserialize_resource_records(\n            data, offset, additional_count)\n        return Packet(id, qr, opcode, aa, tc, rd, ra, rcode,\n                      questions, answers, authority, additional)\n\n    def serialize(self):\n        options = self.qr << 15 | self.opcode << 11 | self.aa << 10 \\\n                  | self.tc << 9 | self.rd << 8 | self.ra << 7 | self.rcode\n        header = pack('>HHHHHH', self.id, options, len(self.questions),\n                      len(self.answers), len(self.authority),\n                      len(self.additional))\n        questions = b''.join(map(lambda qe: qe.serialize(), self.questions))\n        answers = b''.join(map(lambda rr: rr.serialize(), self.answers))\n        authority = b''.join(map(lambda rr: rr.serialize(), self.authority))\n        additional = b''.join(map(lambda rr: rr.serialize(), self.additional))\n        return header + questions + answers + authority + additional\n\n    def to_plain_object(self):\n        headers = [(\"ID\", self.id),\n                   (\"QR\", deserialize_enum(MESSAGE_TYPE, self.qr)),\n                   (\"OPCODE\", deserialize_enum(OPCODES, self.opcode)),\n                   (\"AA\", \"YES\" if self.aa == 1 else \"NO\"),\n                   (\"TC\", \"YES\" if self.tc == 1 else \"NO\"),\n                   (\"RD\", \"YES\" if self.rd == 1 else \"NO\"),\n                   (\"RA\", \"YES\" if self.ra == 1 else \"NO\"),\n                   (\"Rcode\", deserialize_enum(RCODES, self.rcode))]\n        counts = [(\"Questions\", len(self.questions)),\n                  (\"Answers\", len(self.answers)),\n                  (\"Authority\", len(self.authority)),\n                  (\"Additional\", len(self.additional))]\n        sections = [(\"HEADERS\", headers), (\"COUNTS\", counts)]\n        if len(self.questions) > 0:\n            po = (\"QUESTIONS\",\n                  list_to_plain_object(\"Question Entry\", self.questions))\n            sections.append(po)\n        if len(self.answers) > 0:\n            po = (\"ANSWERS\", resource_records_to_plain_object(self.answers))\n            sections.append(po)\n        if len(self.authority) > 0:\n            po = (\"AUTHORITY\",\n                  resource_records_to_plain_object(self.authority))\n            sections.append(po)\n        if len(self.additional) > 0:\n            po = resource_records_to_plain_object(self.additional)\n            sections.append((\"ADDITIONAL\", po))\n        return sections\n\n\ndef get_default_dns_servers():\n    if os.name == \"nt\":\n        output = subprocess\\\n            .check_output('ipconfig /all')\\\n            .decode(locale.getpreferredencoding())\n        raw_servers = re\\\n            .findall(r'(?:DNS Servers|DNS-серверы)\\D+:([.\\d\\s]+)', output)\n        not_none_lam = lambda x: x != ''\n        split_lam = lambda s: re.split(r'\\s+', s)\n        servers = filter(not_none_lam, sum(map(split_lam, raw_servers), []))\n        return list(servers)\n    # fall back to an empty list on non-Windows hosts so the argparse default stays iterable\n    return []\n\n\ndef dns_type_handler(dns_type):\n    if dns_type not in TYPES:\n        raise argparse.ArgumentTypeError(\"Unknown DNS type '%s'\" % dns_type)\n    return dns_type\n\n\ndef dns_class_handler(dns_class):\n    if dns_class not in CLASSES:\n        raise argparse.ArgumentTypeError(\"Unknown DNS class '%s'\" % dns_class)\n    return dns_class\n\n\ndef get_args_parser():\n    parser = argparse.ArgumentParser(description=\"DNS tool\")\n    parser.add_argument(\"target\",\n                        help=\"Domain name, IPv4 or IPv6 to be resolved\")\n    parser.add_argument(\"server\",\n                        nargs=\"*\",\n                        help=\"Domain servers to use\",\n                        default=get_default_dns_servers())\n    
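# note: on non-Windows hosts get_default_dns_servers() falls back to an empty list,\n    # so at least one server should normally be passed on the command line\n    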
parser.add_argument(\"-t\", \"--timeout\",\n                        help=\"Communication timeout in seconds (default 2)\",\n                        default=2, type=int)\n    parser.add_argument(\"-v\", \"--verbose\",\n                        help=\"Show verbose packet structure\",\n                        action=\"store_true\", default=False)\n    parser.add_argument(\"--dns-type\",\n                        help=\"Query server with this DNS type\",\n                        type=dns_type_handler, default='A')\n    parser.add_argument(\"--dns-class\",\n                        help=\"Query server with this DNS class\",\n                        type=dns_class_handler, default='IN')\n    parser.add_argument(\"-r\", \"--no-recursion\",\n                        help=\"Disable recursion\",\n                        action=\"store_true\", default=False)\n    return parser\n\n\ndef get_address(source):\n    chunks = source.split(':')\n    return chunks[0], int(chunks[1]) if len(chunks) > 1 else DNS_DEFAULT_PORT\n\n\ndef get_raw_response(args, data):\n    for server in args.server:\n        try:\n            address = get_address(server)\n            with socket(AF_INET, SOCK_DGRAM) as sock:\n                sock.sendto(data, address)\n                if select([sock], [], [], args.timeout)[0]:\n                    return sock.recvfrom(DEFAULT_BUFFER_SIZE)[0]\n        except Exception:\n            print('Failed to receive response from %s' % server)\n\n\ndef stringify_plain_object(obj, indent=0):\n    result = \"\"\n    for values in obj:\n        if isinstance(values[1], list):\n            t = (values[0], stringify_plain_object(values[1], indent + 1))\n            result += \"\\t\" * indent + \"%s\\n%s\\n\" % t\n        else:\n            result += \"\\t\" * indent + \"%-10s: %s\\n\" % (values[0], values[1])\n    return result\n\n\ndef stringify_rr_short(rr):\n    return \"%s %s %s %s %s %s\" % (rr.domain,\n                                  deserialize_enum(TYPES, rr.dns_type),\n                                  deserialize_enum(CLASSES, rr.dns_class),\n                                  rr.ttl, rr.rdlength, rr.rdata)\n\nif __name__ == \"__main__\":\n    parser = get_args_parser()\n    args = parser.parse_args()\n    request = Packet.form_request(args.target,\n                                  dns_type=args.dns_type,\n                                  dns_cls=args.dns_class,\n                                  recursion=not args.no_recursion)\n    raw_packet = get_raw_response(args, request.serialize())\n    if raw_packet:\n        try:\n            response = Packet.deserialize(raw_packet)\n            if args.verbose:\n                print(stringify_plain_object(response.to_plain_object()))\n            for rr in response.answers:\n                print(stringify_rr_short(rr))\n            if len(response.answers) == 0 and len(response.authority) > 0:\n                print(\"No answer. Set recursive or use authority:\")\n                for rr in response.authority:\n                    print(stringify_rr_short(rr))\n            if len(response.answers) == 0 and len(response.authority) == 0:\n                s = \"No answer nor authority. 
Something wrong is going on here\"\n                print(s)\n        except Exception:\n            print('Failed to parse response')\n    else:\n        print('Failed to receive response')\n","sub_path":"dnscache/nszoom.py","file_name":"nszoom.py","file_ext":"py","file_size_in_byte":14828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"626980656","text":"# Imports\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\n\n# Data loading\ntraining_data = pd.read_csv('train.csv')\n\n# Data preprocessing and putting it into a data structure that can be\n# used by a classifier\ntraining_data[\"Age\"] = training_data[\"Age\"].fillna(0)\ntraining_data[\"Sex\"].loc[training_data[\"Sex\"] == \"male\"] = 0\ntraining_data[\"Sex\"].loc[training_data[\"Sex\"] == \"female\"] = 1\ntraining_data[\"Embarked\"] = training_data[\"Embarked\"].fillna(0)\ntraining_data[\"Embarked\"].loc[training_data[\"Embarked\"] == \"C\"] = 1\ntraining_data[\"Embarked\"].loc[training_data[\"Embarked\"] == \"Q\"] = 2\ntraining_data[\"Embarked\"].loc[training_data[\"Embarked\"] == \"S\"] = 3\n\nX = training_data[[\"Pclass\", \"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Fare\", \"Embarked\"]]\ny = training_data[\"Survived\"]\n\n# Create classifier with parameters\nlogRegClasif = LogisticRegression(max_iter=500, solver='lbfgs')\n\n# Train classifier\nlogRegClasif.fit(X, y)\n\n# Load test dataset and use classifier to make predictions\ntest_data = pd.read_csv('test.csv')\n\ntest_data[\"Age\"] = test_data[\"Age\"].fillna(test_data[\"Age\"].median())\ntest_data[\"Sex\"].loc[test_data[\"Sex\"] == \"male\"] = 0\ntest_data[\"Sex\"].loc[test_data[\"Sex\"] == \"female\"] = 1\ntest_data[\"Embarked\"] = test_data[\"Embarked\"].fillna(0)\ntest_data[\"Embarked\"].loc[test_data[\"Embarked\"] == \"C\"] = 1\ntest_data[\"Embarked\"].loc[test_data[\"Embarked\"] == \"Q\"] = 2\ntest_data[\"Embarked\"].loc[test_data[\"Embarked\"] == \"S\"] = 3\ntest_data[\"Fare\"] = test_data[\"Fare\"].fillna(0)\n\nX_test = test_data[[\"Pclass\", \"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Fare\", \"Embarked\"]]\n\npredictions = logRegClasif.predict(X_test)\n\n# Save test predictions to disk in the format required for kaggle.\nsubmission = pd.DataFrame({\"PassengerId\": test_data[\"PassengerId\"], \"Survived\": predictions})\nsubmission.to_csv(\"submissions_and_results/submission3.csv\", index=False)\n","sub_path":"Kaggle/firstAttempt/Titanic/code3.py","file_name":"code3.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"536686516","text":"from aip import AipOcr\r\n\r\n\"\"\" Your APPID, AK (API key) and SK (secret key) \"\"\"\r\nAPP_ID = '19517004'\r\nAPI_KEY = 'kvgz4GeHHiQikfEUHy0pVGf6'\r\nSECRET_KEY = 'YtKuSBZAjChRvQWGkLjB8sLp857jZYo4'\r\n\r\nclient = AipOcr(APP_ID, API_KEY, SECRET_KEY)\r\n\r\ndef get_file_content(filepath):\r\n    with open(filepath,'rb') as f:\r\n        return f.read()\r\n\r\ndef get_img_content(img):\r\n    image_content=''\r\n    content = client.basicAccurate(image=img)\r\n    # print(content)\r\n    for words in content['words_result']:\r\n        # print(words) # a dict\r\n        image_content += words['words']\r\n    print(image_content)\r\n\r\nif __name__ == '__main__':\r\n    img = get_file_content('G:\\\\Ph.D._Thesis\\\\Dissertation\\\\Collection\\\\安徽\\\\2014\\\\5\\\\5_02.png')\r\n    get_img_content(img)","sub_path":"图片文字识别.py","file_name":"图片文字识别.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"46020538","text":"from rest_framework import viewsets, mixins\nfrom rest_framework.decorators import list_route\nfrom django.core.cache import cache\nfrom rest_framework.response import Response\n \nfrom cart.models import Cart\nfrom cart.serializers import CartSerializer\nfrom user.UserAuthtication import UserTokenAuthentication\nfrom user.models import AXFUser\nfrom utils.error import PramsException\n\n\nclass CartView(viewsets.GenericViewSet,\n mixins.ListModelMixin,\n mixins.UpdateModelMixin):\n\n queryset = Cart.objects.all()\n serializer_class = CartSerializer\n # 认证\n authentication_classes = (UserTokenAuthentication,)\n\n def list(self, request, *args, **kwargs):\n # 1. 获取User, 通过token\n # token = request.query_params.get('token')\n # user_id = cache.get(token)\n # 2. 获取购物车数据\n cart = Cart.objects.filter(c_user_id=request.user.id).all()\n serializer = self.get_serializer(cart, many=True)\n\n res = {\n 'carts': serializer.data,\n 'total_price': self.total_price(request)\n }\n return Response(res)\n\n def total_price(self, request):\n carts = Cart.objects.filter(c_user_id=request.user.id,\n c_is_select=True)\n total = 0\n for cart in carts:\n total += cart.c_goods.price * cart.c_goods_num\n return '{:.2f}'.format(total)\n\n @list_route(methods=['POST'])\n def add_cart(self, request):\n # 添加购物车\n # 前端传递: token, goodsid\n # 1. 获取User, 通过token\n # token = self.request.data.get('token')\n # user_id = cache.get(token)\n # if user_id:\n # user = AXFUser.objects.get(pk=user_id)\n user = request.user\n # 2. 获取购物车数据\n goodsid = self.request.data.get('goodsid')\n cart = Cart.objects.filter(c_user=user,\n c_goods_id=goodsid).first()\n # 3. 判断购物车数据是否存在,并做对应的处理\n c_goods_num = 1\n if cart:\n cart.c_goods_num += 1\n cart.save()\n c_goods_num = cart.c_goods_num\n else:\n Cart.objects.create(c_user=user,\n c_goods_id=goodsid)\n\n res = {\n 'c_goods_num': c_goods_num\n }\n return Response(res)\n # raise PramsException({'code': 1008, 'msg': '无法添加商品,请去登录'})\n\n @list_route(methods=['POST'])\n def sub_cart(self, request):\n # 减少购物车中的商品数量\n # 1. 
Get this item's record in the cart\n goodsid = request.data.get('goodsid')\n user = request.user\n cart = Cart.objects.filter(c_user=user,\n c_goods_id=goodsid).first()\n c_goods_num = 0\n if cart:\n if cart.c_goods_num > 1:\n # The quantity is greater than 1, so decrement it by 1\n cart.c_goods_num -= 1\n cart.save()\n c_goods_num = cart.c_goods_num\n else:\n # The quantity is 1, so delete the record\n cart.delete()\n\n res = {\n 'c_goods_num': c_goods_num\n }\n\n return Response(res)\n\n def update(self, request, *args, **kwargs):\n # /api/cart/cart/1/ PATCH\n # Parameter: token\n instance = self.get_object()\n instance.c_is_select = not instance.c_is_select\n instance.save()\n return Response({'code': 200, 'msg': 'Item selection state updated successfully'})\n\n @list_route(methods=['PATCH'])\n def all_update(self, request):\n # /api/cart/cart/all_update/ PATCH\n # Parameter: token\n user = request.user\n carts = Cart.objects.filter(c_user=user).all()\n for cart in carts:\n cart.c_is_select = not cart.c_is_select\n cart.save()\n\n return Response({'code': 200, 'msg': 'All item selection states updated successfully'})\n","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"65312518","text":"\r\n#!/usr/bin/env python3\r\n\r\nimport sys\r\nimport math\r\n\r\n'''\r\nChase Dickerson\r\nDijkstra's Algorithm\r\n4/9/2019\r\n'''\r\n\r\n'''\r\nDijkstra's Algorithm is used to find the shortest path between a start vertex and\r\n every other vertex in the graph\r\n'''\r\ndef dijkstras(adjacency_list, start_vertex):\r\n \r\n # Finding the length of the graph (will be used to loop through graph)\r\n\tn = len(adjacency_list)\r\n\tinfinity = math.inf\r\n\r\n # Setting all path lengths to infinity\r\n\tpath_lengths = {val: infinity for val in adjacency_list}\r\n\tpath_lengths[start_vertex] = 0\r\n\tvisited = {start_vertex} \r\n\r\n\twhile len(visited) != n:\r\n\t\tmin_path_length = infinity\r\n\t\tv_star = None\r\n\t\tw_star = None\r\n\r\n # Looping through vertices that have been added to visited\r\n\t\tfor v_from in visited:\r\n\t\t\tfor v, weight in adjacency_list[v_from]:\r\n\t\t\t\tif v not in visited:\r\n # Dijkstra's greedy criterion \r\n\t\t\t\t\tdgc = path_lengths[v_from] + weight\r\n\t\t\t\t\tif dgc < min_path_length:\r\n\t\t\t\t\t\tmin_path_length = dgc\r\n\t\t\t\t\t\tv_star = v_from\r\n\t\t\t\t\t\tw_star = v\r\n\r\n # Adding w star to visited \r\n\t\tvisited.add(w_star)\r\n # Adding the minimum path length to path_lengths\r\n\t\tpath_lengths[w_star] = min_path_length\r\n\treturn path_lengths\r\n\r\ndef main():\r\n\r\n file_name = input('Please provide a filename containing an adjacency list:\\n')\r\n start_vertex = input('Please provide a start vertex label (1..n):\\n')\r\n\r\n adjacency_list = {}\r\n\r\n # Reading in the file as an adjacency list\r\n with open(file_name, 'r') as adjacency_list_file:\r\n for line in adjacency_list_file:\r\n vals = [int(val) for val in line.replace(\",\", \" \").split()]\r\n adjacency_list[vals[0]] = [e for e in zip(vals[1::2], vals[2::2])]\r\n length = dijkstras(adjacency_list, int(start_vertex)) \r\n\r\n # Printing the lengths\r\n for num in length:\r\n if num < len(length):\r\n print(length[num], end=\",\")\r\n else:\r\n print(length[num], end=\"\\n\")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n ","sub_path":"Dijkstras/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"346046279","text":"# coding=utf-8\nimport pymysql\n\n\ndef 
delete_domain_record(conn, domain):\n cursor = conn.cursor()\n sql = \"DELETE FROM `dns_info` WHERE `domain`=%s\"\n cursor.execute(query=sql, args=[domain])\n conn.commit()\n return cursor.rowcount\n\n\ndef insert_domain_record(conn, record_list):\n cursor = conn.cursor()\n sql = \"INSERT `dns_info`(`record`,`domain`,`value`,`status`,`type`,`ttl`,`id`)\" \\\n \"VALUES(%s,%s,%s,%s,%s,%s,%s)\"\n for record in record_list:\n cursor.execute(query=sql, args=[record[\"record\"], record[\"domain\"], record[\"value\"],\n record[\"status\"], record[\"type\"], record[\"ttl\"], record[\"id\"]])\n conn.commit()\n return cursor.rowcount\n\n\ndef get_domain_record(conn, domain):\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n sql = \"SELECT `domain`,`record`,`value`,`status`,`type`,`ttl`,`id` FROM `dns_info` WHERE `domain`=%s\"\n cursor.execute(query=sql, args=[domain])\n return list(cursor.fetchall())\n\n\ndef read_proxy_rule(conn, user):\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n sql = \"SELECT * FROM `proxy_rule` WHERE `user`=%s\"\n cursor.execute(sql, args=[user])\n return cursor.fetchall()\n\n\ndef update_proxy_rule(conn, username, rule_list):\n cursor = conn.cursor()\n sql = \"DELETE FROM `proxy_rule` WHERE `user`=%s\"\n cursor.execute(query=sql, args=[username])\n sql = \"REPLACE INTO `proxy_rule`(`user`,`type`,`content`,`group`) VALUES (%s,%s,%s,%s)\"\n for rule in rule_list:\n cursor.execute(query=sql, args=[username, rule[\"type\"], rule[\"content\"], rule[\"group\"]])\n conn.commit()\n return cursor.rowcount\n","sub_path":"service/Network/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"28180270","text":"\"\"\" database submodule associated to the postgres uservice\n\n\"\"\"\nimport logging\nimport socket\n\nimport tenacity\nfrom aiopg.sa import Engine\nfrom servicelib.common_aiopg_utils import (\n DataSourceName,\n PostgresRetryPolicyUponInitialization,\n create_pg_engine,\n is_postgres_responsive,\n)\n\nfrom .config import POSTGRES_DB, POSTGRES_ENDPOINT, POSTGRES_PW, POSTGRES_USER\n\nlog = logging.getLogger(__name__)\n\n\n@tenacity.retry(**PostgresRetryPolicyUponInitialization().kwargs)\nasync def wait_till_postgres_responsive(dsn: DataSourceName) -> None:\n if not is_postgres_responsive(dsn):\n raise Exception\n\n\nclass DBContextManager:\n def __init__(self):\n self._db_engine: Engine = None\n\n async def __aenter__(self):\n dsn = DataSourceName(\n application_name=f\"{__name__}_{id(socket.gethostname())}\",\n database=POSTGRES_DB,\n user=POSTGRES_USER,\n password=POSTGRES_PW,\n host=POSTGRES_ENDPOINT.split(\":\")[0],\n port=POSTGRES_ENDPOINT.split(\":\")[1],\n )\n\n log.info(\"Creating pg engine for %s\", dsn)\n await wait_till_postgres_responsive(dsn)\n engine = await create_pg_engine(dsn, minsize=1, maxsize=4)\n self._db_engine = engine\n return self._db_engine\n\n async def __aexit__(self, exc_type, exc, tb):\n self._db_engine.close()\n await self._db_engine.wait_closed()\n log.debug(\n \"engine '%s' after shutdown: closed=%s, size=%d\",\n self._db_engine.dsn,\n self._db_engine.closed,\n self._db_engine.size,\n )\n","sub_path":"services/sidecar/src/simcore_service_sidecar/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"356024850","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as 
html\nfrom dash.dependencies import Input, Output\nfrom plotly import graph_objs as go\nimport pathlib\nimport numpy as np\nfrom sklearn.datasets import make_spd_matrix\n\napp = dash.Dash(__name__, meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width\"}])\nserver = app.server\n\n# public mapbox key\nmapbox_access_token = \"pk.eyJ1IjoicGxvdGx5bWFwYm94IiwiYSI6ImNqdnBvNDMyaTAxYzkzeW5ubWdpZ2VjbmMifQ.TXcBE-xg9BFdV2ocecc_7g\"\n\n# dict of current active fires\nfireName = 'Ferguson Fire'\nlist_of_fires = {\n \"Ferguson Fire\": {\"lat\": 37.652, \"lon\": -119.881},\n \"Camp Fire\": {\"lat\": 39.767380, \"lon\": -121.633759},\n \"Klamathon Fire\": {\"lat\": 41.893332, \"lon\": -122.534655},\n}\n\n# Get relative data folder\nPATH = pathlib.Path(__file__).parent\nDATA_PATH = PATH.joinpath(\"data\").resolve()\n\n# Initialize data frame\nfiredata = np.load(DATA_PATH.joinpath('state_evolution.npy'))\n\n# Layout of Dash App\napp.layout = html.Div(\n children=[\n html.Div(\n className=\"row\",\n children=[\n # Column for user controls\n html.Div(\n className=\"two columns div-user-controls\",\n children=[\n html.H2(\"DeepFire.AI\"),\n html.Div(\n className=\"row\",\n children=[\n html.Div(\n className=\"div-for-dropdown\",\n children=[\n # Dropdown for active fires\n dcc.Dropdown(\n id=\"fire-selector\",\n options=[\n {\"label\": i, \"value\": i}\n for i in list_of_fires\n ],\n value='Ferguson Fire',\n )\n ],\n ),\n ],\n ),\n html.P(id=\"fire-warnings\"),\n dcc.Graph(id=\"histogram\"),\n ],\n ),\n # Column for app graphs and plots\n html.Div(\n className=\"ten columns div-for-charts bg-grey\",\n children=[\n dcc.Graph(id=\"map-graph\"),\n ],\n ),\n ],\n )\n ]\n)\n\n# Update Histogram Figure based on fire chosen\n@app.callback(\n Output(\"histogram\", \"figure\"),\n [Input(\"fire-selector\", \"value\")],\n)\ndef update_histogram(selection):\n\n [xVal, yVal, colorVal] = np.array([1,2]), np.array([2,3]), 'red'\n\n layout = go.Layout(\n bargap=0.01,\n bargroupgap=0,\n barmode=\"group\",\n margin=go.layout.Margin(l=0, r=0, t=10, b=50),\n showlegend=False,\n plot_bgcolor=\"rgb(25, 26, 26)\",\n paper_bgcolor=\"rgb(25, 26, 26)\",\n dragmode=\"select\",\n font=dict(color=\"white\"),\n xaxis=dict(\n range=[-0.5, 3.5],\n showgrid=False,\n nticks=2,\n fixedrange=True,\n ),\n yaxis=dict(\n range=[0, max(yVal)* 5 / 4],\n showticklabels=False,\n showgrid=False,\n fixedrange=True,\n rangemode=\"nonnegative\",\n zeroline=False,\n ),\n annotations=[\n dict(\n x=xi,\n y=yi,\n text=str(yi),\n xanchor=\"center\",\n yanchor=\"bottom\",\n showarrow=False,\n font=dict(color=\"white\"),\n )\n for xi, yi in zip(xVal, yVal)\n ],\n )\n\n return go.Figure(\n data=[\n go.Bar(x=xVal, y=yVal, marker=dict(color=colorVal), hoverinfo=\"x\"),\n go.Scatter(\n opacity=0,\n x=xVal,\n y=yVal / 2,\n hoverinfo=\"none\",\n mode=\"markers\",\n marker=dict(color=\"rgb(66, 134, 244, 0)\", symbol=\"square\", size=40),\n visible=True,\n ),\n ],\n layout=layout,\n )\n\n\n@app.callback(\n Output(\"map-graph\", \"figure\"),\n [\n Input(\"fire-selector\", \"value\"),\n ],\n)\ndef update_graph(selectedLocation):\n zoom = 9.0\n latInitial = 37.652\n lonInitial = -119.881\n bearing = 0\n\n random1, random2 = 0.1*np.random.rand(), 0.005*np.random.rand()\n cov = np.array([[random1, random2],[random2, random1]])\n cov = np.dot(cov, cov.T)\n mean = [latInitial, lonInitial]\n \n lat_longs = np.random.multivariate_normal(mean, cov, size=(1000))\n planes_lat_longs = np.random.multivariate_normal(mean, cov, size=(10))\n trucks_lat_longs = 
np.random.multivariate_normal(mean, cov, size=(20))\n\n if selectedLocation:\n zoom = zoom\n latInitial = list_of_fires[selectedLocation][\"lat\"]\n lonInitial = list_of_fires[selectedLocation][\"lon\"]\n\n random1, random2 = 0.1*np.random.rand(), 0.005*np.random.rand()\n cov = np.array([[random1, random2],[random2, random1]])\n cov = np.dot(cov, cov.T)\n mean = [latInitial, lonInitial]\n \n lat_longs = np.random.multivariate_normal(mean, cov, size=(1000))\n planes_lat_longs = np.random.multivariate_normal(mean, cov, size=(10))\n trucks_lat_longs = np.random.multivariate_normal(mean, cov, size=(20))\n\n return go.Figure(\n data=[\n # Data for fire\n go.Scattermapbox(\n lat= lat_longs[:,0], \n lon= lat_longs[:,1],\n mode=\"markers\",\n hoverinfo=\"lat+lon+text\",\n marker=dict(\n showscale=True,\n # color=np.append(np.insert(0, 0, 0), 1.0),\n opacity=0.5,\n size=10,\n color='crimson',\n # colorbar=dict(\n # title=\"Fire Intensity\",\n # x=0.93,\n # xpad=0,\n # nticks=24,\n # tickfont=dict(color=\"#d8d8d8\"),\n # titlefont=dict(color=\"#d8d8d8\"),\n # thicknessmode=\"pixels\",\n # ),\n ),\n ),\n go.Scattermapbox(\n lat= planes_lat_longs[:,0], \n lon= planes_lat_longs[:,1],\n mode=\"markers\",\n hoverinfo=\"lat+lon+text\",\n marker=dict(\n symbol='triangle',\n opacity=0.4,\n size=20,\n color='blue',\n ),\n ),\n go.Scattermapbox(\n lat= trucks_lat_longs[:,0], \n lon= trucks_lat_longs[:,1],\n mode=\"markers\",\n hoverinfo=\"lat+lon+text\",\n marker=dict(\n symbol='square',\n opacity=0.4,\n size=20,\n color='purple',\n ),\n ),\n # Plot of fire locations on the map\n go.Scattermapbox(\n lat=[list_of_fires[i][\"lat\"] for i in list_of_fires],\n lon=[list_of_fires[i][\"lon\"] for i in list_of_fires],\n mode=\"markers\",\n hoverinfo=\"text\",\n text=[i for i in list_of_fires],\n opacity=0.6,\n marker=dict(size=15),\n ),\n\n ],\n layout=go.Layout(\n autosize=True,\n margin=go.layout.Margin(l=0, r=35, t=0, b=0),\n showlegend=False,\n mapbox=dict(\n accesstoken=mapbox_access_token,\n center=dict(lat=latInitial, lon=lonInitial),\n style=\"dark\",\n bearing=bearing,\n zoom=zoom,\n ),\n updatemenus=[\n dict(\n buttons=(\n [\n dict(\n args=[\n {\n \"mapbox.zoom\": 9,\n \"mapbox.center.lon\": \"-119.881\",\n \"mapbox.center.lat\": \"37.652\",\n \"mapbox.bearing\": 0,\n \"mapbox.style\": \"dark\",\n }\n ],\n label=\"Reset Zoom\",\n method=\"relayout\",\n )\n ]\n ),\n direction=\"left\",\n pad={\"r\": 0, \"t\": 0, \"b\": 0, \"l\": 0},\n showactive=False,\n type=\"buttons\",\n x=0.45,\n y=0.02,\n xanchor=\"left\",\n yanchor=\"bottom\",\n bgcolor=\"#323130\",\n borderwidth=1,\n bordercolor=\"#6d6d6d\",\n font=dict(color=\"#FFFFFF\"),\n )\n ],\n ),\n )\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","sub_path":"front_end/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"93769014","text":"import matplotlib.pyplot as plt\nimport mysql.connector as con\n\n\nfrom GrabCodewars import helper as h\n\nmydb = con.connect(host=\"remotemysql.com\", user=h.MySQLDB, passwd=h.MySQLpassw)\ncursor = mydb.cursor()\n\nsql = \"SELECT distinct username,date_format(date,'%Y.%m.%d') as date, honor FROM {}.Ranks order by username,date;\".format(h.MySQLDB)\ncursor.execute(sql)\nk = cursor.fetchall() # [('name',)] == fetchall()\nd = {}\nfor n in k:\n name = ''\n if name != n[0]:\n l = [x[1:] for x in list(filter(lambda x:x[0] ==n[0] ,k))]\n name = n[0]\n d[name] = l\n# print(d)\n\nfor n in d:\n x 
= list(range(0,len(d[n])))\n y = [x[1] for x in d[n]]\n print(x, y)\n plt.plot(x, y, label = n)\n\nplt.xlabel('Date')\nplt.ylabel('Honor')\nplt.title(\"CW Plot\")\n# plt.legend(bbox_to_anchor=(0.07,1,0.7, 1), loc='upper left', )\nplt.legend(bbox_to_anchor=(1,1), loc='upper left', )\n\nplt.show()\n# style\nplt.style.use('seaborn-darkgrid')\n\n","sub_path":"GrabCodewars/Codewars_Graph.py","file_name":"Codewars_Graph.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"61557782","text":"from flask import json,g\nfrom pymongo import MongoClient\n\nclass WorkFlowModel():\n def __init__(self, jobName=None,username=None):\n self.collection = getattr(g,'database',MongoClient('localhost', 27017)).iins_sys\n self.jobName = jobName\n self.taskList = list(self.collection.workflow.find({\"jobName\": jobName, \"username\": username}, {\"_id\": 0}))\n if len(self.taskList) == 0:\n self.taskList = list(self.collection.workflow_temp.find({\"jobName\": jobName}, {\"_id\": 0}))\n for task in self.taskList:\n if username != None:\n task['username'] = username\n self.collection.workflow.update_one(\n {\"jobName\": jobName, \"username\": username, \"taskID\": task['taskID']},\n {\"$set\": task}, upsert=True)\n\n\n def getWorkflow(self):\n return self.taskList\n\n def set(self,taskID,func, **kwargs):\n workflows = self.collection.workflow.find({\"processName\": self.processName, \"userName\": self.userName}, {\"_id\": 0}).sort(\n \"taskID\", 1)\n for w in workflows:\n if w['taskID']==taskID:\n if func:\n kwargs['status']='saved'\n self.collection.workflow.update_one({\"processName\": self.processName, \"userName\": self.userName, \"taskID\": w['taskID']}, {\"$set\": kwargs})\n else:\n if 'status' in kwargs:\n del kwargs['status']\n self.collection.workflow.update_one({\"processName\": self.processName, \"userName\": self.userName, \"taskID\": w['taskID']}, {\"$set\": kwargs})\n\n def update_task(self, processName, userName, taskID, setValues):\n self.collection.workflow.update_one({\"processName\": processName, \"userName\": userName, \"taskID\": taskID},\n {\"$set\": setValues})\n\n def clear_user_all_task(self):\n workflows = []\n workflowTemp = self.collection.workflow.find({\"processName\": self.processName, \"userName\": self.userName}, {\"_id\": 0}).sort(\n \"taskID\", 1)\n if workflowTemp.count() == 0:\n self.__init__(self.processName, self.userName)\n for w in workflowTemp:\n workflows.append(w)\n for workflow in workflows:\n self.collection.workflow.delete_one(\n {\"$and\": [{\"processName\": workflow['processName']}, {\"userName\": workflow['userName']}]})\n","sub_path":"gameServer/workflow/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"196743829","text":"import tensorflow as tf\nimport sonnet as snt\nfrom params import *\n\n\ndef Swich(inputs):\n return inputs * tf.nn.sigmoid(inputs)\n\n\nclass Forward(snt.AbstractModule):\n def __init__(self, name='forward'):\n super().__init__(name=name)\n self.name = name\n\n def _build(self, inputs):\n with tf.variable_scope(self.name):\n net = self._build_shared_network(inputs)\n V = snt.Linear(1, 'value')(net)\n A = snt.Linear(ACTION_SIZE, 'advantage')(net)\n Q = V + (A - tf.reduce_mean(A, axis=1, keep_dims=True))\n return Q\n\n def _build_shared_network(self, inputs):\n net = snt.Conv2D(32, [8, 8], [4, 4])(inputs)\n net = Swich(net)\n 
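# two more conv blocks follow, then a flatten and a 512-unit linear projection\n        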
net = snt.Conv2D(64, [4, 4], [2, 2])(net)\n net = Swich(net)\n net = snt.Conv2D(64, [3, 3], [1, 1])(net)\n net = Swich(net)\n net = snt.BatchFlatten(1)(net)\n net = snt.Linear(512)(net)\n return Swich(net)\n\n\n\n","sub_path":"src/fh_tools/language_test/DeepLearningNotes/Note-5 DQN与HS300指数择时/D3QN_Scale/agent/forward.py","file_name":"forward.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"205150621","text":"#!/usr/bin/env python\n\n# Copyright (c) 2006-2017 Andrey Golovigin\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nfrom __future__ import unicode_literals\n\nfrom pybtex.cmdline import CommandLine, make_option, standard_option\n\n\nclass PybtexConvertCommandLine(CommandLine):\n prog = 'pybtex-convert'\n args = '[options] in_filename out_filename'\n description = 'convert between bibliography database formats'\n long_description = \"\"\"\n\npybtex-convert converts bibliography database files between supported formats\n(currently BibTeX, BibTeXML and YAML).\n\n \"\"\".strip()\n\n num_args = 2\n\n options = (\n (None, (\n standard_option('strict'),\n make_option(\n '-f', '--from', dest='from_format',\n help='input format (%plugin_choices)', metavar='FORMAT',\n type='load_plugin', plugin_group='pybtex.database.input',\n ),\n make_option(\n '-t', '--to', dest='to_format',\n help='output format (%plugin_choices)', metavar='FORMAT',\n type='load_plugin', plugin_group='pybtex.database.output',\n ),\n standard_option('keyless_entries'),\n make_option(\n '--preserve-case', dest='preserve_case',\n action='store_true',\n help='do not convert identifiers to lower case',\n ),\n )),\n ('Encoding options', (\n standard_option('encoding'),\n standard_option('input_encoding'),\n standard_option('output_encoding'),\n )),\n )\n option_defaults = {\n 'keyless_entries': False,\n 'preserve_case': False,\n }\n\n def run(\n self, from_filename, to_filename,\n encoding, input_encoding, output_encoding,\n keyless_entries,\n **options\n ):\n from pybtex.database.convert import convert\n convert(\n from_filename, to_filename,\n input_encoding=input_encoding or encoding,\n output_encoding=output_encoding or encoding,\n parser_options={'keyless_entries': keyless_entries},\n **options\n )\n\nmain = PybtexConvertCommandLine()\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"pybtex/pybtex/database/convert/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"625321266","text":"__author__ = 'Siuxoes'\n\ndef is_prime(a):\n return all(a % i for i in range(2, a))\n\ndef sumPrimes():\n suma = 0\n primeNumbers = 0\n i = 2 # We start in number 2, because it is the first prime number\n while primeNumbers < 1000: # we loop until we get our first 1000 prime numbers\n if is_prime(i):\n suma += i\n i += 1\n primeNumbers += 1\n else:\n i += 1\n return suma\n\nprint(sumPrimes())","sub_path":"SumOfPrimes.py","file_name":"SumOfPrimes.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"127150949","text":"import sys\nfrom PyQt5 import QtWidgets\nimport logging\nsys.path.append(r\"C:\\Users\\Furkan\\Desktop\\ANTENNA MEASUREMENT SYSTEM\")\nimport Master\n\ndate_strftime_format = \"%d-%b-%y %H:%M:%S\"\nmessage_format = \"%(asctime)s - %(levelname)s : %(message)s\"\nlogging.basicConfig(filename='log.log', level=logging.INFO, format=message_format, datefmt=date_strftime_format)\n\nclass QTextEditLogger(logging.Handler):\n def __init__(self, parent):\n super().__init__()\n self.widget = QtWidgets.QPlainTextEdit(parent)\n self.widget.setReadOnly(True)\n\n def emit(self, record):\n msg = self.format(record)\n self.widget.appendPlainText(msg)\n\n\nclass MyDialog(QtWidgets.QDialog, QtWidgets.QPlainTextEdit):\n def __init__(self, parent=None):\n super().__init__(parent)\n\n logTextBox = QTextEditLogger(self)\n logTextBox.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))\n logging.getLogger().addHandler(logTextBox)\n logging.getLogger().setLevel(logging.DEBUG)\n\n self._button = QtWidgets.QPushButton(self)\n self._button.setText('Test Me')\n\n layout = QtWidgets.QVBoxLayout()\n layout.addWidget(logTextBox.widget)\n layout.addWidget(self._button)\n self.setLayout(layout)\n\n self._button.clicked.connect(self.test)\n\n def test(self):\n Master.main()\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n dlg = MyDialog()\n dlg.show()\n dlg.raise_()\n sys.exit(app.exec_())","sub_path":"GUI/gui_log.py","file_name":"gui_log.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"413704080","text":"import logging\nfrom ..abstract import ErdReadOnlyConverter\nfrom ..primitives import *\n\nfrom gehomesdk.erd.values.dishwasher import ErdErrorState\n\n_LOGGER = logging.getLogger(__name__)\n\nclass ErdErrorStateConverter(ErdReadOnlyConverter[ErdErrorState]):\n def erd_decode(self, value: str) -> ErdErrorState:\n if not value:\n return ErdErrorState()\n \n try:\n #convert to int\n i = erd_decode_int(value)\n\n return ErdErrorState(\n id = i & 0xF,\n active = bool((i & 0xF0) >> 8),\n raw_value=value\n )\n except Exception as ex: \n _LOGGER.exception(\"Could not construct error state, using default.\")\n return ErdErrorState(raw_value=value)\n","sub_path":"gehomesdk/erd/converters/dishwasher/erd_error_converter.py","file_name":"erd_error_converter.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"590740843","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Float64MultiArray\nfrom sensor_msgs.msg import 
JointState\nimport copy\nimport numpy as np\nimport math as m\n\n\n\n \nclass AdmittanceControl(object):\n\n def __init__(self):\n rospy.init_node('hand_impedance_control', anonymous=True)\n rate = rospy.Rate(100) # 100 Hz \n self.impedance_signal_send = Float64MultiArray()\n\n self.torque_percs_received_sensor = Float64MultiArray() \n rospy.Subscriber(\"torque_percs\", Float64MultiArray, self.torque_percs_received)\n\n impedance_controller = rospy.Publisher('impedance_signal_send', Float64MultiArray, queue_size=10)\n\n self.torque_data_array = [0,0,0,0,0,0]\n\n self.left_finger_torque_data_array = [0 for i in range(10)]\n self.middle_finger_torque_data_array = [0 for i in range(10)]\n self.right_finger_torque_data_array = [0 for i in range(10)]\n\n self.aver_force_left = 0\n self.aver_force_middle = 0\n self.aver_force_right = 0\n\n self.left_value_set_to_zero = 0\n self.middle_value_set_to_zero = 0\n self.right_value_set_to_zero = 0\n \n self.left_value_sum = 0\n self.middle_value_sum = 0\n self.right_value_sum = 0\n \n self.value_count = 0\n\n ### Parameters of the desired dynamics\n # k * (T_e*dot_e + e) = f_d - f\n self.T_e = 0.3 # desired time constant\n self.k = 20 # desired spring\n \n self.F_ext = 1\n self.T = 0\n\n self.error_n_3 = 0 # left finger\n self.error_n_4 = 0 # middle finger\n self.error_n_5 = 0 # right finger\n \n self.proxy_n_3 = 0\n self.proxy_n_4 = 0\n self.proxy_n_5 = 0\n \n\n self.scalar = 20.0\n\n while not rospy.is_shutdown():\n # update the impedance signal by the force sensor \n \n self.error_n_3 = self.output(self.error_n_3, self.torque_data_array[1], 15, 0.02) # 20 is ok\n self.error_n_4 = self.output(self.error_n_4, self.torque_data_array[0], 0, 0.02) # 20 is ok\n self.error_n_5 = self.output(self.error_n_5, self.torque_data_array[2], 15, 0.02) # 20 is ok \n \n self.proxy_n_3 = self.torque_data_array[4]\n self.proxy_n_4 = self.torque_data_array[3]\n self.proxy_n_5 = self.torque_data_array[5]\n \n \n\n self.impedance_signal_data = [0, 0, self.error_n_3 * self.scalar, self.error_n_4 * 20, self.error_n_5 * self.scalar, self.proxy_n_3, self.proxy_n_4, self.proxy_n_5] \n #self.impedance_signal_data = [0, 0, self.torque_data_array[1], self.torque_data_array[0], self.torque_data_array[2], self.proxy_n_3, self.proxy_n_4, self.proxy_n_5] \n \n #self.impedance_signal_data = [0, 0, self.aver_force_left, self.aver_force_middle, self.aver_force_right] \n \n self.impedance_signal_send.data = self.impedance_signal_data \n impedance_controller.publish(self.impedance_signal_send)\n rate.sleep()\n\n rospy.spin()\n\n \n def output(self, e_0, F_ext, F_d, T):\n\n # input parameters\n self.e_0 = e_0\n self.F_ext = F_ext\n self.F_d = F_d\n self.T = T\n \n # calculation of the trajectory position in the next step\n self.e_1 = (1 - self.T/self.T_e)*self.e_0 + self.T/(self.k*self.T_e)*\\\n (self.F_d - self.F_ext)\n \n return self.e_1\n \n\n def torque_percs_received(self,msg):\n self.torque_percs_received_sensor.data = msg.data\n \n for i in range(0, len(self.torque_percs_received_sensor.data)):\n self.torque_data_array[i] = self.torque_percs_received_sensor.data[i]\n \n #if (abs(self.aver_force_left-self.torque_data_array[1]) > 2):\n if (self.value_count < 20):\n self.left_value_sum += self.torque_data_array[1]\n self.middle_value_sum += self.torque_data_array[0]\n self.right_value_sum += self.torque_data_array[2]\n if (self.value_count == 20):\n self.left_value_set_to_zero = self.left_value_sum / 20\n self.middle_value_set_to_zero = self.middle_value_sum / 20\n 
self.right_value_set_to_zero = self.right_value_sum / 20\n if (self.value_count > 20):\n #Signal filter\n\n self.torque_data_array[1] = self.torque_data_array[1]- self.left_value_set_to_zero \n self.torque_data_array[0] = self.torque_data_array[0]- self.middle_value_set_to_zero \n self.torque_data_array[2] = self.torque_data_array[2]- self.right_value_set_to_zero \n\n #self.aver_force_left = self.aver_list_value(self.left_finger_torque_data_array, self.torque_data_array[1], self.aver_force_left) \n #self.aver_force_middle = self.aver_list_value(self.middle_finger_torque_data_array, self.torque_data_array[0], self.aver_force_middle)\n #self.aver_force_right = self.aver_list_value(self.right_finger_torque_data_array, self.torque_data_array[2], self.aver_force_right)\n \n self.value_count = self.value_count + 1\n \n\n def aver_list_value(self, list_value, new_value, old_value):\n sum_value = 0\n if (abs(new_value - old_value) > 3):\n list_value.append(new_value)\n list_value.pop(1)\n for i in range(len(list_value)):\n sum_value = sum_value + list_value[i]\n return sum_value / len(list_value)\n\n\n\n\nif __name__ == '__main__':\n try:\n AdmittanceControl()\n except rospy.ROSInterruptException: pass\n\n","sub_path":"kclhand_control/scripts/impedance_publisher.py","file_name":"impedance_publisher.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"650306455","text":"def get_yahoo_prices(p):\n from pandas_datareader import data\n from fintools import set_start_end\n\n if not (isinstance(p.start, str) and isinstance(p.end, str)):\n raise TypeError('format of start & end must be \"YYYY-MM-DD\"')\n\n if isinstance(p.prices, str):\n if p.prices == 'yahoo':\n tickers = p.symbols.copy()\n if p.cash_proxy != 'CASHX':\n tickers = list(set(tickers + [p.cash_proxy]))\n try:\n if isinstance(p.risk_free, str):\n tickers = list(set(tickers + [p.risk_free]))\n except:\n pass\n\n if p.start >= p.end:\n raise ValueError('start must be < end')\n\n if not (isinstance(p.start, str) and isinstance(p.end, str)):\n raise TypeError('format of start and end must be \"YYYY-MM-DD\"')\n\n start, end = set_start_end(start=p.start, end=p.end)\n\n data_panel = data.DataReader(tickers, \"yahoo\", start, end)\n\n close = data_panel['Adj Close'].sort_index(ascending=True)\n\n return close.copy().dropna()\n else:\n return p.prices","sub_path":"fintools/get_yahoo_prices.py","file_name":"get_yahoo_prices.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"549458232","text":"# How to find the extra edge in a binary tree and break it\n\n# O(N) Time\n# O(H) Space\n\n# DFS the binary tree; if a child was already explored, remove the edge\n\n# iterative version\ndef removeExtra(root):\n stack = [root] if root else []\n explored = {root} if root else set()\n while stack:\n node = stack.pop()\n if node.left:\n if node.left in explored:\n node.left = None # if only one extra edge exists, we could return root here\n else:\n explored.add(node.left)\n stack.append(node.left)\n if node.right:\n if node.right in explored:\n node.right = None\n else:\n explored.add(node.right)\n stack.append(node.right)\n return root\n\n# recursive version\ndef removeExtra(root):\n def dfs(root):\n explored.add(root)\n if root.left:\n if root.left in explored:\n root.left = None\n else:\n dfs(root.left)\n if root.right:\n if root.right in explored:\n root.right = None\n else:\n dfs(root.right)\n\n explored = set()\n if root:\n 
dfs(root)\n return root\n\n# follow-up: what if there are multiple extra edges?\n# same code; this algorithm also works for multiple edges\n\n# 684. Redundant Connection (undirected graph)\n# Return an edge that can be removed so that the resulting graph is a tree of N nodes. \n\n# O(N^2) time fancy style union-find\n# O(Nlg*N)~O(N) time traditional style\n\n# union-find, if 2 nodes connect, remove it\ndef findRedundantConnection(edges):\n root = ''.join(map(chr, range(1001)))\n for x, y in edges:\n if root[x] == root[y]:\n return [x, y]\n root = root.replace(root[x], root[y]) \n # union: replace all connected items with the same chr (unicode)\n\n# traditional style\ndef findRedundantConnection(edges):\n def find(x):\n if root[x] != x:\n root[x] = find(root[x])\n return root[x]\n\n N = len(set([x for x,_ in edges]+[y for _,y in edges]))\n root = [i for i in range(N)]\n\n for x, y in edges:\n a, b = find(x-1), find(y-1)\n if a == b: # x, y are already connected, remove this edge\n return [x, y]\n else: # not connected, union them\n root[a] = b\n\n# if we use dfs, the time complexity will be O(N^2)\n# since the graph may not be connected, we should run dfs for each node\n\nfrom collections import defaultdict as dfDict\ndef findRedundantConnection(edges):\n graph = dfDict(set)\n\n def dfs(source, target):\n if source not in seen:\n seen.add(source)\n if source == target: return True\n return any(dfs(nei, target) for nei in graph[source])\n\n for u, v in edges:\n seen = set()\n if u in graph and v in graph and dfs(u, v):\n return u, v\n graph[u].add(v)\n graph[v].add(u)\n","sub_path":"Google/removeRedundant.py","file_name":"removeRedundant.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"596143764","text":"# MIT License\n#\n# Copyright (c) 2017 Matthias Rost, Alexander Elvers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n__author__ = \"Matthias Rost, Alexander Elvers (mrost / aelvers inet.tu-berlin.de)\"\n\nimport gc\nimport itertools\nimport math\nimport multiprocessing\nimport random\nimport sys\n\nfrom algorithms import (\n greedy_matching as greedy_pkg,\n greedy_matching_parallel as greedy_pkg_parallel,\n optimal_mip as mip_pkg,\n)\nfrom datamodel import (\n suitable_substrates as ss_pkg,\n scenario as scen_pkg,\n requests as req_pkg,\n)\nfrom experiments import abstract_experiment_manager as aem_pkg\n\n\nclass AlgorithmManager(aem_pkg.AbstractAlgorithmManager):\n default_algorithms = [\n (\"MIP\",),\n (\"GREEDY_SINGLE\",),\n (\"GREEDY_PARALLEL\", {\"processes\": 2}),\n (\"GREEDY_PARALLEL\", {\"processes\": 4}),\n (\"GREEDY_PARALLEL\", {\"processes\": 8}),\n ]\n\n def execute_algorithms_in_parallel(self, scenario, max_number_of_processes):\n results = {}\n result_queue = multiprocessing.Queue()\n\n if self.algorithm_partition is None:\n self.algorithm_partition = self.get_algorithm_partition(max_number_parallel_processes=max_number_of_processes)\n for alg_list in self.algorithm_partition:\n processes = {}\n for alg in alg_list:\n process = multiprocessing.Process(target=self.execute_algorithm_multiprocess, args=(scenario, alg, result_queue))\n print(f\"starting {alg} .. \")\n process.start()\n processes[alg] = process\n for i in range(len(alg_list)):\n encapsulated_result = result_queue.get()\n print(f\"received result {encapsulated_result}\")\n encapsulated_result[1].scenario = scenario\n results[encapsulated_result[0]] = encapsulated_result[1]\n processes[encapsulated_result[0]].join()\n print(f\"process of algorithm {encapsulated_result[0]} is terminated / {len(alg_list)-(i+1)} of {len(alg_list)} outstanding to terminate\")\n\n return results\n\n def create_algorithm(self, scenario, algorithm):\n if algorithm.key == aem_pkg.AlgorithmType.MIP:\n return mip_pkg.ExactDeploymentMIP(scenario)\n elif algorithm.key == aem_pkg.AlgorithmType.GREEDY_SINGLE:\n return greedy_pkg.GreedyMatching(scenario)\n elif algorithm.key == aem_pkg.AlgorithmType.GREEDY_PARALLEL:\n return greedy_pkg_parallel.GreedyMatchingMaster(scenario, number_of_processes=algorithm.properties[\"processes\"])\n else:\n raise Exception(\"I don't know this type of algorithm.\")\n\n def get_algorithm_partition(self, max_number_parallel_processes):\n result = []\n\n process_count_to_alg = {}\n for alg in self.algorithms:\n process_count = 1\n if alg.key == aem_pkg.AlgorithmType.MIP:\n result.append([alg])\n elif alg.key == aem_pkg.AlgorithmType.GREEDY_SINGLE or alg.key == aem_pkg.AlgorithmType.GREEDY_PARALLEL:\n if alg.key == aem_pkg.AlgorithmType.GREEDY_PARALLEL:\n process_count = alg.properties[\"processes\"]\n if process_count not in process_count_to_alg:\n process_count_to_alg[process_count] = []\n process_count_to_alg[process_count].append(alg)\n\n\n process_counts = sorted(process_count_to_alg.keys(), reverse=True)\n\n while len(process_counts) > 0:\n\n available_count = max_number_parallel_processes\n partition = []\n print(\"starting a new partition\")\n print(f\"remaining elements are {process_count_to_alg} \")\n\n for i in process_counts:\n while available_count >= i and i in process_count_to_alg and len(process_count_to_alg[i]) > 0:\n available_count -= i\n 
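# the algorithm fits into the remaining core budget: queue it in the current batch\n                    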
partition.append(process_count_to_alg[i][0])\n print(f\"\\tadding {process_count_to_alg[i][0]} to partition obtaining {partition}\")\n process_count_to_alg[i] = process_count_to_alg[i][1:]\n if len(process_count_to_alg[i]) == 0:\n del process_count_to_alg[i]\n print(f\"\\tnew remaining algorithms {process_count_to_alg}\")\n\n result.append(partition)\n process_counts = sorted(process_count_to_alg.keys(), reverse=True)\n\n return result\n\n\nclass ExperimentManager(aem_pkg.AbstractExperimentManager):\n algorithm_manager_class = AlgorithmManager\n\n def __init__(self, probability_for_pair, max_deviation, capacity_factor,substrate_filter=None, number_of_repetitions=1, offset=0):\n super().__init__(probability_for_pair, max_deviation, capacity_factor,substrate_filter, number_of_repetitions, offset)\n self.suitable_substrates = ss_pkg.unpickle_pruned_suitable_substrates()\n\n\n def construct_scenarios(self):\n counter = 0\n print(self.probability_for_pair)\n print(self.capacity_factor)\n print(self.suitable_substrates)\n print(self.number_of_repetitions)\n print(self.max_deviation)\n\n for prob, cap_factor, substrate_name, repetition in itertools.product(self.probability_for_pair,\n self.capacity_factor,\n self.suitable_substrates.names,\n range(self.number_of_repetitions)):\n\n #print self.substrate_filter\n if self.substrate_filter is not None and substrate_name not in self.substrate_filter:\n continue\n substrate = self.suitable_substrates.substrates[substrate_name]\n #print substrate_name\n pairs = []\n\n handled_nodes = []\n for u in substrate.nodes:\n handled_nodes.append(u)\n for v in substrate.nodes:\n if v in handled_nodes:\n continue\n if random.random() <= prob:\n pairs.append((u,v))\n #req = req_pkg.Request(u, v, random.uniform(md_lb, md_ub), capacity=1)\n #requests.append(req)\n\n for deviation in self.max_deviation:\n\n self.scenario_keys.append((prob, deviation, cap_factor, substrate_name, repetition))\n\n if counter > 0 and counter % 100 == 0:\n if self.substrate_filter is not None:\n print(f\"Having created {counter} of {len(self.max_deviation) * len(self.capacity_factor) * len(self.probability_for_pair)*len(self.substrate_filter)*self.number_of_repetitions} many scenarios\")\n else:\n print(f\"Having created {counter} of {len(self.max_deviation) * len(self.capacity_factor) * len(self.probability_for_pair)*len(self.suitable_substrates.names)*self.number_of_repetitions} many scenarios\")\n\n\n number_of_nodes = substrate.get_number_of_nodes()\n\n capacity = math.ceil((number_of_nodes - 1) * 2 * prob\n + (number_of_nodes * number_of_nodes - 2 * number_of_nodes - 1) / 2 * prob * cap_factor)\n\n requests = []\n\n\n\n md_lb, md_ub = deviation, deviation\n for (u,v) in pairs:\n req = req_pkg.Request(u, v, random.uniform(md_lb, md_ub), capacity=1)\n requests.append(req)\n\n\n middleboxes = {}\n for u in substrate.nodes:\n middleboxes[u] = capacity\n\n scenario = scen_pkg.Scenario(counter, substrate, requests, middleboxes)\n\n self.scenarios[(prob,deviation,cap_factor,substrate_name,repetition)] = scenario\n\n counter += 1\n\n def create_scenario_partition(self, number_of_servers):\n scenarios_of_server = {}\n for i in range(number_of_servers):\n scenarios_of_server[i] = []\n i = 0\n for x in self.scenario_keys:\n scenarios_of_server[i].append(x)\n i = (i+1)% number_of_servers\n return scenarios_of_server\n\n def execute_scenarios(self, server_number=None, number_of_servers=6, number_of_cores=8):\n scenario_keys = self.scenario_keys\n if server_number is None:\n print(\"\\n\\nWARNING: 
all experiments will be executed now...\\n\\n\")\n else:\n scenario_keys = self.create_scenario_partition(number_of_servers=number_of_servers)[server_number]\n\n counter = 1\n for scenario_key in scenario_keys:\n print(f\"\\n\\nEXPERIMENT_MANAGER: Starting experiments for scenario {counter} of {len(scenario_keys)}\\n\\n\")\n scen_results = self.algorithm_manager.execute_algorithms_in_parallel(self.scenarios[scenario_key], max_number_of_processes=number_of_cores)\n self.scenario_solutions[scenario_key] = scen_results\n counter += 1\n gc.collect()\n #import objgraph\n #objgraph.show_most_common_types()\n import resource\n print(f\"Memory usage: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000} (MB)\")\n","sub_path":"src/experiments/experiment_manager.py","file_name":"experiment_manager.py","file_ext":"py","file_size_in_byte":10292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"421508977","text":"import requests\nfrom bs4 import BeautifulSoup as soup\nimport datetime as dt\nimport json\nimport os\nimport re\nimport csv\nimport time\nimport logging\nimport hashlib\nimport MySQLdb\nimport justext\nimport warnings\nimport feedparser\nimport urllib.parse\nimport pandas as pd\nimport urllib.request\nfrom goose3 import Goose\n# from boilerpipe.extract import Extractor\n\ndef getInitPage(to_date,from_date,commCode,comm):\n url='http://agmarknet.gov.in/SearchCmmMkt.aspx?Tx_Commodity='+commCode+'&Tx_State=0&Tx_District=0&Tx_Market=0&DateFrom='+from_date+'&DateTo='+to_date+'&Fr_Date='+(from_date)+'&To_Date='+(to_date)+'&Tx_Trend=0&Tx_CommodityHead='+comm+'&Tx_StateHead=--Select--&Tx_DistrictHead=--Select--&Tx_MarketHead=--Select--'\n print(url)\n res = requests.get(url)\n return res.content\n\ndef getInitPageTon(to_date,from_date,commCode,comm):\n url='http://agmarknet.gov.in/SearchCmmMkt.aspx?Tx_Commodity='+commCode+'&Tx_State=0&Tx_District=0&Tx_Market=0&DateFrom='+from_date+'&DateTo='+to_date+'&Fr_Date='+(from_date)+'&To_Date='+(to_date)+'&Tx_Trend=2&Tx_CommodityHead='+comm+'&Tx_StateHead=--Select--&Tx_DistrictHead=--Select--&Tx_MarketHead=--Select--'\n print(url)\n res = requests.get(url)\n return res.content\n\ndef getNextPage(date,vs):\n from_date = date\n to_date = date\n uri ='http://agmarknet.gov.in/SearchCmmMkt.aspx?Tx_Commodity=17&Tx_State=0&Tx_District=0&Tx_Market=0&DateFrom='+from_date+'&DateTo='+to_date+'&Fr_Date='+(from_date)+'&To_Date='+(to_date)+'&Tx_Trend=0&Tx_CommodityHead=Apple&Tx_StateHead=--Select--&Tx_DistrictHead=--Select--&Tx_MarketHead=--Select--'\n #the http headers are useful to simulate a particular browser (some sites deny\n #access to non-browsers (bots, etc.)\n #also needed to pass the content type. \n url = uri\n headers = {\n 'Cache-Control': 'no-cache',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Origin':'http://agmarknet.gov.in',\n 'Referer':url,\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',\n 'X-MicrosoftAjax':'Delta=true',\n 'X-Requested-With':'XMLHttpRequest'\n }\n\n # we group the form fields and their values in a list (any\n # iterable, actually) of name-value tuples. This helps\n # with clarity and also makes it easy to encode them later.\n\n formFields = (\n # the viewstate is actually 800+ characters in length! I truncated it\n # for this sample code. It can be lifted from the first page\n # obtained from the site. 
It may be ok to hardcode this value, or\n # it may have to be refreshed each time / each day, by essentially\n # running an extra page request and parse, for this specific value.\n (r'__VIEWSTATE',vs),\n\n # following are more of these ASP form fields\n (r'ctl00$ScriptManager1', r'ctl00$cphBody$UpdatePanel1|ctl00$cphBody$GridPriceData'),\n (r'ctl00$ddlLanguages',r'en'),\n (r'ctl00$ddlArrivalPrice',r'0'),\n (r'ctl00$ddlCommodity',r'17'),\n (r'ctl00$ddlState',r'0'),\n (r'ctl00$ddlDistrict',r'0'),\n (r'ctl00$ddlMarket',r'0'),\n (r'ctl00$txtDate',from_date),\n (r'ctl00$ValidatorExtender1_ClientState',to_date),\n (r'ctl00$ValidatorCalloutExtender2_ClientState',r''),\n (r'ctl00$cphBody$DDLPirceMearure',r'0'),\n (r'ctl00$cphBody$DDlExpression',r'0'),\n (r'ctl00$cphBody$Textserach',r''),\n (r'ctl00$cphBody$ddlCommodity',r'17'),\n (r'ctl00$cphBody$ddlfromyear',from_date[-4:]),\n (r'ctl00$cphBody$ddltoyear',to_date[-4:]),\n (r'ctl00$cphBody$DropDownDisplay',r'0'),\n (r'__EVENTTARGET',r'ctl00$cphBody$GridPriceData'),\n (r'__EVENTARGUMENT',r'Page$Next'),\n (r'__LASTFOCUS',r''),\n (r'__VIEWSTATEGENERATOR',r'B5EE7E14'),\n (r'__VIEWSTATEENCRYPTED',r''),\n (r'__ASYNCPOST',r'true')\n )\n\n formDict = {}\n for a,b in formFields:\n formDict[a] = b\n\n session = requests.Session()\n resp = session.post(uri,headers=headers,data=formDict)\n session.close()\n return resp.content\n\n\ndef populateDF(sp,df):\n nrows = df.shape[0]\n print(\"rows:\",nrows)\n tr_temp=sp.findAll(\"tr\")\n print(\"len: \",len(tr_temp))\n for i,tr in enumerate(tr_temp[1:]):\n coli = []\n for ival,td in enumerate(tr.find_all('td')):\n val = td.getText().strip()\n if ival == 9:\n val = val.replace(\" \",\"-\")\n coli.append(val)\n if len(coli) == 10:\n df.loc[i+nrows] = coli\n \ndef send(l):\n uri = 'http://www.agriiprince.com/test/aditya/loop_cron.php'\n headers = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n formDict = {}\n formDict['values'] = json.dumps(l)\n formDict['rofl'] = 'Xz6mUMy4pFgMamyBu8hkWuq'\n\n session = requests.Session()\n resp = session.post(uri,headers=headers,data=formDict)\n session.close()\n print(\"Done\")\n\n \ndef popLis(sp,lis):\n tr_temp=sp.findAll(\"tr\")\n k=0\n for i,tr in enumerate(tr_temp[1:]):\n coli = []\n for ival,td in enumerate(tr.find_all('td')[1:]):\n val = td.getText().strip()\n if ival == 8:\n val = dt.datetime.strptime(val,\"%d %b %Y\").strftime('%Y-%m-%d')\n coli.append(val)\n print(k)\n k=k+1\n if len(coli) == 9:\n print(coli)\n query = \"\"\"INSERT INTO main2 VALUES('{date}','{district}','{market}','{commodity}','{variety}','{grade}',NULL,{minP},{maxP},{modP},NULL) ON DUPLICATE KEY UPDATE grade = '{grade}';\"\"\".format(date = coli[8],district = coli[0],market = coli[1],commodity = coli[2],variety = coli[3],grade=coli[4]\n , minP = coli[5],maxP = coli[6],modP = coli[7])\n lis.append(query)\n print(\"done\")\n \n \ndef popLisTon(sp,lis,commInp):\n tr_temp=sp.findAll(\"tr\")\n print(\"vnjudrsio\")\n k=0\n for i,tr in enumerate(tr_temp[1:]):\n coli = []\n tds = tr.find_all('td')\n if tds[0].getText().strip() == '-':\n continue\n print(tds)\n for ival,td in enumerate(tds):\n val = td.getText().strip()\n if ival == 9:\n val = dt.datetime.strptime(val,\"%d %b %Y\").strftime('%Y-%m-%d')\n coli.append(val)\n #print(coli)\n if len(coli) == 10:\n print(coli)\n print(k)\n k=k-1\n query = \"\"\"INSERT INTO main2 
VALUES('{date}','{district}','{market}','{commodity}','{variety}',NULL,'{state}',{minP},{maxP},{modP},{tonnage}) ON DUPLICATE KEY UPDATE tonnage = {tonnage}, state = '{state}';\"\"\".format(date = coli[9],district = coli[1],market = coli[2],commodity = commInp,variety = coli[3]\n , minP = coli[6],maxP = coli[7],modP = coli[8],tonnage = coli[5],state = coli[0])\n #cursor.execute(query)\n lis.append(query)\n print(\"inserted\")\n\ndef findViewState(respContent):\n st = respContent.find('VIEWSTATE') + 10\n end = respContent[st:].find('|')\n return respContent[st:][:end]\n\n\n\n\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup as soup\nimport os\nimport time\nimport json\nfrom bs5 import *\nimport logging\n\nlag = 2\n\n#os.chdir('/home/ubuntu/pytfiles/')\n\nwith open('comm.json', 'r') as fp:\n commDict = json.load(fp)\n\nimport datetime as dt\n\ncwd = os.getcwd()\nlogging.basicConfig(filename = cwd + r\"/logscrape.log\",level = logging.DEBUG)\nlogger = logging.getLogger()\n\n \n\n# while True:\nprint('Start.')\nda = dt.date.today()\ndiso = da.strftime('%Y-%m-%d')\n# diso=\"2019-01-05\"\ndliso = (da - dt.timedelta(lag)).strftime('%Y-%m-%d')\n# dliso=\"2019-01-05\"\nd = da.strftime(\"%d-%b-%Y\")\n# d=\"2019-01-05\"\nlis = []\n\ntries = 0\n\nfor code in commDict:\n print(commDict[code])\n try:\n\t\t\n initResp = getInitPage(diso,dliso,code,commDict[code])\n initSoup = soup(initResp,'lxml')\n initVS = initSoup.input.getText()\n if initVS == '':\n initVS = initSoup.input['value']\n\n #popLis(initSoup,lis)\n \n \n\n if str(initResp).find('Page$Next') != -1:\n nextP = getNextPage(d,initVS)\n vs = findViewState(str(nextP))\n #popLis(soup(nextP,'lxml'),lis)\n\n while str(nextP).find('Page$Next') != -1:\n vs = findViewState(str(nextP))\n nextP = getNextPage(d,vs)\n # popLis(soup(nextP,'lxml'),lis)\n\n\n # tonnage begin (does not take lag)\n for lagdur in range(lag+1):\n # diso = da.strftime('%Y-%m-%d')\n dlisoTon = (da - dt.timedelta(lagdur)).strftime('%Y-%m-%d')\n # dlisoTon=\"2019-01-05\"\n initResp = getInitPageTon(dlisoTon,dlisoTon,code,commDict[code])\n initSoup = soup(initResp,'lxml')\n initVS = initSoup.input.getText()\n if initVS == '':\n initVS = initSoup.input['value']\n \n popLisTon(initSoup,lis,commDict[code])\n # exit()\n # tonnage end\n\n # print(commDict[code],len(lis),end = \" \")\n logger.info(d + \" done\")\n \n send(lis)\n print('.',end = ' ')\n lis = []\n except Exception as e:\n tries += 1\n print(e)\n print(\"skipping \",commDict[code])\n if tries <= 100:\n continue\n else:\n print(\"Too much failure. 
breaking.\")\n break\n\n# df = pd.read_csv('sample.csv',usecols = ['TS'])\n# l = df.shape[0]\n# df.loc[l] = time.time()\n# df.to_csv(\"sample.csv\")\n# time.sleep(7200)\n","sub_path":"mandi-price/agmarknet_cron.py","file_name":"agmarknet_cron.py","file_ext":"py","file_size_in_byte":9779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"180196434","text":"# coding=utf8\nimport xlrd\nimport pdb\nDEFAULT_SHEET_INDEX = 0\nDEFAULT_ROW_INDEX = 3\n\n\ndef analyse(path):\n result = []\n workbook = xlrd.open_workbook(path)\n sheet = workbook.sheet_by_index(DEFAULT_SHEET_INDEX)\n \n for i in range(DEFAULT_ROW_INDEX, sheet.nrows):\n o = {\n 'billno': sheet.cell(i, 6).value,\n 'company': sheet.cell(i, 10).value, \n 'code': sheet.cell(i, 11).value, \n }\n result.append(o)\n \n return result\n\n\nif __name__ == '__main__':\n path = 'cangku.xls'\n analyse(path)\n","sub_path":"bill/excelutil.py","file_name":"excelutil.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"138163934","text":"import pytest\nimport os\nfrom collections import OrderedDict\n\nimport pypospack.utils\nfrom pypospack.pyposmat import PyposmatDataFile\n\ndef get_testing_set():\n testing_set = OrderedDict()\n testing_set['results_data_fn'] = os.path.join(\n pypospack.utils.get_pypospack_root_directory(),\n 'data','Si__sw__data','pareto_optimization_1','data','pyposmat.results.0.out'\n )\n\n assert os.path.isfile(testing_set['results_data_fn'])\n\n return testing_set\n\ndef dev__read():\n \n testing_set = get_testing_set()\n\n o = PyposmatDataFile()\n o.read()\n\n","sub_path":"tests/pyposmat/data/PyposmatDataFile/test__write.py","file_name":"test__write.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"435193826","text":"def prime_interval(x, y):\r\n if y >= x:\r\n for num in range(x, y + 1):\r\n if num > 1: # all prime numbers are bigger than 1\r\n for i in range(2, num):\r\n if (num % i) == 0:\r\n break\r\n else:\r\n print(num)\r\n else:\r\n print(\"Upper boundary must be bigger than or equal to the lower boundary!\")\r\n\r\n\r\nwhile True:\r\n try:\r\n user_x = int(input(\"Please enter a lower boundary: \"))\r\n break\r\n except ValueError:\r\n print(\"Invalid input, please try again\")\r\n\r\nwhile True:\r\n try:\r\n user_y = int(input(\"Please enter an upper boundary: \"))\r\n break\r\n except ValueError:\r\n print(\"Invalid input, please try again\")\r\n\r\nprint(prime_interval(user_x, user_y))\r\n\r\n","sub_path":"6-PrimeIntervals.py","file_name":"6-PrimeIntervals.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"634852749","text":"##############################################################################\n#\n# Kennedy Institute of Rheumatology\n#\n# $Id$\n#\n# Copyright (C) 2018 Stephen Sansom\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n###############################################################################\n\n\"\"\"===========================\nPipeline WGCNA\n===========================\n\n:Author: Sansom lab\n:Release: $Id$\n:Date: |today|\n:Tags: Python\n\nOverview\n========\n\nThis pipeline wraps the WGCNA method using a set of Rscripts.\n\n\nUsage\n=====\n\n\nConfiguration\n-------------\n\nThe pipeline requires a configured :file:`pipeline.yml` file.\n\nDefault configuration files can be generated by executing:\n\n python /pipeline_wgcna.py config\n\n\nInput files\n-----------\n\n\n\nDependencies\n------------\n\nThis pipeline requires:\n\n* cgat-core: https://github.com/cgat-developers/cgat-core\n* R & various packages.\n* LaTeX.\n\n\nPipeline output\n===============\n\n\n\"\"\"\n\nfrom ruffus import *\nfrom pathlib import Path\nimport sys\nimport os\nimport shutil\nimport glob\nimport sqlite3\nimport numpy as np\nimport pandas as pd\nimport textwrap\nimport subprocess\nfrom scipy.stats.mstats import gmean\nimport cgatcore.experiment as E\nfrom cgatcore import pipeline as P\nimport cgatcore.iotools as IOTools\n\n# import local pipeline utility functions\nfrom pipeline_utils import templates\n\n\n# -------------------------- < parse parameters > --------------------------- #\n\n# load options from the config file\nPARAMS = P.get_parameters(\n [\"%s/pipeline.yml\" % os.path.splitext(__file__)[0],\n \"../pipeline.yml\",\n \"pipeline.yml\"])\n\n# set the location of the wgcna code directory\nif \"wgcna_dir\" not in PARAMS.keys():\n PARAMS[\"wgcna_dir\"] = Path(__file__).parents[1]\nelse:\n raise ValueError(\"Could not set the location of the wgcna code directory\")\n\n\n# ----------------------- < pipeline configuration > ------------------------ #\n\n# handle pipeline configuration\nif len(sys.argv) > 1:\n if(sys.argv[1] == \"config\") and __name__ == \"__main__\":\n sys.exit(P.main(sys.argv))\n\n\n# ########################################################################### #\n# ################# Sanity check and parse the input files ################## #\n# ########################################################################### #\n\nglobal EXPRESSION_DATA_PATH\nEXPRESSION_DATA_PATH = PARAMS[\"input_expression_data\"]\n\nif not os.path.exists(EXPRESSION_DATA_PATH):\n raise ValueError(\"input expression data file not found\")\n\n# optional trait and metadata inputs\n\nglobal TRAIT_DATA_STAT\nif PARAMS[\"input_trait_data\"] is not None:\n trait_data_path = PARAMS[\"input_trait_data\"]\n\n if not os.path.isfile(trait_data_path):\n raise ValueError(\"trait file not found\")\n\n else:\n TRAIT_DATA_STAT = \"--traitdata=%(trait_data_path)s\" % locals()\n\nelse:\n TRAIT_DATA_STAT = \"\"\n\nglobal META_DATA_STAT\nif PARAMS[\"input_meta_data\"] is not None:\n meta_data_path = PARAMS[\"input_meta_data\"]\n\n if not os.path.isfile(meta_data_path):\n raise ValueError(\"meta file not found\")\n\n else:\n META_DATA_STAT = \"--metadata=%(meta_data_path)s\" % locals()\n\nelse:\n META_DATA_STAT = \"\"\n\n\n\n\n\n@follows(mkdir(\"annotation.dir\"))\n@files(None, \"annotation.dir/genesets.sentinel\")\ndef getGenesetAnnotations(infile, outfile):\n '''Get mappings between Ensembl gene_ids and (i) Entrez ids\n and (ii) KEGG pathways.\n '''\n\n outdir = 
os.path.dirname(outfile)\n\n log_file = outfile.replace(\".sentinel\", \".log\")\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_fetch_geneset_annotations.R\n --ensemblversion=%(annotation_ensembl_release)s\n --ensemblhost=%(annotation_ensembl_host)s\n --species=%(annotation_species)s\n --outdir=%(outdir)s\n &> %(log_file)s\n ''' % dict(PARAMS, **locals())\n\n # requires internet connectivity.\n # and the BMRC cluster is broken by to_cluster = FALSE!\n process = subprocess.Popen(statement.replace(\"\\n\", \"\"),\n shell=True, stdout=subprocess.PIPE)\n process.wait()\n\n if process.returncode != 0:\n raise ValueError(\"failed to get annotation\")\n\n IOTools.touch_file(outfile)\n\n\n@files(None,\n \"wgcna.dir/clean.dir/clean.sentinel\")\ndef cleanData(infile, outfile):\n '''\n Prepare the data for a WGCNA run\n '''\n\n results_file = outfile.replace(\".sentinel\", \".RData\")\n log_file = outfile.replace(\".sentinel\", \".log\")\n\n #infile_path = os.path.abspath(infile)\n\n expression_data_path = EXPRESSION_DATA_PATH\n trait_data_stat = TRAIT_DATA_STAT\n\n out_dir = os.path.dirname(os.path.abspath(outfile))\n results_filename = os.path.basename(results_file)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_data_cleaning.R\n --input=%(expression_data_path)s\n --idcol=%(annotation_idcol)s\n --outdir=%(out_dir)s\n --outfilename=%(results_filename)s\n --minfraction=%(clean_min_fraction)s\n --minnsamples=%(clean_min_n_samples)s\n --minngenes=%(clean_min_n_genes)s\n --minrelativeweight=%(clean_min_relative_weight)s\n --cutheight=%(clean_cut_height)s\n --minsize=%(clean_min_size)s\n %(trait_data_stat)s\n &> %(log_file)s\n '''\n\n P.run(statement)\n\n IOTools.touch_file(outfile)\n\n\n@transform(cleanData,\n regex(r\"(.*)/.*/clean.sentinel\"),\n r\"\\1/soft.power.dir/soft.power.sentinel\")\ndef softPower(infile, outfile):\n '''\n Run the soft power analysis\n '''\n\n log_file = outfile.replace(\".sentinel\", \".log\")\n\n clean_data = infile.replace(\".sentinel\", \".RData\")\n\n job_threads = PARAMS[\"module_threads\"]\n job_memory = PARAMS[\"module_memory\"]\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_soft_power.R\n --input=%(clean_data)s\n --outdir=%(out_dir)s\n --networktype=%(module_network_type)s\n --adjcorfnc=%(module_adj_cor_fnc)s\n --adjdistfnc=%(module_adj_dist_fnc)s\n --threads=%(module_threads)s\n &> %(log_file)s\n '''\n\n P.run(statement)\n\n IOTools.touch_file(outfile)\n\n\n\n# ########################################################################### #\n# ################### Step by Step module detection ######################### #\n# ########################################################################### #\n\n\n@transform(cleanData,\n regex(r\"(.*)/.*/clean.sentinel\"),\n r\"\\1/modules.dir/adjacency.sentinel\")\ndef computeAdjacency(infile, outfile):\n '''Compute the adjacency matrix'''\n\n results_file = os.path.basename(outfile).replace(\".sentinel\", \".RData\")\n log_file = outfile.replace(\".sentinel\", \".log\")\n\n clean_data = infile.replace(\".sentinel\", \".RData\")\n\n job_threads = PARAMS[\"module_threads\"]\n job_memory = PARAMS[\"module_memory\"]\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_compute_adjacency.R\n --input=%(clean_data)s\n --outdir=%(out_dir)s\n --outfilename=%(results_file)s\n 
--threads=%(module_threads)s\n --softpower=%(module_soft_power)s\n --networktype=%(module_network_type)s\n --adjcorfnc=%(module_adj_cor_fnc)s\n --adjdistfnc=%(module_adj_dist_fnc)s\n &> %(log_file)s\n '''\n\n P.run(statement)\n\n IOTools.touch_file(outfile)\n\n@transform(computeAdjacency,\n regex(r\"(.*)/modules.dir/adjacency.sentinel\"),\n r\"\\1/modules.dir/TOM.sentinel\")\ndef computeTOM(infile, outfile):\n '''Compute the TOM'''\n\n results_file = os.path.basename(outfile).replace(\".sentinel\", \".RData\")\n log_file = outfile.replace(\".sentinel\", \".log\")\n\n adjacency_data = infile.replace(\".sentinel\", \".RData\")\n\n job_threads = PARAMS[\"module_threads\"]\n job_memory = PARAMS[\"module_memory\"]\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_compute_TOM.R\n --input=%(adjacency_data)s\n --outdir=%(out_dir)s\n --outfilename=%(results_file)s\n --threads=%(module_threads)s\n --tomtype=%(module_tom_type)s\n &> %(log_file)s\n '''\n\n P.run(statement)\n\n IOTools.touch_file(outfile)\n\n\n@transform(computeTOM,\n regex(r\"(.*)/modules.dir/TOM.sentinel\"),\n add_inputs(cleanData),\n r\"\\1/modules.dir/modules.sentinel\")\ndef detectModules(infiles, outfile):\n '''Cluster the TOM, cut the tree and merge the modules'''\n\n results_file = os.path.basename(outfile).replace(\".sentinel\", \".RData\")\n log_file = outfile.replace(\".sentinel\", \".log\")\n\n tom_data, clean_data = [x.replace(\".sentinel\", \".RData\") for x in infiles]\n\n job_threads = PARAMS[\"module_threads\"]\n job_memory = PARAMS[\"module_memory\"]\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_detect_modules.R\n --cleandata=%(clean_data)s\n --tomdata=%(tom_data)s\n --outdir=%(out_dir)s\n --outfilename=%(results_file)s\n --threads=%(module_threads)s\n --softpower=%(module_soft_power)s\n --minmodulesize=%(module_min_size)s\n --medissthreshold=%(module_diss_threshold)s\n --adjcorfnc=%(module_adj_cor_fnc)s\n &> %(log_file)s\n '''\n\n P.run(statement)\n\n IOTools.touch_file(outfile)\n\n\n# ########################################################################### #\n# ################# Blockwise module detection ############################## #\n# ########################################################################### #\n\n\n@transform(cleanData,\n regex(r\"(.*)/.*/clean.sentinel\"),\n r\"\\1/modules.dir/modules.sentinel\")\ndef detectModulesBlockwise(infile, outfile):\n '''\n Run the module detection\n '''\n\n results_file = os.path.basename(outfile).replace(\".sentinel\", \".RData\")\n log_file = outfile.replace(\".sentinel\", \".log\")\n\n clean_data = infile.replace(\".sentinel\", \".RData\")\n\n job_threads = PARAMS[\"module_threads\"]\n job_memory = PARAMS[\"module_memory\"]\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_detect_modules_blockwise.R\n --input=%(clean_data)s\n --outdir=%(out_dir)s\n --outfilename=%(results_file)s\n --threads=%(module_threads)s\n --maxblocksize=%(module_block_size)s\n --softpower=%(module_soft_power)s\n --networktype=%(module_network_type)s\n --adjcorfnc=%(module_adj_cor_fnc)s\n --adjdistfnc=%(module_adj_dist_fnc)s\n --tomtype=%(module_tom_type)s\n --minmodulesize=%(module_min_size)s\n --medissthreshold=%(module_diss_threshold)s\n &> %(log_file)s\n '''\n\n P.run(statement)\n\n 
IOTools.touch_file(outfile)\n\n\n\n# ########################################################################### #\n# ################## integrate module detection options ##################### #\n# ########################################################################### #\n\nif PARAMS[\"module_detection\"] == \"stepwise\":\n collectModules = detectModules\n\nelif PARAMS[\"module_detection\"] == \"blockwise\":\n collectModules = detectModulesBlockwise\n\nelse:\n raise ValueError('Module detection must be set to either \"stepwise\" or \"blockwise\"')\n\n\n# ########################################################################### #\n# #################### Module characterisation ############################## #\n# ########################################################################### #\n\n\n@transform(collectModules,\n regex(r\"(.*)/.*/modules.sentinel\"),\n add_inputs(getGenesetAnnotations, cleanData),\n r\"\\1/membership.dir/membership.sentinel\")\ndef characteriseModules(infiles, outfile):\n '''\n Run the module detection\n '''\n\n modulesx, annotationsx, cleanx = infiles\n\n results_file = os.path.basename(outfile).replace(\".sentinel\", \".tsv\")\n log_file = outfile.replace(\".sentinel\", \".log\")\n\n clean_data = cleanx.replace(\".sentinel\", \".RData\")\n module_data = modulesx.replace(\".sentinel\", \".RData\")\n annotation_file = annotationsx.replace(\"genesets.sentinel\", \"ensembl.to.entrez.tsv.gz\")\n\n job_threads = PARAMS[\"module_threads\"]\n job_memory = PARAMS[\"module_memory\"]\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_modules_vs_traits.R\n --input=%(clean_data)s\n --modules=%(module_data)s\n --annotation=%(annotation_file)s\n --idcol=%(annotation_idcol)s\n --namecol=%(annotation_namecol)s\n --outdir=%(out_dir)s\n --outfilename=%(results_file)s\n --threads=%(module_threads)s\n &> %(log_file)s\n '''\n\n P.run(statement)\n\n IOTools.touch_file(outfile)\n\n\n@transform(characteriseModules,\n regex(r\"(.*)/.*/membership.sentinel\"),\n r\"\\1/eigengenes.dir/eigengenes.sentinel\")\ndef characteriseEigengenes(infile, outfile):\n '''\n Characterise the eigen genes\n '''\n\n modulesx = infile\n\n eigengene_path = os.path.join(os.path.dirname(modulesx),\n \"eigengenes.tsv\")\n\n membership_path = os.path.join(os.path.dirname(modulesx),\n \"membership.tsv\")\n\n log_file = outfile.replace(\".sentinel\", \".log\")\n\n trait_data_stat = TRAIT_DATA_STAT\n meta_data_stat = META_DATA_STAT\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_characterise_eigengenes.R\n --eigengenes=%(eigengene_path)s\n --namecol=%(annotation_namecol)s\n --membership=%(membership_path)s\n --params=pipeline.yml\n %(trait_data_stat)s\n %(meta_data_stat)s\n --figwidth=%(plot_eigengene_heatmap_width)s\n --figheight=%(plot_eigengene_heatmap_height)s\n --outdir=%(out_dir)s\n &> %(log_file)s\n '''\n\n P.run(statement)\n\n IOTools.touch_file(outfile)\n\n@active_if(PARAMS[\"input_genelists\"]!=None)\n@transform(collectModules,\n regex(r\"(.*)/.*/modules.sentinel\"),\n add_inputs(getGenesetAnnotations, cleanData),\n r\"\\1/eigengenes.dir/eigengenes.vs.genelists.sentinel\")\ndef eigengenesVsGenelists(infiles, outfile):\n '''\n Characterise the eigen genes\n '''\n\n modulex, annotationx, cleanx = infiles\n\n log_file = outfile.replace(\".sentinel\", \".log\")\n\n clean_data = cleanx.replace(\".sentinel\", 
\".RData\")\n module_data = modulex.replace(\".sentinel\", \".RData\")\n annotation_file = annotationx.replace(\"genesets.sentinel\", \"ensembl.to.entrez.tsv.gz\")\n\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_eigengenes_vs_genelists.R\n --input=%(clean_data)s\n --annotation=%(annotation_file)s\n --modules=%(module_data)s\n --genelists=%(input_genelists)s\n --idcol=%(annotation_idcol)s\n --namecol=%(annotation_namecol)s\n --outdir=%(out_dir)s\n &> %(log_file)s\n '''\n\n P.run(statement)\n\n # write the tex snippet.\n genelists = pd.read_csv(PARAMS[\"input_genelists\"], sep=\"\\t\")\n plot_groups = set(genelists[\"plot_group\"])\n\n tex_file = os.path.join(out_dir, \"genelists.tex\")\n with open(tex_file, \"w\") as tex:\n\n for plot_group in plot_groups:\n\n pg_esc = plot_group.replace('_','\\_')\n\n heatmap_path = os.path.join(out_dir,\n \"genelist.\" + plot_group)\n\n heatmap_fig = {\"width\": \"1\", \"height\": \"0.9\",\n \"path\": heatmap_path,\n \"caption\": \"Heatmap of manually curated \" +\\\n pg_esc + \" genes\"}\n\n tex.write(templates.subsection % {\"title\": pg_esc + \" genes\"})\n tex.write(textwrap.dedent(\n templates.figure % heatmap_fig))\n tex.write(\"\\n\")\n\n IOTools.touch_file(outfile)\n\n\n\ndef parseGMTs(param_keys=[\"gmt_pathway_files_\"]):\n '''Helper function for parsing the lists of GMT files'''\n\n all_files = []\n all_names = []\n\n for param_key in param_keys:\n\n\n gmts = [x for x in PARAMS.keys()\n if x.startswith(param_key)]\n\n if len(gmts) > 0:\n all_files += [PARAMS[x] for x in gmts]\n\n all_names += [x.replace(param_key, \"\")\n for x in gmts]\n\n if len(all_files) == 0:\n all_files = \"none\"\n all_names = \"none\"\n else:\n all_files = \",\".join(all_files)\n all_names = \",\".join(all_names)\n\n return all_names, all_files\n\n\n# ########################################################################### #\n# ######################### Geneset Analysis ################################ #\n# ########################################################################### #\n\n@active_if(PARAMS[\"run_genesets\"])\n@transform(characteriseModules,\n regex(r\"(.*)/membership.dir/.*.sentinel\"),\n add_inputs(getGenesetAnnotations),\n r\"\\1/genesets.dir/geneset.analysis.sentinel\")\ndef genesetAnalysis(infiles, outfile):\n '''\n Naive geneset over-enrichment analysis of module genes.\n\n Testing is performed with the gsfisher package.\n\n GO categories and KEGG pathways are tested by default.\n\n Arbitrary sets of genes cat be supplied as GMT files\n (e.g. 
such as those from MSigDB).\n '''\n\n membershipx, genesetAnno = infiles\n\n membership_file = membershipx.replace(\".sentinel\", \".tsv\")\n\n outdir = os.path.dirname(outfile)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n anno = os.path.join(os.path.dirname(genesetAnno),\n \"ensembl.to.entrez.tsv.gz\")\n\n kegg_pathways = os.path.join(os.path.dirname(genesetAnno),\n \"kegg_pathways.rds\")\n\n param_keys = [\"gmt_celltype_files_\",\n \"gmt_pathway_files_\"]\n gmt_names, gmt_files = parseGMTs(param_keys=param_keys)\n\n # get the set of modules\n xx = pd.read_csv(membership_file, sep=\"\\t\")\n modules = [x for x in set(xx[\"module\"].values)]\n\n job_memory = \"20G\"\n\n statements = []\n\n species = PARAMS[\"annotation_species\"]\n wgcna_dir = PARAMS[\"wgcna_dir\"]\n idcol = PARAMS[\"annotation_idcol\"]\n\n for module in modules:\n\n logfile = os.path.join(outdir,\n \"geneset.analysis.\" + module + \".log\")\n\n statements.append('''Rscript %(wgcna_dir)s/R/wgcna_modules_vs_genesets.R\n --input=%(membership_file)s\n --module=%(module)s\n --species=%(species)s\n --annotation=%(anno)s\n --idcol=%(idcol)s\n --kegg_pathways=%(kegg_pathways)s\n --gmt_names=%(gmt_names)s\n --gmt_files=%(gmt_files)s\n --outdir=%(outdir)s\n &> %(logfile)s\n ''' % locals())\n\n P.run(statements)\n\n IOTools.touch_file(outfile)\n\n@active_if(PARAMS[\"run_genesets\"])\n@transform(genesetAnalysis,\n regex(r\"(.*)/.*.sentinel\"),\n add_inputs(characteriseModules),\n r\"\\1/summarise.geneset.analysis.sentinel\")\ndef summariseGenesetAnalysis(infiles, outfile):\n '''\n Summarise the geneset over-enrichment analyses of cluster marker genes.\n\n Enriched pathways are summarised in an Excel table and a heatmap.\n '''\n\n outdir = os.path.dirname(outfile)\n\n infile, membershipx = infiles\n\n membership_file = membershipx.replace(\".sentinel\", \".tsv\")\n\n # need to sort out the dependencies properly!\n genesetdir = os.path.dirname(infile)\n\n param_keys = [\"gmt_celltype_files_\",\n \"gmt_pathway_files_\"]\n gmt_names, gmt_files = parseGMTs(param_keys=param_keys)\n\n # get the set of modules\n xx = pd.read_csv(membership_file, sep=\"\\t\")\n modules = [x for x in set(xx[\"module\"].values)]\n module_list = \",\".join(modules)\n\n job_memory = \"20G\"\n\n logfile = outfile.replace(\".sentinel\", \".log\")\n\n use_adjusted = str(PARAMS[\"genesets_use_adjusted_pvalues\"]).upper()\n show_common = str(PARAMS[\"genesets_show_common\"]).upper()\n\n show_detailed = str(PARAMS[\"genesets_show_detailed\"])\n\n statement = '''Rscript %(wgcna_dir)s/R/wgcna_summariseGenesets.R\n --genesetdir=%(genesetdir)s\n --gmt_names=%(gmt_names)s\n --show_detailed=%(show_detailed)s\n --modulelist=%(module_list)s\n --mingenes=%(genesets_min_fg_genes)s\n --pvaluethreshold=%(genesets_pvalue_threshold)s\n --padjustmethod=%(genesets_padjust_method)s\n --useadjusted=%(use_adjusted)s\n --minoddsratio=%(genesets_min_odds_ratio)s\n --showcommon=%(show_common)s\n --outprefix=%(outdir)s/cluster.genesets\n --prefix=genesets\n --plotdirvar=clusterGenesetsDir\n &> %(logfile)s\n '''\n P.run(statement)\n\n IOTools.touch_file(outfile)\n\n\n# ------------------- < within cluster geneset analysis > ------------------- #\n\n@transform(summariseGenesetAnalysis,\n regex(\"(.*)/genesets.dir/summarise.geneset.analysis.sentinel\"),\n add_inputs(softPower, characteriseModules,\n characteriseEigengenes, eigengenesVsGenelists),\n r\"\\1/latex.dir/report.vars.sty\")\n\ndef latexVars(infiles, outfile):\n '''\n Prepare a file containing the latex variable 
definitions.\n '''\n\n infile = infiles[0]\n\n rundir = Path(outfile).parents[1]\n\n if not os.path.exists(os.path.dirname(outfile)):\n os.mkdir(os.path.dirname(outfile))\n\n vars = {\"wgcnaDir\": PARAMS[\"wgcna_dir\"],\n \"reportTitle\": PARAMS[\"report_title\"],\n \"reportAuthor\": PARAMS[\"report_author\"],\n \"minFraction\": PARAMS[\"clean_min_fraction\"],\n \"minSamples\": PARAMS[\"clean_min_n_samples\"],\n \"minGenes\": PARAMS[\"clean_min_n_genes\"],\n \"cutHeight\": PARAMS[\"clean_cut_height\"],\n \"clusterMinSize\": PARAMS[\"clean_min_size\"],\n \"softPower\": PARAMS[\"module_soft_power\"],\n \"detection\": PARAMS[\"module_detection\"],\n \"networkType\": PARAMS[\"module_network_type\"].replace(\"_\",\"-\"),\n \"adjCorFunction\": PARAMS[\"module_adj_cor_fnc\"],\n \"adjDistFunction\": PARAMS[\"module_adj_dist_fnc\"],\n \"tomType\": PARAMS[\"module_tom_type\"],\n \"minSize\": PARAMS[\"module_min_size\"],\n \"dissThreshold\": PARAMS[\"module_diss_threshold\"],\n \"cleanDir\": os.path.join(rundir,\"clean.dir\"),\n \"powerDir\": os.path.join(rundir,\"soft.power.dir\"),\n \"membershipDir\": os.path.join(rundir, \"membership.dir\"),\n \"moduleDir\": os.path.join(rundir, \"modules.dir\"),\n \"eigengeneDir\": os.path.join(rundir, \"eigengenes.dir\"),\n \"genesetDir\": os.path.join(rundir, \"genesets.dir\"),\n \"clusterGenesetsDir\": os.path.join(rundir, \"genesets.dir\")}\n\n with open(outfile, \"w\") as ofh:\n for command, value in vars.items():\n\n ofh.write(\"\\\\newcommand{\\\\\" + command + \"}{\" + str(value) + \"}\\n\")\n\n\n\n@transform(latexVars,\n regex(\"(.*)/report.vars.sty\"),\n r\"\\1/summaryReport.pdf\")\ndef summaryReport(infile, outfile):\n '''\n Prepare a PDF summary report.\n '''\n\n outfile_name = os.path.basename(outfile)\n jobName = outfile_name[:-len(\".pdf\")]\n\n outdir = os.path.dirname(outfile)\n rundir = Path(outdir).parents[0]\n\n compilation_dir = os.path.join(outdir, \".latex_compilation.dir\")\n\n latexVars = os.path.join(outdir, \"report.vars.sty\")\n\n try:\n shutil.rmtree(compilation_dir)\n except FileNotFoundError:\n pass\n\n os.mkdir(compilation_dir)\n\n # get the latex variables\n statement = '''pdflatex -output-directory=%(compilation_dir)s\n -jobname=%(jobName)s\n %(draft_mode)s\n '\\\\input %(latexVars)s\n \\\\def\\\\reportTitle{pipeline\\\\_wgcna.py: summary report}\n '''\n # get the intro\n statement += '''\n \\\\input %(wgcna_dir)s/pipelines/pipeline_wgcna/introReport.tex\n '''\n\n # add the section to visualise the soft power\n statement += '''\n \\\\input %(wgcna_dir)s/pipelines/pipeline_wgcna/paramSection.tex\n '''\n\n statement += '''\n \\\\input %(wgcna_dir)s/pipelines/pipeline_wgcna/cleanSection.tex\n '''\n\n statement += '''\n \\\\input %(wgcna_dir)s/pipelines/pipeline_wgcna/moduleSection.tex\n '''\n\n statement += '''\n \\\\input %(wgcna_dir)s/pipelines/pipeline_wgcna/eigengeneSection.tex\n '''\n\n if not PARAMS[\"input_genelists\"] == None:\n statement += '''\n \\\\input %(wgcna_dir)s/pipelines/pipeline_wgcna/genelistSection.tex\n '''\n\n statement += '''\n \\\\input %(wgcna_dir)s/pipelines/pipeline_wgcna/membershipSection.tex\n '''\n\n if(PARAMS[\"run_genesets\"]):\n statement += '''\n \\\\input %(wgcna_dir)s/pipelines/pipeline_wgcna/genesetSection.tex\n '''\n\n statement += '''\\\\input %(wgcna_dir)s/latex/endmatter.tex'\n '''\n\n # Deliberately run twice - necessary for LaTeX compilation..\n draft_mode = \"-draftmode\"\n P.run(statement)\n\n draft_mode = \"\"\n P.run(statement)\n\n # Move the compiled pdfs to report.dir\n 
shutil.move(os.path.join(compilation_dir, outfile_name),\n outfile)\n\n\n@follows(mkdir(\"report.dir\"))\n@transform(summaryReport,\n regex(r\"wgcna.dir/latex.dir/summaryReport.pdf\"),\n r\"report.dir/report.sentinel\")\ndef report(infile, outfile):\n '''\n Link output files to a directory in the \"reports.dir\" folder.\n\n '''\n\n out_dir = \"report.dir\"\n\n try:\n os.stat(out_dir)\n except FileNotFoundError:\n os.mkdir(out_dir)\n\n run_dir = \"wgcna.dir\"\n\n targets = {os.path.join(run_dir,\"eigengenes.dir\",\"eigengen_heatmap.png\"): \"module.eigengene.heatmap.png\",\n os.path.join(run_dir,\"genesets.dir\",\"cluster.genesets.xlsx\"): \"module.genesets.xlsx\",\n os.path.join(run_dir,\"latex.dir\",\"summaryReport.pdf\"): \"summary.report.pdf\",\n os.path.join(run_dir, \"membership.dir\", \"eigengenes.tsv\"): \"module.eigengene.expression.matrix.tsv\",\n os.path.join(run_dir, \"membership.dir\", \"membership.tsv\"): \"module.gene.membership.tsv\"\n }\n\n for source_path, target_name in targets.items():\n\n if os.path.exists(source_path):\n\n target_path = os.path.join(out_dir, target_name)\n\n os.symlink(os.path.relpath(source_path, start=out_dir),\n target_path)\n\n IOTools.touch_file(outfile)\n\n\n\n\n# ########################################################################### #\n# ##################### full target: to run all tasks ####################### #\n# ########################################################################### #\n\n@follows(report)\ndef full():\n pass\n\n\n# ------------------- < ***** end of pipeline **** > ------------------------ #\n\nif __name__ == \"__main__\":\n sys.exit(P.main(sys.argv))\n","sub_path":"pipelines/pipeline_wgcna.py","file_name":"pipeline_wgcna.py","file_ext":"py","file_size_in_byte":29513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"323941463","text":"import collections.abc\nimport threading\nimport time\nfrom sdnlg.libs.signals.signals import Signal, called_on\nfrom sdnlg.libs.data.structures import Node, Port\nfrom sdnlg.libs.utils.singleton import Singleton\n\nfrom sdnlg.libs.core.configs import read_openflow_configs\nfrom shared.cal.cal import CoreCal\n#from shared.cal.cal import Message\nfrom shared.messagebroker import MessageBroker\n\nconfs = read_openflow_configs()\nPACKET_OUT_INTERVAL = confs['PACKET_OUT_INTERVAL']\n\n\n# Defined signals\nSIGNAL_PACKET_IN = Signal()\nSIGNAL_PORT_STATUS = Signal()\n\n\nclass CoreAttributeError(Exception):\n pass\n\n\nclass Core(object, metaclass=Singleton):\n \"\"\"\n The Core class. Controls the communication between the various modules of the SDN-LG. 
It is a Singleton.\n    \"\"\"\n    def __init__(self):\n\n        self._switches = list()\n        self._links = list()\n        self._dispatcher = None\n        self._cal = CoreCal()\n\n        def listener_core(msg):\n            if msg.header.payload == 3:\n                if msg.body.action == 'switch_config':\n                    self.process_switch_config(msg.body)\n\n        self._mb = MessageBroker(listener_core, False)\n\n    @property\n    def switches(self):\n        return self._switches\n\n    @switches.setter\n    def switches(self, my_switches):\n        valid = True\n        if my_switches is not None:\n            if isinstance(my_switches, collections.abc.Sequence):\n                if not all(isinstance(s, Node) for s in my_switches):\n                    valid = False\n            else:\n                valid = False\n        else:\n            my_switches = list()\n        if valid:\n            self._switches = my_switches\n        else:\n            raise CoreAttributeError('Switches must be a list of Node instances or None')\n\n    @property\n    def links(self):\n        return self._links\n\n    @links.setter\n    def links(self, my_links):\n        valid = True\n        if my_links is not None:\n            if isinstance(my_links, collections.abc.Sequence):\n                for p1, p2 in my_links:\n                    # both endpoints of a link must be Port instances\n                    if not (isinstance(p1, Port) and isinstance(p2, Port)):\n                        valid = False\n            else:\n                valid = False\n        else:\n            my_links = list()\n        if valid:\n            self._links = my_links\n        else:\n            raise CoreAttributeError('Links must be a list of (Port, Port) or None')\n\n    def has_link(self, port1, port2):\n        for p1, p2 in self.links:\n            if p1 == port1 and p2 == port2:\n                return True\n            if p1 == port2 and p2 == port1:\n                return True\n        return False\n\n    def add_link(self, port1, port2):\n        if not self.has_link(port1, port2):\n            self.links.append((port1, port2))\n\n    # def send_packet(self, node, port, data):\n    #     msg = Message()\n    #     msg.header.id = 255\n\n    def prepare_ports(self, list_ports):\n        ports = list()\n        for p in list_ports:\n            port = Port({'port_no': p.port_no, 'name': p.name, 'speed': p.speed, 'state': p.state})\n            ports.append(port)\n\n        return ports\n\n    def switch_exists(self, dpid):\n        for s in self.switches:\n            if s.dpid == dpid:\n                return s\n        return False\n\n    def add_switch(self, dpid, controller_id, data):\n        # add a switch to the list\n        node = Node({'dpid': dpid, 'controller_id': controller_id, 'n_tables': data.n_tbls,\n                     'capabilities': data.caps})\n        node.ports = self.prepare_ports(data.ports.dports.values())\n        self.switches.append(node)\n\n    def remove_switch(self, dpid):\n        # remove a switch from the list\n        s = self.switch_exists(dpid)\n        if s:\n            self.switches.remove(s)\n\n    def update_switch(self, s, dpid, controller_id, data):\n        s.dpid = dpid\n        s.controller_id = controller_id\n        s.n_tables = data.n_tbls\n        s.capabilities = data.caps\n        s.ports = self.prepare_ports(data.ports.dports.values())\n\n    def add_port(self, dpid, port):\n        # add a port to an existing switch\n        s = self.switch_exists(dpid)\n        if s:\n            s.ports.append(port)\n\n    def remove_port(self, dpid, port):\n        s = self.switch_exists(dpid)\n        if s:\n            if isinstance(port, Port):\n                s.ports.remove(port)\n            else:\n                p = s.port_no(port['id'])\n                s.ports.remove(p)\n\n    def switch_config(self, dpid, controller_id, data):\n        s = self.switch_exists(dpid)\n        if not s:\n            self.add_switch(dpid, controller_id, data)\n        else:\n            self.update_switch(s, dpid, controller_id, data)\n\n    def packet_out(self, node, port, data, lldp=False):\n        \"\"\"\n        Sends PacketOut\n        Args:\n            node: node that will send the PacketOut\n            port: port to send PacketOut\n            data: Ethernet frame to be sent\n            lldp: if it is a LLDP packet\n\n        \"\"\"\n        rid = node.controller_id\n        payload = {'dpid': node.dpid,\n                   'action': 'send_probe',\n                   'data': data}\n\n        self._cal.send_msg(rid, 3, payload)\n\n    def add_flow(self, 
node, match, actions):\n \"\"\"\n Adds a flow to the node\n Args:\n node:\n match:\n actions:\n\n Returns:\n\n \"\"\"\n rid = node.controller_id\n payload = {'dpid': node.dpid,\n 'action': 'add_entry',\n 'data': {'match': match},\n }\n self._cal.send_msg(rid, 3, payload)\n\n def stats_request(self, node, type, body={}):\n \"\"\"\n Get statistics from the node\n Args:\n node:\n type:\n body:\n\n Returns:\n\n \"\"\"\n pass\n\n def get_stats(self):\n pass\n\n def push_flow(self, node, action, flow):\n pass\n\n def packet_in(self, msg):\n SIGNAL_PACKET_IN.send(sender='core', msg=msg)\n\n def color_node(self, node, color):\n node.old_color = node.color\n node.color = color\n\n def process_switch_config(self, msg):\n if msg.data.reason == 'added':\n self.add_switch(msg.dpid, 1, msg.data)\n elif msg.data.reason == 'modified':\n s = self.switch_exists(msg.dpid)\n if s:\n self.update_switch(s, msg.dpid, 1, msg.data)\n elif msg.data.reason == 'deleted':\n self.remove_switch(msg.dpid)\n print(self.switches)\n\n\ndef cube():\n import random\n nodes = [Node({'dpid':'0000000000000001', 'controller_id': 1, 'capabilities':'', 'n_tables':5})]\n nodes.append(Node({'dpid':'0000000000000002', 'controller_id': 1, 'capabilities':'', 'n_tables':5}))\n nodes.append(Node({'dpid': '0000000000000003', 'controller_id': 2, 'capabilities': '', 'n_tables': 5}))\n nodes.append(Node({'dpid': '0000000000000004', 'controller_id': 2, 'capabilities': '', 'n_tables': 5}))\n nodes.append(Node({'dpid': '0000000000000005', 'controller_id': 3, 'capabilities': '', 'n_tables': 5}))\n nodes.append(Node({'dpid': '0000000000000006', 'controller_id': 1, 'capabilities': '', 'n_tables': 5}))\n nodes.append(Node({'dpid': '0000000000000007', 'controller_id': 1, 'capabilities': '', 'n_tables': 5}))\n\n ports = []\n for i in range(1, 9):\n ports.append(Port({'port_no':i, 'name': '10Gigabit{}'.format(i), 'speed':10000000000,\n 'uptime':random.randint(0,1234567)}))\n for i in range(9, 17):\n ports.append(Port({'port_no':i, 'name': 'Gigabit{}'.format(i), 'speed':1000000000,\n 'uptime':random.randint(0,1234567)}))\n nodes[0].ports = ports\n\n ports = []\n for i in range(1, 17):\n ports.append(Port({'port_no':i, 'name': '10Gigabit{}'.format(i), 'speed':10000000000,\n 'uptime':random.randint(0,1234567)}))\n nodes[1].ports = ports\n\n ports = []\n for i in range(1, 9):\n ports.append(Port({'port_no':i, 'name': '10Gigabit{}'.format(i), 'speed':10000000000,\n 'uptime':random.randint(0,1234567)}))\n for i in range(9, 24):\n ports.append(Port({'port_no': i, 'name': 'Gigabit{}'.format(i), 'speed': 1000000000,\n 'uptime': random.randint(0, 1234567)}))\n nodes[2].ports = ports\n\n ports = []\n for i in range(1, 9):\n ports.append(Port({'port_no': i, 'name': 'Gigabit{}'.format(i), 'speed': 1000000000,\n 'uptime': random.randint(0, 1234567)}))\n nodes[3].ports = ports\n\n ports = []\n for i in range(1, 9):\n ports.append(Port({'port_no': i, 'name': 'Gigabit{}'.format(i), 'speed': 1000000000,\n 'uptime': random.randint(0, 1234567)}))\n for i in range(9, 11):\n ports.append(Port({'port_no': i, 'name': '10Gigabit{}'.format(i), 'speed': 10000000000,\n 'uptime': random.randint(0, 1234567)}))\n nodes[4].ports = ports\n\n ports = []\n for i in range(1, 17):\n ports.append(Port({'port_no': i, 'name': 'Gigabit{}'.format(i), 'speed': 1000000000,\n 'uptime': random.randint(0, 1234567)}))\n nodes[5].ports = ports\n\n ports = []\n for i in range(1, 9):\n ports.append(Port({'port_no': i, 'name': '10Gigabit{}'.format(i), 'speed': 10000000000,\n 'uptime': random.randint(0, 
1234567)}))\n nodes[6].ports = ports\n\n links = [(nodes[0].ports[2], nodes[1].ports[5]),\n (nodes[0].ports[12], nodes[5].ports[2]),\n (nodes[0].ports[7], nodes[6].ports[5]),\n (nodes[1].ports[2], nodes[2].ports[3]),\n (nodes[2].ports[21], nodes[3].ports[5]),\n (nodes[2].ports[2], nodes[6].ports[3]),\n (nodes[3].ports[7], nodes[4].ports[5]),\n (nodes[4].ports[2], nodes[5].ports[5]),\n (nodes[4].ports[9], nodes[6].ports[1])\n ]\n\n return nodes, links\n\n\ndef cube1():\n nodes, links = cube()\n links[8] = (nodes[4].ports[4], nodes[2].ports[13])\n return nodes, links\n\n\nclass TopologyDiscovery(object):\n\n # Listen to switch_config\n # Send LLDP\n # Listen to PacketReceived(PacketIn)\n # Store to create topology\n def __init__(self, core):\n def packet_out():\n while True:\n self.send_packet_out()\n time.sleep(PACKET_OUT_INTERVAL)\n\n @called_on(SIGNAL_PACKET_IN, weak=False)\n def process_packet_in(pkt):\n port1 = pkt['p1']\n port2 = pkt['p2']\n self.core.add_link(port1, port2)\n\n def run_topology():\n i = 0\n while True:\n switches, links = cube() if i % 2 == 0 else cube1()\n self.core.switches = switches\n self.core.links = links\n time.sleep(20)\n i += 1\n\n self.core = core\n self.sendPacketOut = threading.Thread(target=packet_out)\n self.generate_topology = threading.Thread(target=run_topology)\n #self.generate_topology.start()\n\n def send_packet_out(self):\n print(self.core.links)\n\n def generate_topology(self):\n print(self.core.links)\n\n\n","sub_path":"sdnlg/core/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":11201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"313305981","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\nfrom distutils.core import setup,Extension\n\nmymodule = Extension('_so2048',sources = ['so2048.i','so2048.c'])\n\nsetup(\n name = 'so2048',\n version = '0.0',\n author = 'Coin',\n description = 'C methods called by python',\n ext_modules = [mymodule],\n py_modules = ['so2048'],\n )\n","sub_path":"python/QT/2048/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"70316523","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport math\nimport MeCab\nfrom myreviewer.lib import cleaner, keyword_extractor\n\nTOP_FACTOR = 2\n\n# Mecabの辞書は好きな物を使ってください.\nMECAB_OPT = '-d /usr/lib/mecab/dic/mecab-ipadic-neologd'\nmecab = MeCab.Tagger(MECAB_OPT)\n\ndef compute_speciality_from_sentence(sen, dfreq, avg, stddev):\n factors = list()\n node = mecab.parseToNode(sen).next\n while node is not None:\n clazz = node.feature.split(',')\n if clazz[0] != '名詞':\n node = node.next\n continue\n surf = node.surface.lower()\n if surf not in dfreq[0]:\n node = node.next\n continue\n factors.append((math.log(dfreq[1] / dfreq[0][surf]) - avg) / stddev)\n node = node.next\n factors.sort()\n factors.reverse()\n return sum(factors[:TOP_FACTOR]) / TOP_FACTOR\n\ndef evaluate_speciality_from_dataset(dataset, dfreq):\n text = [d['sentence'].encode('utf-8') for d in dataset]\n tfreq = keyword_extractor.compute_tfreq(text)\n avg, stddev = keyword_extractor.compute_stddev(tfreq, dfreq)\n return [compute_speciality_from_sentence(sen, dfreq, avg, stddev) for sen in 
text]\n","sub_path":"myreviewer/myreviewer/lib/speciality_evaluator.py","file_name":"speciality_evaluator.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"289949920","text":"#program to calculate get area and perimeter of circle\r\n\r\nclass Circle:\r\n def CalculateArea(self):\r\n print(\"Enter radius:\")\r\n self.s=float(input()) #to get input and place in current object\r\n area=3.14*self.s*self.s\r\n print(\"Area of circle is = %f\"%(area))\r\n \r\n def CalculatePerimeter(self):\r\n perimeter=2*3.14*self.s\r\n print(\"Perimeter of circle is = %f\"%(perimeter))\r\n\r\n#program to calculate get area and perimeter of ellipse\r\n\r\nclass Ellipse:\r\n def CalculateArea(self):\r\n print(\"Enter major axis:\")\r\n self.s=float(input())\r\n print(\"Enter minor axis:\")\r\n self.c=float(input())\r\n area=self.s*self.c #it calculate area of ellipse\r\n print(\"Area of ellipse is = %f\"%(area))\r\n\r\n def CalculatePerimeter(self):\r\n perimeter=2*3.14**(self.s+self.c/2)\r\n print(\"Perimeter of ellipse is =%f\"%(perimeter))\r\n \r\n\r\n#here we create object and call the function\r\n\r\nc=Circle()\r\nc.CalculateArea()\r\nc.CalculatePerimeter()\r\n\r\nprint(\"\\n\")\r\n\r\nc=Ellipse()\r\nc.CalculateArea()\r\nc.CalculatePerimeter()\r\n","sub_path":"Shapes.py","file_name":"Shapes.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"534062945","text":"# Copyright 2015 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nDEPS = [\n 'minfs',\n 'recipe_engine/path',\n 'recipe_engine/step',\n]\n\n\ndef RunSteps(api):\n minfs_path = api.path['start_dir'].join('out', 'build-zircon', 'tools',\n 'minfs')\n\n # Ensure no default path exists & that it can be set.\n assert not api.minfs.minfs_path\n api.minfs.minfs_path = minfs_path\n assert api.minfs.minfs_path == minfs_path\n\n # Create a 200mb minfs image with a specific name\n api.minfs.create(\n path=api.path.join(api.path['start_dir'], 'image.minfs'), size=\"200M\")\n\n # Copy a file from that image\n api.minfs.cp('file-on-image.json', 'file-on-host.json', 'image.minfs')\n\n # Copy everything from the image\n api.minfs.copy_image('copy_image_step', 'image.minfs', 'output_path')\n\n\ndef GenTests(api):\n yield api.test('basic')\n","sub_path":"recipe_modules/minfs/examples/full.py","file_name":"full.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"21433048","text":"# ch8_15.py\r\nimport requests, bs4\r\n\r\nurl_ppt = 'https://www.ptt.cc'\r\nbeauty = '/bbs/beauty/index.html'\r\n\r\nptthtml = requests.get(url_ppt+beauty, cookies={'over18':'1'})\r\nobjSoup = bs4.BeautifulSoup(ptthtml.text, 'lxml')\r\n\r\npttdivs = objSoup.find_all('div', 'r-ent')\r\nhref = pttdivs[0].find('a')['href'] # 文章超連結\r\n\r\nprint('目前連線網址 : ', url_ppt+href)\r\nbeauty_html = requests.get(url_ppt+href, cookies={'over18':'1'}) # 進入超連結\r\nbeauty_soup = bs4.BeautifulSoup(beauty_html.text, 'lxml') \r\n\r\nbeauty_divs = beauty_soup.find('div', id='main-content')\r\nitems = beauty_divs.find_all('div', 'article-metaline')\r\n\r\nfor item in items: # 列印標題\r\n field = item.find('span', 'article-meta-tag')\r\n print(field.text,end=' : ')\r\n field_data = item.find('span', 'article-meta-value')\r\n 
print(field_data.text)\r\n\r\nmylist = list(beauty_divs) # 轉成串列\r\nprint('內文 : ', mylist[4].strip()) # 列印本文\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"exercise/crawler_python_dm1920/ch8/ch8_15.py","file_name":"ch8_15.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"556370648","text":"from appium import webdriver\nimport os\nfrom subprocess import Popen, PIPE\n\ndef init_driver():\n desired_caps = {}\n # devices = os.system('adb devices')\n\n resp = Popen(\"adb devices\", shell=True, stdout=PIPE, stderr=PIPE).stdout.readlines()\n ip = (resp[1][0:-9]).decode('utf-8')\n\n Version = os.system('adb -s %s shell getprop ro.build.version.release'%ip)\n\n\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = Version\n desired_caps['deviceName'] = ip\n desired_caps['appPackage'] = 'com.android.settings'\n # desired_caps['appPackage'] = 'com.android.contacts'\n desired_caps['appActivity'] = '.Settings'\n # desired_caps['appActivity'] = '.activities.PeopleActivity'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n return driver\n\n\n\n","sub_path":"init/init_driver.py","file_name":"init_driver.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"626276076","text":"#Module for Nao Mark Visual Functions\n\n# -*- encoding: UTF-8 -*-\nfrom naoqi import ALProxy\nimport math\nimport almath\n\n#Returns a multiple dimension array of naomark data\ndef getMarkData (IP, portNumber):\n #Set up the memory proxy.\n memoryProxy = ALProxy(\"ALMemory\", IP, portNumber)\n\n #Set up the landmark proxy.\n landmarkProxy = ALProxy(\"ALLandMarkDetection\", IP, portNumber)\n\n #Subscribe to landmarkDetected\n landmarkProxy.subscribe(\"GetLandMarkData\")\n\n #Wait for a mark to be detected\n markData = memoryProxy.getData(\"LandmarkDetected\")\n markData = memoryProxy.getData(\"LandmarkDetected\")\n\n #Unsubscribe to proxy\n landmarkProxy.unsubscribe(\"GetLandMarkData\")\n\n return markData\n\n#Finds and returns a NaoMark's number\ndef getMarkNumber (markData):\n\n markNumber = markData[1][0][1][0]\n return markNumber\n\n#Finds and returns the vertical and horiztontal\n#offset of a nao mark relative to nao's camera\ndef getMarkAngles (markData):\n\n #Get the landmark positions(Relative to Camera)\n wzCamera = markData[1][0][0][1]\n wyCamera = markData[1][0][0][2]\n\n return wzCamera, wyCamera\n\n#Finds and returns the x,y,z position of a nao mark\n#relative to nao's camera\ndef getMarkXYZ (IP, portNumber, markData, landmarkSize):\n\n currentCamera = \"CameraTop\"\n\n # Retrieve landmark angular size in radians.\n angularSize = markData[1][0][0][3]\n\n # Compute distance to landmark.\n distanceFromCameraToLandmark = landmarkSize / ( 2 * math.tan( angularSize / 2))\n\n motionProxy = ALProxy(\"ALMotion\", IP, portNumber)\n\n # Retrieve landmark center position in radians.\n wzCamera = markData[1][0][0][1]\n wyCamera = markData[1][0][0][2]\n\n # Get current camera position in NAO space.\n transform = motionProxy.getTransform(currentCamera, 2, True)\n transformList = almath.vectorFloat(transform)\n robotToCamera = almath.Transform(transformList)\n\n # Compute the rotation to point towards the landmark.\n cameraToLandmarkRotationTransform = 
almath.Transform_from3DRotation(0, wyCamera, wzCamera)\n\n # Compute the translation to reach the landmark.\n cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)\n\n # Combine all transformations to get the landmark position in NAO space.\n robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform *cameraToLandmarkTranslationTransform\n\n return robotToLandmark.r1_c4, robotToLandmark.r2_c4, robotToLandmark.r3_c4","sub_path":"MovementTest/NaoMarkModule.py","file_name":"NaoMarkModule.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"160563602","text":"'''\r\n1、三个厨师,每3秒做一个,满了的话停三秒做\r\n2、有5个客人,每人100元,一个面包2元,没有面包等2两秒,钱花完为止\r\n3、房间可以放500个面包\r\n\r\n'''\r\n\r\nimport threading\r\nimport time\r\n\r\nbread = 0\r\n\r\n\r\nclass chef(threading.Thread):\r\n name = \"\"\r\n c = 0\r\n def run(self) ->None:\r\n global bread\r\n while True:\r\n if bread<500:\r\n bread = bread + 1\r\n self.c = self.c + 1\r\n print(self.name,\"做了一个面包,现在有\",bread,\"个面包\")\r\n time.sleep(0.01)\r\n else:\r\n time.sleep(3)\r\n print(self.name,\"睡了3秒\")\r\n print(self.name,\"做了\",self.c,\"个面包\")\r\n\r\nclass client(threading.Thread):\r\n name = \"\"\r\n money = 1000\r\n a = 0\r\n\r\n def run(self) -> None:\r\n global bread\r\n while True:\r\n if bread > 0 and self.money>0:\r\n self.money = self.money - 2\r\n self.a = self.a + 1\r\n bread = bread - 1\r\n print(self.name,\"卖了一个面包,现在还有\",bread,\"个面包,还有\",self.money,\"钱\")\r\n time.sleep(0.05)\r\n elif self.money <=0:\r\n print(self.name,\"没有钱了\")\r\n break\r\n else:\r\n time.sleep(2)\r\n print(self.name,\"卖了\",self.a,\"个面包\")\r\n print(self.name,\"等待两秒\")\r\n\r\n\r\n\r\nc1 = chef()\r\nc2 = chef()\r\nc3 = chef()\r\nc1.name = \"厨师1\"\r\nc2.name = \"厨师2\"\r\nc3.name = \"厨师3\"\r\n\r\nce1 = client()\r\nce2 = client()\r\nce3 = client()\r\nce4 = client()\r\nce5 = client()\r\nce1.name = \"顾客1\"\r\nce2.name = \"顾客2\"\r\nce3.name = \"顾客3\"\r\nce4.name = \"顾客4\"\r\nce5.name = \"顾客5\"\r\n\r\nc1.start()\r\nc2.start()\r\nc3.start()\r\n\r\nce1.start()\r\nce2.start()\r\nce3.start()\r\nce4.start()\r\nce5.start()\r\n","sub_path":"买面包.py","file_name":"买面包.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"409297798","text":"import math\r\n \r\nclass XmathError(Exception): pass\r\n \r\ndef fact(n):\r\n \"\"\"fact(n) --> Compute the factorial of n\"\"\"\r\n try:\r\n if isinstance(n, bool): raise #numeric type required\r\n if int(n) != n: raise #value must be an integer\r\n if n < 0: raise #value cannot be nagative\r\n # if n==0 or n==1: return 1\r\n # else: return n*fact(n-1)\r\n result = 1\r\n for i in range(2, n+1):\r\n result = i*result\r\n return result\r\n except:\r\n raise XmathError\r\n \r\ndef ave(x):\r\n \"\"\"ave(x) --> Calculate the mean of x\"\"\"\r\n try:\r\n if isinstance(x, bool): raise #numeric type required\r\n for type in [int, float]:\r\n if isinstance(x, type): return x\r\n xsum = 0.0\r\n for i in range(len(x)):\r\n xsum = xsum + x[i] \r\n return xsum/len(x)\r\n except:\r\n raise XmathError\r\n \r\nif __name__ == '__main__':\r\n print('fact(10) = ', fact(10))\r\n x = [10,19,30,33]\r\n print('ave([10,19,30,33]) = ', ave(x))\r\n\r\n","sub_path":"py/py_core_code/unittest/xmath.py","file_name":"xmath.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"160875937","text":"import gym\nfrom gym import spaces\nimport numpy as np\nfrom gym_minigrid.minigrid import OBJECT_TO_IDX, COLOR_TO_IDX\nfrom enum import IntEnum\n\n\ndef environment_name(environment_class):\n if isinstance(environment_class, tuple):\n return environment_class[1].name + '-' + environment_class[0].__name__\n elif isinstance(environment_class, list):\n name = environment_class[0].__name__\n for i in range(1, len(environment_class)):\n name = environment_class[i].name + '-' + name\n return name\n else:\n return environment_class.__name__\n\n\ndef instantiate_environment(environment_class):\n if isinstance(environment_class, tuple):\n return environment_class[1](environment_class[0]())\n elif isinstance(environment_class, list):\n obj = environment_class[0]()\n for i in range(1, len(environment_class)):\n obj = environment_class[i](obj)\n return obj\n else:\n return environment_class()\n\n\nclass MyFullyObservableWrapper(gym.core.ObservationWrapper):\n name = \"Fully\"\n\n def __init__(self, env):\n super().__init__(env)\n self.__dict__.update(vars(env)) # hack to pass values to super wrapper\n self.observation_space = spaces.Dict({\n 'image': spaces.Box(\n low=0,\n high=255,\n shape=(self.env.grid_size, self.env.grid_size, 3), # number of cells\n dtype='uint8'\n ),\n 'carrying': spaces.Box(\n low=0,\n high=255,\n shape=[2],\n dtype='uint8'\n )\n })\n\n def observation(self, obs):\n full_grid = self.env.grid.encode()\n full_grid[self.env.agent_pos[0]][self.env.agent_pos[1]] = np.array([255, self.env.agent_dir, 0])\n obs[\"image\"] = full_grid\n\n if self.env.carrying:\n obs[\"carrying\"] = np.array([OBJECT_TO_IDX[self.env.carrying.type], COLOR_TO_IDX[self.env.carrying.color]])\n else:\n obs[\"carrying\"] = np.array([0, 6]) # empty type (0) and non-existing color (6)\n return obs\n\n\nclass MyFullyObservableWrapperBroadcast(gym.core.ObservationWrapper):\n name = \"FullyBroad\"\n\n def __init__(self, env):\n super().__init__(env)\n self.__dict__.update(vars(env)) # hack to pass values to super wrapper\n self.observation_space = spaces.Dict({\n 'image': spaces.Box(\n low=0,\n high=255,\n shape=(self.env.grid_size, self.env.grid_size, 4), # number of cells\n dtype='uint8'\n ),\n })\n\n def observation(self, obs):\n full_grid = self.env.grid.encode()\n full_grid[self.env.agent_pos[0]][self.env.agent_pos[1]] = np.array([255, self.env.agent_dir, 0])\n carrying = OBJECT_TO_IDX[self.env.carrying.type] if self.env.carrying else 0\n extra_layer = np.full((full_grid.shape[0], full_grid.shape[1], 1), carrying)\n\n obs[\"image\"] = np.concatenate((full_grid, extra_layer), axis=2)\n\n print(np.transpose(obs[\"image\"]))\n\n return obs\n\n\nclass MyFullyObservableWrapperEgo(gym.core.ObservationWrapper):\n name = \"Ego\"\n\n def __init__(self, env):\n super().__init__(env)\n self.__dict__.update(vars(env)) # hack to pass values to super wrapper\n self.observation_space = spaces.Dict({\n 'image': spaces.Box(\n low=0,\n high=255,\n shape=(self.env.grid_size, self.env.grid_size, 3), # number of cells\n dtype='uint8'\n ),\n })\n\n def observation(self, obs):\n full_grid = self.env.grid.encode()\n carrying = OBJECT_TO_IDX[self.env.carrying.type] if self.env.carrying else 0\n full_grid[self.env.agent_pos[0]][self.env.agent_pos[1]] = np.array([9, self.env.agent_dir, carrying])\n\n obs[\"image\"] = full_grid\n\n # print(np.transpose(obs[\"image\"]))\n\n return obs\n\n\nclass ReducedActionWrapper(gym.core.Wrapper):\n name = \"ActRed\"\n\n class Actions(IntEnum):\n # Turn left, turn right, 
 move forward\n        left = 0\n        right = 1\n        forward = 2\n\n        # Pick up an object\n        interact = 3\n\n    def reset(self):\n        return self.env.reset()\n\n    def __init__(self, env):\n        super().__init__(env)\n        # self.__dict__.update(vars(env))\n        self.actions = ReducedActionWrapper.Actions\n        self.action_space = spaces.Discrete(len(self.actions))\n\n    def step(self, action):\n        if action in [self.actions.left, self.actions.right, self.actions.forward]:\n            # print(\"from {} to {}\".format(action, action))\n            return self.env.step(action)\n        else:\n            # Get the position in front of the agent\n            fwd_pos = self.unwrapped.front_pos\n\n            # Get the contents of the cell in front of the agent\n            fwd_cell = self.unwrapped.grid.get(*fwd_pos)\n\n            if fwd_cell and fwd_cell.can_pickup():\n                # print(\"from {} to {}\".format(action, self.unwrapped.actions.pickup))\n                return self.env.step(self.unwrapped.actions.pickup)\n            else:\n                # print(\"from {} to {}\".format(action, self.unwrapped.actions.toggle))\n                return self.env.step(self.unwrapped.actions.toggle)\n\n\nclass UndiscountedRewards(gym.core.RewardWrapper):\n    name = \"Undis\"\n\n    def __init__(self, env):\n        super().__init__(env)\n\n    def step(self, action):\n        observation, reward, done, info = self.env.step(action)\n        if done:\n            reward = 1\n        else:\n            reward = 0\n            # reward = -(1 / self.unwrapped.max_steps)\n\n        return observation, reward, done, info\n\n\nclass HastyRewards(gym.core.RewardWrapper):\n    name = \"Haste\"\n\n    def __init__(self, env):\n        super().__init__(env)\n\n    def step(self, action):\n        observation, reward, done, info = self.env.step(action)\n        if done:\n            reward = 1\n        else:\n            reward = -(1 / self.unwrapped.max_steps)\n\n        return observation, reward, done, info\n","sub_path":"thesis/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"494813913","text":"import os  # needed for os.path.join below\nimport wikipedia\nimport sys\n\ntitle_file_input = sys.argv[1]\noutdir = sys.argv[2]\n\ndef download_to(title, dst):\n    try:\n        #page = wikipedia.page(title)\n        suggestion = wikipedia.search(title, results=1, suggestion=True)[0]\n        page = wikipedia.page(suggestion)\n    except:\n        print(\"Not found: %s\" % (title))\n        return\n    content = page.content\n\n    with open(dst, \"w\") as f:\n        f.write(content)\n\n\nwikipedia.set_lang(\"en\")\nwith open(title_file_input, \"r\") as titles_file:\n    for title in titles_file.readlines():\n        title = title.strip()\n        download_to(title, os.path.join(outdir, title.replace(\" \",\"_\")))\n\n\n\n","sub_path":"download_pages.py","file_name":"download_pages.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"305533690","text":"from sc2.constants import (\r\n    CANCEL,\r\n    CANCEL_MORPHLAIR,\r\n    CANCEL_MORPHHIVE,\r\n    CANCEL_MORPHOVERSEER,\r\n    HATCHERY,\r\n    HIVE,\r\n    INFESTATIONPIT,\r\n    LAIR,\r\n    MORPH_OVERSEER,\r\n    OVERLORD,\r\n    OVERLORDCOCOON,\r\n    OVERSEER,\r\n    UPGRADETOHIVE_HIVE,\r\n    UPGRADETOLAIR_LAIR,\r\n    ZERGLINGATTACKSPEED,\r\n    ZERGGROUNDARMORSLEVEL3,\r\n    ZERGMELEEWEAPONSLEVEL3,\r\n)\r\n\r\n\r\nclass extra_things:\r\n    def __init__(self):\r\n        self.location_index = 0\r\n\r\n    def cancel_attacked_hatcheries(self):\r\n        \"\"\"find the hatcheries that are building and have low health, and cancel them;\r\n        can be better, it's easy to burst 150 hp, but if I put more it might cancel itself,\r\n        will look into that later\"\"\"\r\n        if self.close_enemy_production and self.time < 300:\r\n            for building in 
self.units(HATCHERY).filter(lambda x: 0.2 < x.build_progress < 1 and x.health < 400):\r\n self.actions.append(building(CANCEL))\r\n\r\n async def detection(self):\r\n \"\"\"Morph overseers\"\"\"\r\n lords = self.units(OVERLORD)\r\n if (\r\n (self.units(LAIR) or self.units(HIVE))\r\n and self.can_afford(OVERSEER)\r\n and lords\r\n and not self.units(OVERSEER)\r\n and not any([await self.is_morphing(h) for h in self.units(OVERLORDCOCOON)])\r\n ):\r\n self.actions.append(lords.random(MORPH_OVERSEER))\r\n\r\n async def is_morphing(self, homecity):\r\n \"\"\"Check if a base or overlord is morphing, good enough for now\"\"\"\r\n abilities = await self.get_available_abilities(homecity)\r\n morphing_upgrades = (CANCEL_MORPHLAIR, CANCEL_MORPHHIVE, CANCEL_MORPHOVERSEER)\r\n for morph in morphing_upgrades:\r\n if morph in abilities:\r\n return True\r\n return False\r\n\r\n async def morphing_townhalls(self):\r\n \"\"\"Works well, maybe the timing can be improved\"\"\"\r\n if not (\r\n all(\r\n self.caverns.ready and i == 1\r\n for i in (\r\n self.already_pending_upgrade(ZERGGROUNDARMORSLEVEL3),\r\n self.already_pending_upgrade(ZERGMELEEWEAPONSLEVEL3),\r\n self.already_pending_upgrade(ZERGLINGATTACKSPEED),\r\n )\r\n )\r\n ):\r\n lair = self.units(LAIR)\r\n hive = self.units(HIVE)\r\n base = self.units(HATCHERY)\r\n # Hive\r\n if (\r\n self.units(INFESTATIONPIT).ready\r\n and not hive\r\n and self.can_afford(HIVE)\r\n and not any([await self.is_morphing(h) for h in lair])\r\n and lair.ready.idle\r\n ):\r\n self.actions.append(lair.ready.idle.first(UPGRADETOHIVE_HIVE))\r\n # Lair\r\n if (\r\n len(self.townhalls) >= 3\r\n and self.can_afford(UPGRADETOLAIR_LAIR)\r\n and not (lair or hive)\r\n and not any([await self.is_morphing(h) for h in base])\r\n and base.ready.idle\r\n ):\r\n self.actions.append(base.ready.idle.furthest_to(self._game_info.map_center)(UPGRADETOLAIR_LAIR))\r\n","sub_path":"general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"493134325","text":"from person import *\n# Inherit the Person class\n\n\nclass Bomberman(Person):\n\n def __init__(self):\n Person.__init__(self, 2, 4, 'B')\n # Create the bomberman and update the board\n\n def create(self, arr):\n x = self.x\n y = self.y\n s = self.smbl\n for i in range(2):\n for j in range(4):\n arr[x + i][y + j] = s\n","sub_path":"bomberman.py","file_name":"bomberman.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"517428811","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fluent_pages', '0001_initial'),\n ('tests', '0009_auto_20170519_1232'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='unpublishablelayoutpage',\n name='layout',\n ),\n migrations.RemoveField(\n model_name='unpublishablelayoutpage',\n name='urlnode_ptr',\n ),\n migrations.AlterModelOptions(\n name='publishingm2mmodela',\n options={'permissions': (('can_publish', 'Can publish'), ('can_republish', 'Can republish'))},\n ),\n migrations.AlterModelOptions(\n name='publishingm2mmodelb',\n options={'permissions': (('can_publish', 'Can publish'), ('can_republish', 'Can republish'))},\n ),\n migrations.DeleteModel(\n name='UnpublishableLayoutPage',\n ),\n 
]\n","sub_path":"icekit/tests/migrations/0010_auto_20170522_1600.py","file_name":"0010_auto_20170522_1600.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"214430954","text":"from logging import getLogger\n\nimport py\nimport pytest\nfrom _pytest.pytester import Testdir\n\nlog = getLogger(__name__)\nMPI_ARGS = (\"mpirun\", \"-n\")\n\n\n@pytest.fixture\ndef has_mpi4py():\n try:\n import mpi4py\n return True\n except ImportError:\n return False\n\n\nclass MPITestdir(Testdir):\n def __init__(self, request, tmpdir_factory):\n super().__init__(request, tmpdir_factory)\n method = self.request.config.getoption(\"--runpytest\")\n if method == \"inprocess\":\n log.warn(\"To run the MPI tests, you need to use subprocesses\")\n\n def runpytest_subprocess(\n self, *args, timeout=60, mpi_procs=2, max_retries=5\n ):\n \"\"\"\n Based on testdir.runpytest_subprocess\n \"\"\"\n retries = 0\n p = py.path.local.make_numbered_dir(\n prefix=\"runpytest-\", keep=None, rootdir=self.tmpdir\n )\n args = (\"--basetemp=%s\" % p,) + args\n plugins = [x for x in self.plugins if isinstance(x, str)]\n if plugins:\n args = (\"-p\", plugins[0]) + args\n args = MPI_ARGS + (str(mpi_procs),) + self._getpytestargs() + args\n while retries < max_retries:\n try:\n return self.run(*args, timeout=timeout)\n except self.TimeoutExpired as e:\n retries += 1\n if retries >= max_retries:\n raise\n raise e\n\n def runpytest(self, *args, **kwargs):\n return self.runpytest_subprocess(*args, **kwargs)\n\n\n@pytest.fixture\ndef mpi_testdir(request, tmpdir_factory):\n return MPITestdir(request, tmpdir_factory)\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"285333667","text":"def earliest_ancestor(ancestors, starting_node):\n ### creating the graph ###\n # create an empty dictionary as our graph\n graph = {}\n # loop through ancestors\n for parent_child in ancestors:\n # for each person in a parent-child relationship\n for person in parent_child:\n # if the vertex doesn't exist for that person\n if person not in graph:\n # create a vertex in our graph and set it equal to an empty set\n graph[person] = set()\n # then add the second person as an adjacent vertex to the first person\n parent = parent_child[0]\n child = parent_child[1]\n graph[child].add(parent)\n\n # searching through our graph\n # create an empty stack and an empty visited set\n stack = []\n\n # initialize the stack with the person we're going to search the earliest ancestor for\n stack.append([starting_node])\n\n # edge case: starting node has no parents\n earliest_ancestor = -1\n # keep track of the length\n max_path_length = 1\n\n # repeat this code as long as there are nodes in the stack\n while len(stack) > 0:\n path = stack.pop()\n last_node = path[-1]\n\n if len(path) > max_path_length:\n max_path_length = len(path)\n earliest_ancestor = last_node\n elif len(path) >= max_path_length and last_node < earliest_ancestor:\n max_path_length = len(path)\n earliest_ancestor = last_node\n\n for neighbor in graph[last_node]:\n new_path = path + [neighbor]\n stack.append(new_path)\n\n return earliest_ancestor\n\n\n# my own testing\ntest_ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7),\n (4, 5), (4, 8), (8, 9), (11, 8), (10, 1)]\nprint(earliest_ancestor(test_ancestors, 6))\n\n\n# # # extra code\n# if len(stack) == 2:\n# if 
stack[0][-1] < stack[1][-1]:\n# earliest_ancestor = stack[0][-1]\n# return earliest_ancestor\n# else:\n# earliest_ancestor = stack[1][-1]\n# return earliest_ancestor\n\n# if len(graph[earliest_ancestor]) == 0:\n# return -1\n","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"181655684","text":"processo_cnj = input(\"Digite o número do processo: \")\r\nnumero_sequecial = processo_cnj[0:7] # NNNNNNN\r\ndigito_verificador = processo_cnj[8:10] # DD\r\nano_do_ajuizamento = processo_cnj[11:15] # AAAA\r\npoder_judiciario = processo_cnj[16] # J\r\ntribunal_do_segmento = processo_cnj[18:20] # TR\r\nunidade_de_origem = processo_cnj[21:25] # OOOO\r\n\r\nPoder_Judiciario = {'1': 'Supremo Tribunal Federal',\r\n '2': 'Conselho Nacional de Justiça',\r\n '3': 'Superior Tribunal de Justiça',\r\n '4': 'Justiça Federal',\r\n '5': 'Justiça do Trablho',\r\n '6': 'Justiça Eleitoral',\r\n '7': 'Justiça Militar da União',\r\n '8': 'Justiça dos Estados e do Distrito Federal e Territórios',\r\n '9': 'Justiça Militar Estadual',\r\n }\r\n\r\norgao = Poder_Judiciario.get(poder_judiciario)\r\nprint('Numero do Processo:', numero_sequecial)\r\nprint('Digito Verificador:', digito_verificador)\r\nprint('Ano do processo:', ano_do_ajuizamento)\r\nprint('Segmento do Poder Judiciário:', orgao)","sub_path":"Validação Orgão do Poder Judiciario.py","file_name":"Validação Orgão do Poder Judiciario.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"614723646","text":"import pydls\nimport numpy\nimport itertools\nimport struct\n\ncap_mem_key_map = {\n \"vLeak\" : pydls.Cap_mem_row( 0),\n \"vThresh\" : pydls.Cap_mem_row( 1),\n \"vSynEx\" : pydls.Cap_mem_row( 2),\n \"vSynIn\" : pydls.Cap_mem_row( 3),\n \"vUnused_4\" : pydls.Cap_mem_row( 4),\n \"vUnused_5\" : pydls.Cap_mem_row( 5),\n \"vUnused_6\" : pydls.Cap_mem_row( 6),\n \"vUnused_7\" : pydls.Cap_mem_row( 7),\n \"vUnused_8\" : pydls.Cap_mem_row( 8),\n \"iBiasSpkCmp\" : pydls.Cap_mem_row( 9),\n \"iBiasDelay\" : pydls.Cap_mem_row(10),\n \"iBiasLeak\" : pydls.Cap_mem_row(11),\n \"iBiasLeakSd\" : pydls.Cap_mem_row(12),\n \"iBiasReadOut\" : pydls.Cap_mem_row(13),\n \"iRefr\" : pydls.Cap_mem_row(14),\n \"iBiasSynGmEx\" : pydls.Cap_mem_row(15),\n \"iBiasSynSdEx\" : pydls.Cap_mem_row(16),\n \"iBiasSynResEx\" : pydls.Cap_mem_row(17),\n \"iBiasSynOffEx\" : pydls.Cap_mem_row(18),\n \"iBiasSynResIn\" : pydls.Cap_mem_row(19),\n \"iBiasSynGmIn\" : pydls.Cap_mem_row(20),\n \"iUnused_21\" : pydls.Cap_mem_row(21),\n \"iBiasSynSdIn\" : pydls.Cap_mem_row(22),\n \"iBiasSynOffIn\" : pydls.Cap_mem_row(23),\n }\n\ndac_key_map = {\n \"cadc_ramp_bias\" : pydls.Dac_channel.cadc_ramp_bias(),\n \"cadc_ramp_01\" : pydls.Dac_channel.cadc_ramp_01(),\n \"cadc_ramp_slope\" : pydls.Dac_channel.cadc_ramp_slope(),\n \"cadc_vbias\" : pydls.Dac_channel.cadc_vbias(),\n \"syn_vddresmeas\" : pydls.Dac_channel.syn_vddresmeas(),\n \"syn_vstore\" : pydls.Dac_channel.syn_vstore(),\n \"syn_vramp\" : pydls.Dac_channel.syn_vramp(),\n \"syn_vbias\" : pydls.Dac_channel.syn_vbias(),\n \"capmem_ioffset\" : pydls.Dac_channel.capmem_ioffset(),\n \"general_purpose_0\" : pydls.Dac_channel.general_purpose_0(),\n \"general_purpose_1\" : pydls.Dac_channel.general_purpose_1(),\n \"syn_vreset\" : pydls.Dac_channel.syn_vreset(),\n \"syn_coroutbias\" : 
pydls.Dac_channel.syn_coroutbias(),\n \"capmem_ibuf_bias\" : pydls.Dac_channel.capmem_ibuf_bias(),\n \"capmem_iref\" : pydls.Dac_channel.capmem_iref(),\n }\n\ndef fill_cap_mem_row(cap_mem, row, value):\n for index in range(pydls.Neuron_index.num_neurons):\n cap_mem.set(row, pydls.Cap_mem_column(index), value)\n\ndef set_cap_mem_values(cap_mem, values):\n for key, row in cap_mem_key_map.items():\n fill_cap_mem_row(cap_mem, row, values[key])\n # Global paramter vReset\n cap_mem.set(\n pydls.Cap_mem_row(0),\n pydls.Cap_mem_column(pydls.Neuron_index.num_neurons),\n values[\"vReset\"])\n\ndef set_fixed_indegree(synram, weight, degree, address=0):\n nonzero_synapse = pydls.Synapse()\n nonzero_synapse.address(address)\n nonzero_synapse.weight(weight)\n for col in range(pydls.Neuron_index.num_neurons):\n perm = numpy.random.permutation(pydls.Neuron_index.num_neurons)\n perm = perm[:degree]\n for row in perm:\n synram.set(\n pydls.Synapse_row(row),\n pydls.Synapse_column(col),\n nonzero_synapse)\n\ndef set_correlation_switches(synram, config):\n switch = pydls.Synapse()\n switch.config(config)\n for col in range(pydls.Neuron_index.num_neurons):\n synram.set(pydls.Synapse_row(33), pydls.Synapse_column(col), switch)\n\ndef set_syndrv_inhibitory(syndrv, indexes):\n for index in indexes:\n syndrv.senx(pydls.Synapse_row(index), False)\n syndrv.seni(pydls.Synapse_row(index), True)\n\ndef setup_dac(connection, values):\n dac_control = pydls.Dac_control()\n dac_control.gain = 0\n dac_control.buf = 3\n dac_control.vdo = 0\n pydls.set_dac_control(connection, pydls.dac12, dac_control)\n pydls.set_dac_control(connection, pydls.dac25, dac_control)\n for key, value in values.items():\n pydls.set_dac(connection, dac_key_map[key], value)\n\ndef start_ppu(program_builder):\n # Prepare control registers\n toggle_off = pydls.Ppu_control_reg()\n toggle_off.inhibit_reset(False)\n toggle_on = pydls.Ppu_control_reg()\n toggle_on.inhibit_reset(True)\n\n # Start the ppu by switching the inhibit reset bit off and on again\n program_builder.set_ppu_control_reg(toggle_off)\n program_builder.set_ppu_control_reg(toggle_on)\n\ndef stop_ppu(program_builder):\n # Prepare control register\n toggle_off = pydls.Ppu_control_reg()\n toggle_off.inhibit_reset(False)\n toggle_off.force_clock_off(True)\n\n # Stop the ppu by clearing the inhibit reset bit, plus forcing the clock to\n # be off\n program_builder.set_ppu_control_reg(toggle_off)\n\ndef make_synapse_array(synram):\n num_rows = pydls.Synapse_driver.num_drivers\n num_cols = pydls.Neuron_index.num_neurons\n synapse_array = numpy.zeros((num_rows, num_cols, 2), dtype=numpy.uint8)\n for row, col in itertools.product(range(num_rows), range(num_cols)):\n synapse = synram.get(pydls.Synapse_row(row), pydls.Synapse_column(col))\n synapse_array[row, col, 0] = synapse.weight()\n synapse_array[row, col, 1] = synapse.address()\n return synapse_array\n\ndef make_spiketrain_array(spiketrain):\n ret = numpy.zeros((2, len(spiketrain)), dtype=int)\n for index, spike in enumerate(spiketrain):\n ret[0, index] = spike.time\n ret[1, index] = spike.address\n return ret\n\ndef create_spikes_poisson(num_bins, probability):\n spikes = numpy.random.rand(num_bins, pydls.Neuron_index.num_neurons)\n spikes = (spikes < probability).astype(numpy.uint32)\n factors = numpy.power(2, range(pydls.Neuron_index.num_neurons)).astype(numpy.uint32)\n spike_masks = numpy.dot(spikes, factors)\n return spike_masks\n\ndef bytes_in_words(words):\n bytes_in_words = (struct.unpack('BBBB', struct.pack('>I', word)) for word in 
words)\n return itertools.chain.from_iterable(bytes_in_words)\n\ndef set_stdp_calib(synram, calib):\n for row in range(pydls.Synapse_driver.num_drivers):\n for col in range(pydls.Neuron_index.num_neurons):\n synapse = synram.get(pydls.Synapse_row(row), pydls.Synapse_column(col))\n synapse.config(calib[row, col])\n synram.set(pydls.Synapse_row(row), pydls.Synapse_column(col), synapse)\n","sub_path":"hardware/mdp/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"511412035","text":"# class Node:\n# def __init__(self, data):\n# self.data = data\n# self.next = None\n# self.prev = None\n\n# class doubly_linked_list:\n# def __init__(self):\n# self.head = None\n# def push(self, newdata):\n# NewNode = Node(newdata)\n# NewNode.next = self.head\n# if self.head is not None:\n# self.head.prev = NewNode\n# self.head = NewNode\n# def Print(self, node):\n# while node is not None:\n# print(node.data, end=\" \")\n# last = node\n# node = node.next\n\n# dll = doubly_linked_list()\n# dll.push(\"1\")\n# dll.push(\"2\")\n# dll.push(\"3\")\n# dll.Print(dll.head)\n\n#Program Insertion\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.prev = None\n\nclass doubly_linked_list:\n def __init__(self):\n self.head = None\n def push(self, newdata):\n NewNode = Node(newdata)\n NewNode.next = self.head\n if self.head is not None:\n self.head.prev = NewNode\n self.head = NewNode\n def Insert(self, prev_node, newdata):\n if prev_node is None:\n return\n NewNode = Node(newdata)\n NewNode.next = prev_node.next\n prev_node.next = NewNode\n NewNode.prev = prev_node\n if NewNode.next is not None:\n NewNode.next.prev = NewNode\n def Append(self, NewVal):\n NewNode = Node(NewVal)\n NewNode.next = None\n if self.head is None:\n NewNode.prev = None\n self.head = NewNode\n return\n last = self.head\n while (last.next is not None):\n last = last.next\n last.next = NewNode\n NewNode.prev = last \n return\n\n\n # def Print(self, node):\n # while node is not None:\n # print(node.data, end=\" \")\n # last = node\n # node = node.next\n def Print(self):\n currentnode = self.head\n while currentnode is not None:\n print(currentnode.data, end=\" \")\n currentnode = currentnode.next\n print(\"Null\")\nif __name__ == \"__main__\":\n n = int(input(\"Enter the number of elements you would like to add: \"))\n dll = doubly_linked_list()\n # dll.push(\"1\")\n # dll.push(\"2\")\n # dll.push(\"3\")\n # dll.Insert(dll.head.next,\"4\")\n # dll.Print(dll.head)\n for i in range(n):\n newdata = int(input(\"Enter data: \"))\n dll.push(newdata) \n dll.Print()\n k = int(input(\"Enter the number of elements you would like to insert: \"))\n for j in range(k):\n newdata = int(input(\"Enter data: \"))\n dll.Insert(dll.head.next, newdata)\n dll.Print()\n m = int(input(\"Enter the number of elements you would like to append: \"))\n for i in range(m):\n NewVal = int(input(\"Enter data: \"))\n dll.Append(NewVal)\n dll.Print()\n\n\n\n","sub_path":"practice/Advanced_linked_list.py","file_name":"Advanced_linked_list.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"123975112","text":"\nfrom django import template\nfrom django.core.cache import cache\n\nfrom classytags.arguments import Argument\nfrom classytags.core import Options\nfrom classytags.helpers import InclusionTag\n\nfrom apps.fragment.models 
import Fragment\n\nregister = template.Library()\n\n\n@register.tag()\nclass FragmentShow(InclusionTag):\n\n name = 'fragment_show'\n template = 'fragment/render.html'\n\n options = Options(\n Argument('slug', required=True),\n Argument('not_found_template', required=False),\n )\n\n def get_context(self, context, slug, not_found_template=None):\n request = context['request']\n content = cache.get(Fragment.cache_name(slug), None)\n if content is None:\n try:\n fragment = Fragment.objects.get(slug=slug)\n if fragment.enabled:\n content = fragment.body\n cache.set(Fragment.cache_name(slug), content)\n else:\n cache.set(Fragment.cache_name(slug), '')\n\n except Fragment.DoesNotExist:\n if not_found_template:\n self.template = not_found_template\n\n context.update({\n 'fragment_content': content\n })\n\n return context\n","sub_path":"apps/fragment/templatetags/fragment.py","file_name":"fragment.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"1443647","text":"from enum import Enum\n\nclass State(Enum):\n null = 0\n player_action = 1\n player_noaction = 2\n exit = 999\n\nclass Gameplaystate(object):\n def __init__(self):\n self.status = State.null\n\n def setstate(self, x):\n self.status = x","sub_path":"state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"336963978","text":"\nfrom __future__ import print_function\nimport io\nimport sys\nimport os\nimport time\nimport argparse\nimport numpy as np\nimport picamera\nfrom builtins import input\nfrom readchar import readchar, readkey\nfrom openflexure_stage import OpenFlexureStage\nfrom openflexure_microscope import load_microscope\nfrom openflexure_microscope.microscope import picamera_supports_lens_shading\nimport scipy\nfrom scipy import ndimage, signal\nimport matplotlib.pyplot as plt\nfrom contextlib import contextmanager, closing\nimport data_file\nimport cv2\nfrom camera_stuff import find_template\n#import h5py\nimport threading\nimport queue\n\ndef movement(step, event, ms):\n while not event.wait(1):\n ms.stage.move_rel(step)\n\ndef printProgressBar(iteration, total, length = 10):\n percent = 100.0 * iteration / total\n filledLength = int(length * iteration // total)\n bar = '*' * filledLength + '-' * (length - filledLength)\n print('Progress: |%s| %d%% Completed' % (bar, percent), end = '\\r')\n if iteration == total: \n print()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description = \"Finds the smallest resolvable step\")\n parser.add_argument(\"step\", type = int, nargs = 3, help = \"The displacement between each point, in steps\")\n parser.add_argument(\"--n_frames\", type = int, default = 2000, help = \"The number of frames to record for each run\")\n parser.add_argument(\"--framerate\", type=int, default= 100, help=\"Rate at which to run the camera (frames/second)\")\n args = parser.parse_args()\n\n with load_microscope(\"microscope_settings.npz\", dummy_stage = False) as ms, \\\n closing(data_file.Datafile(filename = \"precision.hdf5\")) as df:\n\n assert picamera_supports_lens_shading(), \"You need the updated picamera module with lens shading!\"\n\n camera = ms.camera\n stage = ms.stage\n\n N_frames = args.n_frames\n step = args.step\n framerate = args.framerate\n backlash = 256\n\n camera.resolution=(640,480)\n camera.framerate = framerate\n stage.backlash = backlash\n\n cam_pos = 
df.new_group(\"data\", \"precision\")\n data = np.zeros((N_frames, 3))\n\n outputs = [io.BytesIO() for i in range(N_frames)]\n\n camera.start_preview(resolution=(640,480))\n\n stage.move_rel([-backlash, -backlash, -backlash])\n stage.move_rel([backlash, backlash, backlash])\n\n image = ms.rgb_image().astype(np.float32)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n mean = np.mean(image)\n templ8 = (image - mean)[100:-100, 100:-100]\n imgfile_location = \"/home/pi/summer/drift/calibration/precision_templ8.jpg\"\n cv2.imwrite(imgfile_location, templ8)\n imgfile_location = \"/home/pi/summer/drift/calibration/precision_image.jpg\"\n cv2.imwrite(imgfile_location, image)\n\n event = threading.Event()\n\n initial_stage_position = stage.position\n t = threading.Thread(target = movement, args = (step, event, ms), name = 'thread1')\n t.start()\n\n try:\n start_t = time.time()\n camera.capture_sequence(outputs, 'jpeg', use_video_port=True)\n end_t = time.time()\n finally:\n event.set()\n t.join()\n stage.move_abs(initial_stage_position)\n camera.stop_preview()\n print (\"Stopping...\")\n\n print(\"Recorded {} frames in {} seconds ({} fps)\".format(N_frames, end_t - start_t, N_frames / (end_t - start_t)))\n print(\"Camera framerate was set to {}, and reports as {}\".format(framerate, camera.framerate))\n\n for j, k in enumerate(outputs):\n frame_data = np.fromstring(k.getvalue(), dtype = np.uint8)\n frame = cv2.imdecode(frame_data, 1)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n data[j, 1:], corr = find_template(templ8, frame - np.mean(frame), return_corr = True, fraction = 0.5)\n data[j, 0] = float(j) / float(framerate)\n printProgressBar(j, N_frames)\n print(\"\")\n\n df.add_data(data, cam_pos, \"data\")\n","sub_path":"precision.py","file_name":"precision.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"234840156","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 14 20:09:12 2020\n\n@author: adeela\n\"\"\"\n\n'''\n\nhttps://www.thepythoncorner.com/2017/12/the-art-of-avoiding-nested-code/\nhttps://nedbatchelder.com/text/iter.html\nhttps://nedbatchelder.com/blog/201608/breaking_out_of_two_loops.html\nhttps://book.pythontips.com/en/latest/map_filter.html\nGood Explanation of reduce() at the top --> \n https://www.geeksforgeeks.org/reduce-in-python/\n\n'''\n\n# =============================================================================\n# find odd numbers in list using list comprehension\n# =============================================================================\n\nA = [1,2,3,4,5,6,7,8,9,11]\n\n##same old for loop\nO = []\nfor x in A:\n if x % 2 != 0:\n O.append(x) \n \n## fancy straight forward list comprehension syntax \nX = [x for x in A if x%2!=0] \nprint(X)\n\n\n# =============================================================================\n# Manipulate elements in the list \n# =============================================================================\n# add letter 'a' times the element in the list\nimport itertools\n#itertools.chain(*E) --> E is a list of lists and choice() flattens it to one list\nB = [1,3,4,5]\n\nB_ = ['a '* i for i in B]\nprint(B_)\n# output --> ['a ', 'a a a ', 'a a a a ', 'a a a a a ']\n\n#Now let's suppose C represents the weights of elements in array B \n# we can multiple weights OR we would like to exapnd elements in B as per weights in C \nC = [4,5,2,1]\n#explanation of line 76 \n# integer * string means string is 
\n# Now let's suppose C represents the weights of elements in array B \n# we can multiply the weights, OR we would like to expand elements in B as per the weights in C \nC = [4,5,2,1]\n# explanation of the E expression below: \n# integer * string means the string is replicated the number of times indicated by the integer \n# join in this case is adding a space between each element of the string we got from step 1; it's still one string\n# split() will split the string and put each element as a separate element in a list\n# 4 * '1' --> '1111' --> '1 1 1 1' --> ['1', '1', '1', '1']\n\nE = [(' '.join(C[i]* str(B[i]))).split(' ') for i in range(len(B))]\nE = list(itertools.chain(*E)) # flatten list of lists \nE = [int(x) for x in E ] # convert to integer list \n\n# =============================================================================\n# #### OR simplify it further as follows w/o using join()\n# =============================================================================\n# list('ABC') --> ['A', 'B', 'C']\n\nE = [list(C[i]* str(B[i])) for i in range(len(B))]\nE = list(itertools.chain(*E)) # flatten list of lists \nE = [int(x) for x in E ] # convert to integer list \n\n# =============================================================================\n# Filter the numbers from list using list comprehension vs filter()\n# =============================================================================\n\n# keep only the numbers that are multiples of 11 \n\n# LIST COMPREHENSION way\nX = [1,2,3,4,5,5,6,7,11,22,55,44]\n\nmultiples_11 = [x for x in X if x%11 == 0]\n\n# FILTER way \n# filter(function_to_apply, Iterable) returns an Iterable object; that's\n# why we need to wrap the result in list() to get a list object \n# lambda lets us define an anonymous function \n#\nmultiples_11_f = list(filter(lambda x: x%11==0, X))\n
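\n# --- editor's note (not in the original): both spellings above agree ---\nassert multiples_11 == multiples_11_f == [11, 22, 55, 44]\n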
\n# =============================================================================\n# Find coordinate pairs using zip() vs list comprehension\n# =============================================================================\n\nX = range(0,100, 1) # numbers from 0 to 100 with jump_size == 1\nY = range(0,200, 2) # numbers from 0 to 200 with jump_size == 2\n\n# create (x,y) pairs by combining elements in both lists by index, \n# meaning x0 combined with y0, x1 with y1 and so on. \n# The final size of the list is the same as the size of X or Y\nX_Y = list(zip (X,Y)) \n\n# this gives the cartesian pairs of tuples \nX_Y_cartesian = [(x, y) for x in X for y in Y]\n\n# Now we can expand it further to filter out some of the pairs:\n# only pairs where both x and y are even \nX_Y_cartesian_even = [(x,y) for x in X for y in Y if x%2== 0 and y%2==0]\n\n# =============================================================================\n# sum(product) numbers using list comprehension vs reduce() \n# from functools library\n# =============================================================================\n\nX = range(1,100)\n\n# Plain old way to calculate sum \ntemp = 0 \nfor x in X:\n temp +=x\n \n# A sum is not possible via a list comprehension alone -- use sum() or reduce()\n\nfrom functools import reduce\n# Using reduce(); it takes a lambda function with exactly two inputs: \n# first, x and y are the first two elements of the list and their result is computed; \n# in the next iteration the next element and the result from the previous \n# computation are picked, the result is computed, and so on\n\nX_sum = reduce(lambda x,y: x+y, X) \n\n# =============================================================================\n# changing elements of the list via map() vs list comprehension\n# =============================================================================\n \n# Task: convert all elements in a list of int to string and append a space after \n# the number\n\nX = range(1,5)\n\n# plain old way to do it \nX_m= []\nfor x in X:\n X_m.append(str(x) + ' ')\n\n# list comprehension \n \nX_m = [str(x)+ ' ' for x in X]\n\n# map() also returns an Iterable object, so we need to wrap it with list()\n# to get a list of elements\n\nX_m = list(map (lambda x: str(x) + ' ', X ))\n
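\n# --- editor's note (not in the original): all three spellings above agree ---\nassert X_m == ['1 ', '2 ', '3 ', '4 '] # holds for the loop, the comprehension, and map()\n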
\n\n\n\n\n\n\n\n\n\n","sub_path":"python/python_language/list_comprehension_vs_map_filter_reduce.py","file_name":"list_comprehension_vs_map_filter_reduce.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"319000528","text":"#!/usr/bin/python3\n\nimport glob\nimport dbus\nimport dbus.service\n\nDBUS_PATH = '/com/zexilonoxiouz/dimmer/device'\nDBUS_INTERFACE = 'com.zexilonoxiouz.dimmer.device'\n# NOTE (editor): the original introspection XML was stripped during text\n# extraction; it is reconstructed below from the decorated signatures in this class.\nDBUS_INTROSPECT = \"\"\"\n<node>\n <interface name=\"com.zexilonoxiouz.dimmer.device\">\n  <method name=\"GetTemperature\">\n   <arg type=\"a{si}\" direction=\"out\"/>\n  </method>\n  <method name=\"SetWatchdog\">\n   <arg type=\"b\" direction=\"in\"/>\n  </method>\n  <method name=\"GetWatchdog\">\n   <arg type=\"b\" direction=\"out\"/>\n  </method>\n  <property name=\"Temperature\" type=\"a{si}\" access=\"read\"/>\n  <property name=\"Watchdog\" type=\"b\" access=\"readwrite\"/>\n </interface>\n <interface name=\"org.freedesktop.DBus.Properties\">\n  <method name=\"Get\">\n   <arg type=\"s\" direction=\"in\"/>\n   <arg type=\"s\" direction=\"in\"/>\n   <arg type=\"v\" direction=\"out\"/>\n  </method>\n  <method name=\"GetAll\">\n   <arg type=\"s\" direction=\"in\"/>\n   <arg type=\"a{sv}\" direction=\"out\"/>\n  </method>\n  <method name=\"Set\">\n   <arg type=\"s\" direction=\"in\"/>\n   <arg type=\"s\" direction=\"in\"/>\n   <arg type=\"v\" direction=\"in\"/>\n  </method>\n  <signal name=\"PropertiesChanged\">\n   <arg type=\"s\"/>\n   <arg type=\"a{sv}\"/>\n   <arg type=\"as\"/>\n  </signal>\n </interface>\n <interface name=\"org.freedesktop.DBus.Introspectable\">\n  <method name=\"Introspect\">\n   <arg type=\"s\" direction=\"out\"/>\n  </method>\n </interface>\n</node>\n\"\"\"\n\nclass Device(dbus.service.Object):\n def __init__(self, name):\n self.bus_name = dbus.service.BusName(name, dbus.SystemBus())\n dbus.service.Object.__init__(self, self.bus_name, DBUS_PATH)\n def __del__(self):\n self.remove_from_connection()\n return\n\n @dbus.service.method(DBUS_INTERFACE, '', 'a{si}')\n def GetTemperature(self):\n try:\n types = []\n for child in glob.glob('/sys/class/thermal/thermal_zone*/type'):\n types.append(open(child, 'r').read().strip())\n for child in glob.glob('/sys/devices/platform/coretemp.0/hwmon/hwmon*/temp*_label'):\n types.append(open(child, 'r').read().strip())\n temps = []\n for child in glob.glob('/sys/class/thermal/thermal_zone*/temp'):\n temps.append(open(child, 'r').read().strip())\n for child in glob.glob('/sys/devices/platform/coretemp.0/hwmon/hwmon*/temp*_input'):\n temps.append(open(child, 'r').read().strip())\n except:\n return {}\n results = {}\n for key, value in enumerate(types):\n v = int(temps[key])\n k = value\n while k in results:\n if k.find('-') < 0:\n k = k + '-1'\n else:\n b = k.split('-')[0]\n l = k.split('-')[1]\n # the suffix is a string, so convert before incrementing\n k = b + '-' + str(int(l) + 1)\n results.update({k:v})\n return results\n\n @dbus.service.method(DBUS_INTERFACE, 'b')\n def SetWatchdog(self, value):\n if value == self.GetWatchdog():\n return\n # parenthesized so the newline is appended in both cases\n open('/proc/sys/kernel/nmi_watchdog', 'w').write(('1' if value else '0') + '\\n')\n @dbus.service.method(DBUS_INTERFACE, '', 'b')\n def GetWatchdog(self):\n return open('/proc/sys/kernel/nmi_watchdog', 'r').read().strip() == '1'\n\n # PROPERTIES\n @dbus.service.method(dbus.PROPERTIES_IFACE, 'ss', 'v')\n def Get(self, interface, property):\n if interface != DBUS_INTERFACE:\n raise dbus.exceptions.DBusException('no such interface')\n if property == 'Temperature':\n return self.GetTemperature()\n elif property == 'Watchdog':\n return self.GetWatchdog()\n else:\n raise dbus.exceptions.DBusException('no such property')\n\n @dbus.service.method(dbus.PROPERTIES_IFACE, 's', 'a{sv}')\n def GetAll(self, interface):\n if interface != DBUS_INTERFACE:\n raise dbus.exceptions.DBusException('no such interface')\n return {\n 'Temperature': self.GetTemperature(),\n 'Watchdog' : self.GetWatchdog(),\n }\n\n @dbus.service.method(dbus.PROPERTIES_IFACE, 'ssv')\n def Set(self, interface, property, value):\n if interface != DBUS_INTERFACE:\n raise dbus.exceptions.DBusException('no such interface')\n if property == 'Watchdog':\n self.SetWatchdog(value)\n else:\n raise dbus.exceptions.DBusException('no such property')\n self.PropertiesChanged(interface, {property:value}, [])\n\n @dbus.service.signal(dbus.PROPERTIES_IFACE, 'sa{sv}as')\n def PropertiesChanged(self, interface, changed, invalidated):\n pass\n\n # INTROSPECTABLE\n @dbus.service.method(dbus.INTROSPECTABLE_IFACE, '', 's')\n def Introspect(self):\n return DBUS_INTROSPECT\n","sub_path":"server/component/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"268963782","text":"from rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom accounts.models import User\n\nfrom tests.factories import ServiceFactory\n\n\nclass ServicesApiTestCase(APITestCase):\n \"\"\"Test module for services\"\"\"\n\n def setUp(self):\n User.objects.create_user(\n username='root1@root.com',\n email='root1@root.com',\n password='root1'\n )\n self.client.login(username='root1@root.com', password='root1')\n self.service1 = ServiceFactory()\n self.service2 = ServiceFactory()\n\n def test_get_all_services(self):\n \"\"\"Test GET all services\"\"\"\n response = self.client.get('/v1/services/')\n self.assertEqual(len(response.data['results']), 2)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_get_single_service(self):\n \"\"\"Test GET a single service\"\"\"\n response = self.client.get(f'/v1/services/{self.service1.id}/')\n self.assertEqual(response.data['code'], self.service1.code)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_filter_services_by_exact_code(self):\n \"\"\"Test filter services by exact code\"\"\"\n response = self.client.get(f'/v1/services/?code={self.service1.code}')\n self.assertEqual(response.data['results'][0]['code'], self.service1.code)\n self.assertNotEqual(response.data['results'][0]['code'], self.service2.code)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_filter_services_by_exact_start_date(self):\n \"\"\"Test filter services by exact start date\"\"\"\n response = self.client.get(\n f'/v1/services/?start_date={self.service1.start_date}'\n )\n self.assertEqual(\n 
response.data['results'][0]['start_date'],\n str(self.service1.start_date)\n )\n self.assertNotEqual(\n response.data['results'][0]['start_date'],\n str(self.service2.start_date)\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_filter_services_by_exact_end_date(self):\n \"\"\"Test filter services by exact end date\"\"\"\n response = self.client.get(\n f'/v1/services/?end_date={self.service1.end_date}'\n )\n self.assertEqual(\n response.data['results'][0]['end_date'],\n str(self.service1.end_date)\n )\n self.assertNotEqual(\n response.data['results'][0]['end_date'],\n str(self.service2.end_date)\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n","sub_path":"app/tests/api/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"615942230","text":"import random\n\nfrom copy import deepcopy\n\n\nclass Matrix:\n def __init__(self, nrows, ncols):\n \"\"\"Construct a (nrows X ncols) matrix\"\"\"\n self.rows=nrows\n self.cols=ncols\n self.matrix=[[] for i in range(nrows)]\n for i in range(nrows):\n for j in range(ncols):\n self.matrix[i].append(random.randint(0,9))\n def add(self, m):\n \"\"\"return a new Matrix object after summation\"\"\"\n if self.cols!=m.cols:\n print(\"Matrixs' size should be in the same size\")\n return\n elif self.rows!=m.rows:\n print(\"Matrixs' size should be in the same size\")\n return\n temp=self.matrix\n for i in range(self.rows):\n for j in range(self.cols):\n self.matrix[i][j]=temp[i][j]+m.matrix[i][j]\n print(\"=\"*10,\"A + B\",\"=\"*10)\n self.display()\n def sub(self, m):\n \"\"\"return a new Matrix object after substraction\"\"\"\n if self.cols!=m.cols:\n print(\"Matrixs' size should be in the same size\")\n return\n elif self.rows!=m.rows:\n print(\"Matrixs' size should be in the same size\")\n return\n temp=self.matrix\n for i in range(self.rows):\n for j in range(self.cols):\n self.matrix[i][j]=temp[i][j]-m.matrix[i][j]\n print(\"=\"*10,\"A - B\",\"=\"*10)\n self.display()\n\n def mul(self, m):\n \"\"\"return a new Matrix object after multiplication\"\"\"\n if self.cols!=m.rows:\n print(\"Matrixs' size should be in the same size\")\n return\n temp=[[] for i in range(self.rows)]\n for i in range(self.rows):\n for j in range(self.cols):\n temp[i].append(self.matrix[i][j])\n for i in range(self.rows):\n for j in range(m.cols):\n sum=0\n for k in range(self.cols):\n sum+=temp[i][k]*(m.matrix[k][j])\n self.matrix[i][j]=sum\n print(\"=\"*10,\"A * B\",\"=\"*10)\n self.display()\n\n def transpose(self):\n \"\"\"return a new Matrix object after transpose\"\"\"\n if self.rows!=self.cols:\n print(\"can't transpose in different cols and rows size \")\n return\n temp=[[] for i in range(self.rows)]\n for i in range(self.rows):\n for j in range(self.cols):\n temp[i].append(self.matrix[i][j])\n for i in range(self.rows):\n for j in range(self.cols):\n if i==j:\n continue\n self.matrix[i][j]=temp[j][i]\n print(\"=\"*10,\"Transpose\",\"=\"*10)\n self.display()\n def display(self):\n \"\"\"Display the content in the matrix\"\"\"\n for i in range(self.rows):\n for j in range(self.cols):\n print('{0:>3}'.format(self.matrix[i][j]),end = \" \")\n print()\n \nArows=int(input(\"Enter A Matrix's rows :\"))\nAcols=int(input(\"Enter A Matrix's cols :\"))\nBrows=int(input(\"Enter B Matrix's rows :\"))\nBcols=int(input(\"Enter B Matrix's cols 
:\"))\nA=Matrix(Arows,Acols)\nB=Matrix(Brows,Bcols)\nA.add(B)\nA.transpose()\nA.sub(B)\nA.mul(B)\n\n","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"426163464","text":"# -*- coding: utf-8 -*-\n\"\"\"## Imports\"\"\"\n\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import PorterStemmer\nfrom collections import Counter\nfrom num2words import num2words\n\nimport nltk\nimport os\nimport string\nimport numpy as np\nimport copy\nimport pandas\nimport pandas as pd\nimport pickle\nimport re\nimport math\nimport csv\nimport time\nimport psutil\n\ndef readfile(Input):\n start_time = time.time()\n\n \"\"\"# Preprocessing\"\"\"\n\n def convert_lower_case(data):\n return np.char.lower(data)\n\n def remove_stop_words(data):\n stop_words = stopwords.words('english')\n words = word_tokenize(str(data))\n new_text = \"\"\n for w in words:\n if w not in stop_words and len(w) > 1:\n new_text = new_text + \" \" + w\n return new_text\n\n def remove_punctuation(data):\n symbols = \"!\\\"#$%&()*+-./:;<=>?@[\\]^_`{|}~\\n\"\n for i in range(len(symbols)):\n data = np.char.replace(data, symbols[i], ' ')\n data = np.char.replace(data, \" \", \" \")\n data = np.char.replace(data, ',', '')\n return data\n\n def remove_apostrophe(data):\n return np.char.replace(data, \"'\", \"\")\n\n def stemming(data):\n stemmer= PorterStemmer()\n \n tokens = word_tokenize(str(data))\n new_text = \"\"\n for w in tokens:\n new_text = new_text + \" \" + stemmer.stem(w)\n return new_text\n\n def convert_numbers(data):\n tokens = word_tokenize(str(data))\n new_text = \"\"\n for w in tokens:\n try:\n w = num2words(int(w))\n except:\n a = 0\n new_text = new_text + \" \" + w\n new_text = np.char.replace(new_text, \"-\", \" \")\n return new_text\n\n def preprocess(data):\n data = convert_lower_case(data)\n data = remove_punctuation(data) #remove comma seperately\n data = remove_apostrophe(data)\n data = remove_stop_words(data)\n data = convert_numbers(data)\n data = stemming(data)\n data = remove_punctuation(data)\n data = convert_numbers(data)\n data = stemming(data) #needed again as we need to stem the words\n data = remove_punctuation(data) #needed again as num2word is giving few hypens and commas fourty-one\n data = remove_stop_words(data) #needed again as num2word is giving stop words 101 - one hundred and one\n return data\n\n alpha = 0.3\n arrayURL = []\n arrayList = []\n CutarrayList = []\n\n URLDatas = []\n arrayURLDatas = []\n CutarrayListDatas = []\n times = ''\n timesFile = []\n CountBinay = []\n number = []\n url = 100\n URL = pandas.read_csv('url1000.csv')\n # for i in range(0,len(URL)):\n for i in range(0,url):\n arrayURL.append(URL['url'][i])\n\n with open('DataSets.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n arrayList.append(row)\n\n # for x in range(0,len(arrayList)):\n # print(len(arrayList))\n # print(arrayList[x])\n # print('*******************')\n\n DF = {}\n\n for i in range(0,url):\n tokens = arrayList[i]\n for w in tokens:\n try:\n DF[w].add(i)\n except:\n DF[w] = {i}\n\n for i in DF:\n DF[i] = len(DF[i])\n\n # print(DF)\n # DF\n\n total_vocab_size = len(DF)\n\n total_vocab_size\n\n total_vocab = [x for x in DF]\n\n # print([x for x in DF])\n # print(total_vocab[:20])\n\n def doc_freq(word):\n c = 0\n try:\n c = DF[word]\n except:\n pass\n return c\n\n \"\"\"### Calculating TF-IDF for 
body, we will consider this as the actual tf-idf as we will add the title weight to this.\"\"\"\n\n doc = 0\n\n tf_idf = {}\n\n for i in range(0,url):\n \n tokens = arrayList[i]\n \n counter = Counter(tokens)\n words_count = len(tokens)\n\n # print('tokens : ', tokens)\n # print('counter : ', counter)\n # print('words_count : ', words_count)\n \n for token in np.unique(tokens):\n \n tf = counter[token]/words_count\n df = doc_freq(token)\n idf = np.log((url+1)/(df+1))\n \n tf_idf[doc, token] = tf*idf\n\n doc += 1\n\n # print(type(tf_idf))\n # print(len(tf_idf))\n # print(tf_idf[(0,Input)])\n\n # tf_idf\n\n \"\"\"## Merging the TF-IDF according to weights\"\"\"\n\n for i in tf_idf:\n tf_idf[i] *= alpha\n\n def matching_score(k, query):\n preprocessed_query = preprocess(query)\n tokens = word_tokenize(str(preprocessed_query))\n\n # print(\"Matching Score\")\n # print(\"\\nQuery:\", query)\n # print(\"\")\n # print(tokens)\n \n query_weights = {}\n \n for key in tf_idf:\n \n if key[1] in tokens:\n try:\n print('yes')\n query_weights[key[0]] += tf_idf[key]\n except:\n print('no')\n query_weights[key[0]] = tf_idf[key]\n\n query_weights = sorted(query_weights.items(), key=lambda x: x[1], reverse=True)\n\n # print(\"\")\n \n l = []\n \n for i in query_weights[:k]:\n l.append(i[0])\n \n # print(l)\n # print(sorted(l))\n # print(len(l))\n # print(type(l))\n\n # Ranking = {}\n # for x in range(0,url):\n # Ranking[l[x]] = arrayURL[x]\n # # print(arrayURL[x])\n\n # time_2f = '%.2f' % (time.time() - start_time)\n # times = ((\" %s seconds \" % time_2f ))\n\n # cpu = psutil.cpu_percent(interval=1)\n # memory = psutil.swap_memory()[3]\n # disk = psutil.disk_usage('/')[3]\n\n\n # return tokens, dict(sorted(Ranking.items())), times, cpu, memory, disk\n \n\n Q = matching_score(url, Input)\n\n # def cosine_similarity(k, query):\n # # print(\"Cosine Similarity\")\n # preprocessed_query = preprocess(query)\n # tokens = word_tokenize(str(preprocessed_query))\n \n # # print(\"\\nQuery:\", query)\n # # print(\"\")\n # # print(tokens)\n \n # d_cosines = []\n \n # query_vector = gen_vector(tokens)\n \n # for d in D:\n # d_cosines.append(cosine_sim(query_vector, d))\n \n # out = np.array(d_cosines).argsort()[-k:][::-1]\n # # print(len(d_cosines))\n # # print(d_cosines)\n # # print(sorted(d_cosines))\n # # print(\"\")\n \n # # print(out)\n # # print(sorted(out))\n\n # time_2f = '%.2f' % (time.time() - start_time)\n # times = ((\" %s seconds \" % time_2f ))\n # # print(times)\n # # for i in out:\n # # print(i, dataset[i][0])\n # Ranking = {}\n # for x in range(0,url):\n # Ranking[out[x]] = arrayURL[x]\n # # print(arrayURL[x])\n\n # cpu = psutil.cpu_percent(interval=1)\n # memory = psutil.swap_memory()[3]\n # disk = psutil.disk_usage('/')[3]\n\n # return tokens, dict(sorted(Ranking.items())), times, cpu, memory, disk, sorted(d_cosines)\n \n # Q = cosine_similarity(url, Input)\n # print('********')\n # print(Q[0])\n # print(len(Q[1]))\n # print(Q[2])\n # print('*****************')\n # print(dict(sorted(Q[3].items())))\n # for key, value in sorted (Q[3].items()):\n # print(key, value)\n # return Q[0], Q[1], Q[2], Q[3], Q[4], Q[5]\n return Q\n\nprint(readfile(\"Without the drive of Rebeccah's insistence, Kate lost her momentum. 
She stood next a slatted oak bench, canisters still clutched, surveying\"))\n# print(readfile('to'))","sub_path":"TF_IDFMatchingScoreRanking.py","file_name":"TF_IDFMatchingScoreRanking.py","file_ext":"py","file_size_in_byte":7742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"364798202","text":"import pandas as pd\nfrom os import listdir\n\n# Change path to where you have the data\npath = '\\\\data\\\\random_stocks\\\\'\n\n\"\"\"Ingest function needs this exact signature\"\"\"\n\ndef random_stock_data(environ,\n asset_db_writer,\n minute_bar_writer,\n daily_bar_writer,\n adjustment_writer,\n calendar,\n start_session,\n end_session,\n cache,\n show_progress,\n output_dir\n ):\n # Get list of files from path,\n # slicing off the last part:\n # 'example.csv'[:-4] = 'example'\n symbols = [f[:-4] for f in listdir(path)]\n \n if not symbols:\n raise ValueError(\"No symbols found in the folder\")\n \n # Prepare an empty DataFrame for dividends\n divs = pd.DataFrame(columns=['sid',\n 'amount',\n 'ex_date',\n 'record_date',\n 'declared_date',\n 'pay_date'\n ]\n )\n \n # Prepare an empty DataFrame for splits\n splits = pd.DataFrame(columns=['sid',\n 'ratio',\n 'effective_date'\n ])\n \n # Prepare an empty DataFrame for metadata\n metadata = pd.DataFrame(columns=['start_date',\n 'end_date',\n 'auto_close_date',\n 'symbol',\n 'exchange'\n ])\n \n # Check valid trading dates, according to the selected exchange calendar\n sessions = calendar.sessions_in_range(start_session, end_session)\n \n # Get data for all stocks and write to Zipline\n daily_bar_writer.write(process_stocks(symbols, sessions, metadata, divs))\n \n # Write the metadata\n asset_db_writer.write(equities=metadata)\n \n # Write splits and dividends\n adjustment_writer.write(splits=splits, dividends=divs)\n
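\n# --- editor's sketch (not in the original file): one way this bundle might be\n# registered so Zipline's ingest command can find it; the bundle name here is\n# an assumption, and the call is left commented out ---\n# from zipline.data.bundles import register\n# register('random_stocks', random_stock_data, calendar_name='NYSE')\n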
\ndef process_stocks(symbols, sessions, metadata, divs):\n \"\"\"Generator function to iterate stocks,\n build historical data, metadata, and dividend data\"\"\"\n # Loop the stocks, setting a unique Security ID (SID)\n for sid, symbol in enumerate(symbols):\n print('Loading {} ...'.format(symbol))\n # Read the stock data from csv file\n df = pd.read_csv('{}/{}.csv'.format(path, symbol), index_col=[0], parse_dates=[0])\n \n # Check first and last date\n start_date = df.index[0]\n end_date = df.index[-1]\n \n # Sync to the official exchange calendar\n df = df.reindex(sessions.tz_localize(None))[start_date : end_date]\n \n # Forward fill missing data\n df.fillna(method='ffill', inplace=True)\n \n # Drop remaining NaNs\n df.dropna(inplace=True)\n \n # The auto_close date is the day after the last trade\n ac_date = end_date + pd.Timedelta(days=1)\n \n # Add a row to the metadata DataFrame. Don't forget to add an exchange field\n metadata.loc[sid] = start_date, end_date, ac_date, symbol, 'NYSE'\n \n # If there's dividend data, add that to the dividend DataFrame\n if 'dividend' in df.columns:\n \n # Slice off the days with dividends\n tmp = df[df['dividend'] != 0.0]['dividend']\n div = pd.DataFrame(data=tmp.index.tolist(), columns=['ex_date'])\n \n \n # Provide empty columns as we don't have these data now\n div['record_date'] = pd.NaT\n div['declared_date'] = pd.NaT\n div['pay_date'] = pd.NaT\n \n # Store the dividends and set the Security ID\n div['amount'] = tmp.tolist()\n div['sid'] = sid\n \n # Start numbering where we left off last time\n ind = pd.Index(range(divs.shape[0], divs.shape[0] + div.shape[0]))\n div.set_index(ind, inplace=True)\n \n # Append this stock's dividends to the list of all dividends\n divs = divs.append(div)\n \n yield sid, df","sub_path":"random_stock_data.py","file_name":"random_stock_data.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"147230491","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport gettext\nimport os\nimport re\nimport shutil\nimport subprocess\nimport tempfile\nimport time\nimport urllib\n\nimport lxml\nimport sass\nfrom lxml import etree\nfrom lxml.builder import ElementMaker\nfrom mwlib.log import Log\nfrom mwlib.writer.licensechecker import LicenseChecker\n\nfrom . import htmlfilters\nfrom . import linuxmem\nfrom . import utils\nfrom .collection import Article\nfrom .config import gutter_width_pt, column_width_pt, page_margins\nfrom .generators import contributors, table_of_contents, cover\n\nlog = Log(\"mwlib.pdf.html2pdf\")\n\nE = ElementMaker()\nfile_regex = re.compile(r\"((File|Datei):[^?\\n]*)\")\n\ncurrent_dir = os.path.abspath(os.path.dirname(__file__))\ncss_dir = os.path.join(current_dir, \"css\")\nscss_file = os.path.join(css_dir, \"base.scss\")\njs_file = os.path.join(current_dir, \"js\", \"base.js\")\n\n\nclass PrincePdfWriter(object):\n boxid_regex = re.compile(\".*? 
boxid: (?P\\d+)$\")\n width_regex = re.compile(\"^msg\\|out\\|width: (?P(\\.|\\d)+)$\")\n height_regex = re.compile(\"^msg\\|out\\|height: (?P(\\.|\\d)+)$\")\n\n def __init__(\n self, env, out_fn, debug=False, crop_marks=False, lang=\"en\",\n ):\n \"\"\"\n Initialize HTML renderer\n :param env: environment with full metabook\n :param out_fn: output filename\n :param debug: debug mode\n :param crop_marks: render crop-marks\n \"\"\"\n self.pdf_output_filename = out_fn\n self.lang = lang\n self.init_l10n(lang)\n self.crop_marks = crop_marks\n self.css_file = self._compile_sass()\n self.js_file = js_file\n self.debug = debug\n self.articles = []\n self.env = env\n if self.env is not None:\n self.book = self.env.metabook\n self.imgDB = env.images\n self.image_metadata = dict()\n self.img_count = 0\n\n try:\n strict_server = self.env.wiki.siteinfo[\"general\"][\"server\"] in [u\"//de.wikipedia.org\"]\n except:\n strict_server = False\n if strict_server:\n self.license_checker = LicenseChecker(image_db=self.imgDB, filter_type=\"whitelist\")\n else:\n self.license_checker = LicenseChecker(image_db=self.imgDB, filter_type=\"blacklist\")\n self.license_checker.readLicensesCSV()\n\n def init_l10n(self, lang):\n log.info('Using \"{}\" for localization'.format(lang))\n localedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"locale\")\n translation = gettext.NullTranslations()\n if lang:\n try:\n translation = gettext.translation(\"mwlib.pdf\", localedir, [lang])\n except IOError as exc:\n log.warn(str(exc))\n translation.install(unicode=True)\n\n def render_zip(self):\n \"\"\"\n Renders Zip-File\n \"\"\"\n start_time = time.time()\n filtered_html = []\n article_idx = 0\n\n # build Article List and DOM tree\n for n, item in enumerate(self.env.metabook.walk()):\n if item.type == \"chapter\":\n log.warning(\"chapter skipped\")\n continue\n elif item.type == \"article\":\n article_idx += 1\n log.info(\"Article {}\".format(item.title.encode(\"utf-8\")))\n article = Article.from_wiki_item(item, self.env, article_idx)\n self._write_image_metadata(article.dom)\n self.articles.append(article)\n filtered_html.append(article.parse())\n root = self._render_front_matter()\n body = root.find(\"body\")\n articles = E.section({\"id\": \"articles\"})\n for article_dom in filtered_html:\n for item in article_dom.find(\"body\").getchildren():\n articles.append(item)\n body.append(articles)\n appendix = E.section({\"id\": \"appendix\"})\n appendix.append(contributors.generate_article_contributors(self.articles))\n appendix.append(contributors.generate_image_contributors(self.image_metadata))\n body.append(appendix)\n self._tag_nodes(root)\n\n # render DOM tree\n if self.debug:\n utils.append_class(root.find(\"body\"), \"debug\")\n self._dump_html(root, \"debug.html\")\n\n # 1st render process\n render_log_filename = self._render_cmd(root, save_pdf_file=False)\n self._post_width_hook(root, render_log_filename)\n if self.debug:\n self._dump_html(root, \"debug_final.html\")\n # 2nd render process\n self._render_cmd(root, use_js=False)\n log.info(\n \"rendering {} finished in {:.2f}\".format(\n self.pdf_output_filename, time.time() - start_time\n )\n )\n\n def _render_cmd(self, root, save_pdf_file=True, use_js=True):\n \"\"\"\n Render root tree with PrinceXML\n \"\"\"\n mem_start = linuxmem.memory()\n pdf_filename = self.pdf_output_filename if save_pdf_file else os.devnull\n cmd = [\n \"prince\",\n \"-\",\n \"-o\",\n pdf_filename,\n \"--media\",\n \"print\",\n \"-s\",\n self.css_file,\n \"--no-network\",\n 
\"--server\",\n ]\n if use_js and self.js_file:\n cmd.extend([\"--script\", self.js_file])\n\n stdout_fd, stdout_filename = tempfile.mkstemp(prefix=\"render_out_\", suffix=\".log\")\n log.info(\"running cmd: {} (stdout: {}) \".format(cmd, stdout_filename))\n p = subprocess.Popen(\n cmd, stdin=subprocess.PIPE, stdout=stdout_fd, stderr=subprocess.STDOUT\n )\n p.communicate(utils.tree_to_string(root))\n mem_end = linuxmem.memory()\n log.info(\"MEMORY before {0:.2f}MB after {1:.2f}MB rendering\".format(mem_start, mem_end))\n return stdout_filename\n\n def _post_width_hook(self, root, render_log_filename):\n \"\"\"\n Resize elements based on box sizes determined in previous render process\n \"\"\"\n self._parse_render_output(root, render_log_filename)\n htmlfilters.sizetools.fix_nested_widths(root)\n htmlfilters.sizetools.resize_tables(root)\n htmlfilters.sizetools.resize_overwide_tables(root)\n\n def _compile_sass(self):\n \"\"\"\n compile css from scss\n \"\"\"\n self._write_css_config()\n css_fn = os.path.splitext(scss_file)[0] + \".css\"\n source_map_fn = css_fn + \".map\"\n compiled_css, source_map = sass.compile(\n filename=scss_file, source_map_filename=source_map_fn\n )\n with open(css_fn, \"w\") as css_file:\n css_file.write(compiled_css)\n with open(source_map_fn, \"w\") as map_file:\n map_file.write(source_map)\n\n log.info(\"compiled sass {} -> {}\".format(scss_file, css_fn))\n return css_fn\n\n def _write_css_config(self):\n with open(os.path.join(css_dir, \"_config.scss\"), \"w\") as fn:\n scss_config = [\n \"$gutter-width: {}pt;\".format(gutter_width_pt),\n \"$base-column-width: {}pt;\".format(column_width_pt),\n \"@page {\",\n \" size: a4;\",\n \" margin: {};\".format(\" \".join([str(p) + \"pt\" for p in page_margins])),\n \" padding: 0;\",\n \"}\",\n ]\n fn.write(\"\\n\".join(scss_config))\n\n def _dump_html(self, root, filename=\"debug.html\"):\n \"\"\"\n Dump HTML content of root tree into a file\n :param root: DOM-Tree of the document\n :param filename: output filename\n \"\"\"\n head = root.xpath(\"//head\")[0]\n head.append(E.link(rel=\"stylesheet\", type=\"text/css\", href=self.css_file))\n html_filename = os.path.join(os.path.dirname(self.pdf_output_filename), filename)\n with open(html_filename, \"w\") as f:\n f.write(utils.tree_to_string(root))\n log.info(\"wrote HTML {}\".format(html_filename))\n\n def _render_front_matter(self):\n \"\"\"\n Generate \"front matter\" pages\n \"\"\"\n body = E.body()\n root = E.html(E.head(), body)\n if len(self.env.metabook.items) == 1:\n return root\n\n body.append(cover.generate_cover_page(self.env, self.lang))\n front_matter = E.section(id=\"front_matter\")\n front_matter.append(table_of_contents.generate_toc(self.env))\n body.append(front_matter)\n return root\n\n def _convert_colorspace(self, colorspace=None):\n \"\"\"\n Convert Colorspace to CMYK for Print Output\n :param colorspace:\n :return:\n \"\"\"\n if colorspace == \"cmyk\":\n tmp_out_fn = tempfile.mkstemp(suffix=\".pdf\")[1]\n cmd = [\n \"ps2pdf\",\n \"-sProcessColorModel=DeviceCMYK\",\n \"-dHaveTransparency=/false\",\n \"-sColorConversionStrategy=CMYK\",\n \"-dAutoRotatePages=/None\",\n \"-dPDFSETTINGS=/prepress\", # increase image quality\n self.pdf_output_filename, # srce\n tmp_out_fn, # target\n ]\n print(\"\\n\".join([\"-\" * 40, \"CONVERTING TO CMYK\", \" \".join(cmd)]))\n ret = subprocess.call(cmd)\n if ret == 0:\n shutil.move(tmp_out_fn, self.pdf_output_filename)\n\n def _tag_nodes(self, root):\n for idx, node in enumerate(root.iter()):\n try:\n 
node.set(\"boxid\", str(idx))\n except TypeError:\n if isinstance(node, lxml.etree._Comment):\n pass\n else:\n log.error(\"Setting boxid on node {} failed\".format(node.tag))\n\n def _parse_render_output(self, root, render_log_filename):\n \"\"\"\n add width and height in pt to root tree boxes based on render log\n \"\"\"\n widths, heights = [], []\n id2width, id2height = {}, {}\n boxid = 0\n with open(render_log_filename) as f:\n for line in f:\n boxid_res = self.boxid_regex.match(line)\n if boxid_res:\n boxid = int(boxid_res.group(\"boxid\"))\n if len(widths):\n id2width[boxid - 1] = max(widths or [0])\n if len(heights):\n id2height[boxid - 1] = sum(heights or [0])\n widths, heights = [], []\n width = self.width_regex.match(line)\n height = self.height_regex.match(line)\n if width:\n width = float(width.group(\"width\"))\n widths.append(width)\n if height:\n height = float(height.group(\"height\"))\n heights.append(height)\n id2width[boxid] = max(widths or [0])\n id2height[boxid] = sum(heights or [0])\n\n for node in root.iterdescendants():\n _id = int(node.get(\"boxid\") or -1)\n if _id > -1:\n node.set(\"box_width\", \"{:.2f}\".format(id2width.get(_id, 0)))\n node.set(\"box_height\", \"{:.2f}\".format(id2height.get(_id, 0)))\n del node.attrib[\"boxid\"]\n\n def _write_image_metadata(self, body):\n \"\"\"\n write image metadata and tag images\n \"\"\"\n for link in body.xpath(\"//a[img]\"):\n img_name = link.attrib.get(\"title\")\n if img_name is None or not self.imgDB.getContributors(img_name):\n match = file_regex.findall(link.attrib.get(\"href\"))\n if not match:\n continue\n img_name = urllib.unquote(match[0][0]).decode(\"utf-8\")\n\n if not self.image_metadata.get(img_name):\n self.img_count += 1\n url = self.imgDB.getDescriptionURL(img_name) or self.imgDB.getURL(img_name)\n if url:\n url = unicode(urllib.unquote(url.encode(\"utf-8\")), \"utf-8\")\n else:\n url = \"\"\n license_name = self.license_checker.getLicenseDisplayName(img_name)\n display = self.license_checker.displayImage(img_name)\n if not display:\n log.debug(\"remove image {}\".format(img_name))\n contributor_list = self.imgDB.getContributors(img_name)\n occurrences = 1\n self.image_metadata[img_name] = (\n self.img_count,\n img_name,\n url,\n license_name,\n contributor_list,\n display,\n occurrences,\n )\n else:\n occurrences = self.image_metadata[img_name][6] + 1\n metadata = list(self.image_metadata[img_name])\n metadata[6] = occurrences\n self.image_metadata[img_name] = tuple(metadata)\n\n for img in link.iter(\"img\"):\n if self.image_metadata[img_name][5] is False:\n utils.append_class(img, \"remove\")\n img.getparent().attrib[\"name\"] = \"image_{}_{}\".format(\n self.image_metadata[img_name][0], occurrences\n )\n","sub_path":"mwlib/pdf/html2pdf.py","file_name":"html2pdf.py","file_ext":"py","file_size_in_byte":13177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"449062128","text":"from collections import namedtuple\n\nimport numpy as np\nimport pytest\nfrom qtpy.QtWidgets import QAbstractButton\n\nfrom napari._qt.layer_controls.qt_layer_controls_container import (\n QtLayerControlsContainer,\n create_qt_layer_controls,\n layer_to_controls,\n)\nfrom napari._qt.layer_controls.qt_shapes_controls import QtShapesControls\nfrom napari.components import ViewerModel\nfrom napari.layers import Labels, Points, Shapes\n\nLayerTypeWithData = namedtuple('LayerTypeWithData', ['type', 'data'])\n_POINTS = LayerTypeWithData(type=Points, data=np.random.random((5, 
2)))\n_SHAPES = LayerTypeWithData(type=Shapes, data=np.random.random((10, 4, 2)))\n_LINES_DATA = np.random.random((6, 2, 2))\n\n\ndef test_create_shape(qtbot):\n shapes = _SHAPES.type(_SHAPES.data)\n\n ctrl = create_qt_layer_controls(shapes)\n qtbot.addWidget(ctrl)\n\n assert isinstance(ctrl, QtShapesControls)\n\n\ndef test_unknown_raises(qtbot):\n class Test:\n \"\"\"Unmatched class\"\"\"\n\n with pytest.raises(TypeError):\n create_qt_layer_controls(Test())\n\n\ndef test_inheritance(qtbot):\n class QtLinesControls(QtShapesControls):\n \"\"\"Yes I'm the same\"\"\"\n\n class Lines(Shapes):\n \"\"\"Here too\"\"\"\n\n lines = Lines(_LINES_DATA)\n layer_to_controls[Lines] = QtLinesControls\n ctrl = create_qt_layer_controls(lines)\n qtbot.addWidget(ctrl)\n assert isinstance(ctrl, QtLinesControls)\n\n\n@pytest.mark.parametrize('layer_type_with_data', [_POINTS, _SHAPES])\ndef test_text_set_visible_updates_checkbox(qtbot, layer_type_with_data):\n text = {\n 'string': {'constant': 'test'},\n 'visible': True,\n }\n layer = layer_type_with_data.type(layer_type_with_data.data, text=text)\n ctrl = create_qt_layer_controls(layer)\n qtbot.addWidget(ctrl)\n assert ctrl.textDispCheckBox.isChecked()\n\n layer.text.visible = False\n\n assert not ctrl.textDispCheckBox.isChecked()\n\n\n@pytest.mark.parametrize('layer_type_with_data', [_POINTS, _SHAPES])\ndef test_set_text_then_set_visible_updates_checkbox(\n qtbot, layer_type_with_data\n):\n layer = layer_type_with_data.type(layer_type_with_data.data)\n ctrl = create_qt_layer_controls(layer)\n qtbot.addWidget(ctrl)\n layer.text = {\n 'string': {'constant': 'another_test'},\n 'visible': False,\n }\n assert not ctrl.textDispCheckBox.isChecked()\n\n layer.text.visible = True\n\n assert ctrl.textDispCheckBox.isChecked()\n\n\n@pytest.mark.parametrize(('ndim', 'editable_after'), ((2, False), (3, True)))\ndef test_set_3d_display_with_points(qtbot, ndim, editable_after):\n \"\"\"Interactivity only works for 2D points layers rendered in 2D and not\n in 3D. Verify that layer.editable is set appropriately upon switching to\n 3D rendering mode.\n\n See: https://github.com/napari/napari/pull/4184\n \"\"\"\n viewer = ViewerModel()\n container = QtLayerControlsContainer(viewer)\n qtbot.addWidget(container)\n layer = viewer.add_points(np.zeros((0, ndim)), ndim=ndim)\n assert viewer.dims.ndisplay == 2\n assert layer.editable\n\n viewer.dims.ndisplay = 3\n\n assert layer.editable == editable_after\n\n\ndef test_set_3d_display_with_shapes(qtbot):\n \"\"\"Interactivity only works for shapes layers rendered in 2D and not\n in 3D. Verify that layer.editable is set appropriately upon switching to\n 3D rendering mode.\n\n See: https://github.com/napari/napari/pull/4184\n \"\"\"\n viewer = ViewerModel()\n container = QtLayerControlsContainer(viewer)\n qtbot.addWidget(container)\n layer = viewer.add_shapes(np.zeros((0, 2, 4)))\n assert viewer.dims.ndisplay == 2\n assert layer.editable\n\n viewer.dims.ndisplay = 3\n\n assert not layer.editable\n\n\n# The following tests handle changes to the layer's visible and\n# editable state for layer control types that have controls to edit\n# the layer. 
For more context see:\n# https://github.com/napari/napari/issues/1346\n\n\n@pytest.fixture(\n params=(\n (Labels, np.zeros((3, 4), dtype=int)),\n (Points, np.empty((0, 2))),\n (Shapes, np.empty((0, 2, 4))),\n )\n)\ndef editable_layer(request):\n LayerType, data = request.param\n return LayerType(data)\n\n\ndef test_make_visible_when_editable_enables_edit_buttons(\n qtbot, editable_layer\n):\n editable_layer.editable = True\n editable_layer.visible = False\n controls = make_layer_controls(qtbot, editable_layer)\n assert_no_edit_buttons_enabled(controls)\n\n editable_layer.visible = True\n\n assert_all_edit_buttons_enabled(controls)\n\n\ndef test_make_not_visible_when_editable_disables_edit_buttons(\n qtbot, editable_layer\n):\n editable_layer.editable = True\n editable_layer.visible = True\n controls = make_layer_controls(qtbot, editable_layer)\n assert_all_edit_buttons_enabled(controls)\n\n editable_layer.visible = False\n\n assert_no_edit_buttons_enabled(controls)\n\n\ndef test_make_editable_when_visible_enables_edit_buttons(\n qtbot, editable_layer\n):\n editable_layer.editable = False\n editable_layer.visible = True\n controls = make_layer_controls(qtbot, editable_layer)\n assert_no_edit_buttons_enabled(controls)\n\n editable_layer.editable = True\n\n assert_all_edit_buttons_enabled(controls)\n\n\ndef test_make_not_editable_when_visible_disables_edit_buttons(\n qtbot, editable_layer\n):\n editable_layer.editable = True\n editable_layer.visible = True\n controls = make_layer_controls(qtbot, editable_layer)\n assert_all_edit_buttons_enabled(controls)\n\n editable_layer.editable = False\n\n assert_no_edit_buttons_enabled(controls)\n\n\ndef make_layer_controls(qtbot, layer):\n QtLayerControlsType = layer_to_controls[type(layer)]\n controls = QtLayerControlsType(layer)\n qtbot.addWidget(controls)\n return controls\n\n\ndef assert_all_edit_buttons_enabled(controls) -> None:\n assert all(map(QAbstractButton.isEnabled, controls._EDIT_BUTTONS))\n\n\ndef assert_no_edit_buttons_enabled(controls) -> None:\n assert not any(map(QAbstractButton.isEnabled, controls._EDIT_BUTTONS))\n","sub_path":"napari/_qt/layer_controls/_tests/test_qt_layer_controls.py","file_name":"test_qt_layer_controls.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"511333725","text":"from tkinter import*\r\n\r\nroot = Tk()\r\n\r\n#creating a label widget\r\nmyLabel1= Label(root,text=\"helloworld!\")\r\nmyLabel2=Label(root,text=\"My name is SWETHA\")\r\nmyLabel3= Label(root,text=\" \")\r\n#showing it onto the screen\r\nmyLabel1.grid(row=0,column=0)\r\nmyLabel2.grid(row=1,column=5)\r\nmyLabel3.grid(row=1,column=1)\r\n\r\n\r\n\r\n\r\nroot.mainloop()","sub_path":"python codes/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"364721615","text":"# ------------------------------------------------------------\n# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.\n# Copyright (c) 2016-2018, The TensorLayer contributors.\n#\n# Licensed under the BSD 2-Clause License.\n# You should have received a copy of the BSD 2-Clause License\n# along with the software. 
If not, See,\n#\n# \n#\n# ------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom dragon.core.framework import context\nfrom dragon.core.framework import workspace\nfrom dragon.core.util import nest\nfrom dragon.core.util import string\nfrom dragon.vm.tensorlayer.core import initializers\n\n\nclass LayerMetaclass(object):\n \"\"\"Meta class for layer like objects.\"\"\"\n\n def __init__(self, name=None):\n self._name = name\n self._all_weights = None\n self._trainable_weights = None\n self._nontrainable_weights = None\n self._nodes_fixed = False\n self._training = True\n\n @property\n def name(self):\n \"\"\"Return the layer name.\n\n Returns\n -------\n str\n The layer name.\n\n \"\"\"\n if self._name is None:\n self._init_set_name()\n return self._name\n\n @property\n def nontrainable_weights(self):\n \"\"\"Return the non-trainable weights.\n\n Returns\n -------\n Sequence[dragon.Tensor]\n The weights sequence.\n\n \"\"\"\n return self._nontrainable_weights\n\n @property\n def training(self):\n return self._training\n\n @training.setter\n def training(self, value):\n self._training = value\n\n @property\n def trainable_weights(self):\n \"\"\"Return the trainable weights.\n\n Returns\n -------\n Sequence[dragon.Tensor]\n The weights sequence.\n\n \"\"\"\n return self._trainable_weights\n\n def forward(self, inputs, **kwargs):\n \"\"\"Method to define the forward operations.\"\"\"\n pass\n\n def _init_set_name(self, name=None, zero_based=True):\n \"\"\"Set the model name when necessary.\"\"\"\n if name is None:\n self._name = workspace.get_workspace().unique_name(\n name=self.__class__.__name__.lower(),\n namespace='Object',\n zero_based=zero_based,\n )\n else:\n self._name = name\n\n def _fix_nodes(self):\n \"\"\"Fix layer nodes to stop growing.\"\"\"\n self._nodes_fixed = True\n\n\nclass Layer(LayerMetaclass):\n \"\"\"Represent a single layer of a neural network.\n\n It should be subclassed when implementing new types of layers:\n\n ```python\n class MyLayer(tl.layers.Layer):\n def __init__(name=None, act=None):\n super(MyLayer, self).__init__(name=name, act=act)\n ```\n\n \"\"\"\n\n def __init__(self, name=None, act=None, *args, **kwargs):\n \"\"\"Create a new ``Layer``.\n\n Parameters\n ----------\n name : str, optional.\n The layer name.\n act : str or function, optional\n The optional activation.\n\n \"\"\"\n super(Layer, self).__init__(name=name)\n self._built = False\n self._nodes = []\n self.act = act\n\n @staticmethod\n def _compute_shape(tensors):\n if isinstance(tensors, list):\n shape_mem = [t.shape for t in tensors]\n else:\n shape_mem = tensors.shape\n return shape_mem\n\n @property\n def all_weights(self):\n \"\"\"Return all the weights, both trainable and non-trainable.\n\n Returns\n -------\n Sequence[dragon.Tensor]\n The weights sequence.\n\n \"\"\"\n if self._all_weights is None:\n self._all_weights = []\n if self._trainable_weights is not None:\n self._all_weights.extend(self._trainable_weights)\n if self._nontrainable_weights is not None:\n self._all_weights.extend(self._nontrainable_weights)\n return self._all_weights\n\n def build(self, inputs_shape):\n \"\"\"Method to define the weights.\"\"\"\n self._built = True\n\n def _add_node(self, inputs, outputs):\n \"\"\"Add a layer node for inputs and outputs.\n\n Parameters\n ----------\n inputs : Sequence[dragon.Tensor]\n The input tensors.\n outputs : Sequence[dragon.Tensor]\n The output 
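_init_set_name above hands naming off to workspace.get_workspace().unique_name(...). A toy counter-based namer showing the behaviour such a helper typically provides; the zero-based suffixing here is an assumption, not Dragon's actual implementation:

from collections import defaultdict

class NameScope:
    def __init__(self):
        self._counts = defaultdict(int)

    def unique_name(self, name):
        # First request returns the bare name; repeats get an index suffix.
        index = self._counts[name]
        self._counts[name] += 1
        return name if index == 0 else '{}_{}'.format(name, index)

scope = NameScope()
assert scope.unique_name('layer') == 'layer'
assert scope.unique_name('layer') == 'layer_1'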
tensors.\n\n \"\"\"\n inputs = nest.flatten(inputs)\n outputs = nest.flatten(outputs)\n input_info = [getattr(e, '_info', [None, None]) for e in inputs]\n\n self._nodes.append(\n LayerNode(\n self,\n node_index=len(self._nodes),\n in_nodes=[e[0] for e in input_info],\n in_tensor_idxes=[e[1] for e in input_info],\n in_tensors=inputs,\n out_tensors=outputs,\n )\n )\n\n for idx, tensor in enumerate(outputs):\n tensor._info = (self._nodes[-1], idx)\n\n def _release_memory(self):\n \"\"\"\n WARINING: This function should be called with great caution.\n\n self.inputs and self.outputs will be set as None but not deleted in order to release memory.\n \"\"\"\n # FIXME : not understand why saving inputs/outputs shape\n for node in self._nodes:\n node.in_tensors = None\n node.out_tensors = None\n\n def _get_weights(\n self,\n name=None,\n shape=None,\n init=initializers.glorot_uniform(),\n trainable=True,\n ):\n \"\"\"Add a new weight into the layer.\"\"\"\n name = name if name else 'weights'\n shape = shape if shape is not None else []\n weight = init(shape=shape, trainable=trainable)\n weight._name = context.get_name_scope() + name\n if trainable is True:\n if self._trainable_weights is None:\n self._trainable_weights = []\n self._trainable_weights.append(weight)\n else:\n if self._nontrainable_weights is None:\n self._nontrainable_weights = []\n self._nontrainable_weights.append(weight)\n return weight\n\n def __call__(self, inputs, **kwargs):\n \"\"\"The preprocessor for ``self.forward(...)``.\"\"\"\n with context.name_scope(self.name):\n # Maybe build the layer at the first time.\n if not self._built:\n if isinstance(self, LayerList):\n self._input_tensors = inputs\n input_list = nest.flatten(inputs)\n input_shapes = None\n if all(hasattr(x, 'shape') for x in input_list):\n input_shapes = [x.shape for x in input_list]\n if not nest.is_sequence(inputs):\n input_shapes = input_shapes[0]\n self.build(input_shapes)\n # Call the forward implementation to get outputs.\n outputs = self.forward(inputs, **kwargs)\n\n # Record the nodes if necessary.\n if not self._nodes_fixed:\n self._add_node(inputs, outputs)\n\n return outputs\n\n def __delitem__(self, key):\n raise TypeError('The Layer API does not allow to use the method: `__delitem__`')\n\n def __repr__(self):\n return 'Layer'\n\n def __setitem__(self, key, item):\n raise TypeError('The Layer API does not allow to use the method: `__setitem__`')\n\n\nclass LayerNode(object):\n \"\"\"\n The class :class:`LayerNode` class represents a conceptional node for a layer.\n\n LayerNode is used for building static model and it is actually a light weighted\n wrapper over Layer. Specifically, it is used for building static computational graph\n (see _construct_graph() in tl.models.Model). In static model, each layer relates to\n one or more LayerNode, and the connection relationship between layers is built upon\n LayerNode. In addition, LayerNode eases layer reuse and weights sharing.\n\n Parameters\n ----------\n layer : tl.layers.Layer\n A tl layer that wants to create a node.\n node_index : int\n Index of this node in layer._nodes.\n in_nodes :a list of LayerNode\n Father nodes to this node.\n in_tensors : a list of tensors\n Input tensors to this node.\n out_tensors : a list of tensors\n Output tensors to this node.\n in_tensor_idxes : a list of int\n Indexes of each input tensor in its corresponding node's out_tensors.\n\n Methods\n ---------\n __init__()\n Initializing the LayerNode.\n __call__()\n (1) Forwarding through the layer. 
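_add_node above tags every output tensor with a (node, output_index) pair in _info, which is what later allows a static graph to be rebuilt from tensors alone. A framework-free toy version of that back-pointer trick:

class Tensor:
    pass

class Node:
    def __init__(self, op_name, inputs):
        self.op_name = op_name
        # Producer nodes are recovered from the tags left on the inputs.
        self.in_nodes = [getattr(t, '_info', (None, None))[0] for t in inputs]

def apply_op(op_name, inputs, n_outputs=1):
    node = Node(op_name, inputs)
    outputs = [Tensor() for _ in range(n_outputs)]
    for idx, t in enumerate(outputs):
        t._info = (node, idx)  # (producer node, output slot)
    return outputs

x = Tensor()
y, = apply_op('relu', [x])
z, = apply_op('norm', [y])
assert z._info[0].in_nodes[0].op_name == 'relu'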
(2) Update its input/output tensors.\n \"\"\"\n\n def __init__(self, layer, node_index, in_nodes, in_tensors, out_tensors,\n in_tensor_idxes):\n \"\"\"\n\n Parameters\n ----------\n layer\n node_index\n in_nodes\n in_tensors\n out_tensors\n in_tensor_idxes\n \"\"\"\n self.layer = layer\n self.node_index = node_index\n self.in_nodes = in_nodes\n self.out_nodes = []\n self.in_tensors = in_tensors\n self.out_tensors = out_tensors\n self.name = layer.name + \"_node_{}\".format(node_index)\n\n self.in_tensors_idxes = in_tensor_idxes\n\n self.visited = False\n\n def __call__(self, inputs, **kwargs):\n \"\"\"(1) Forwarding through the layer. (2) Update its input/output tensors.\"\"\"\n outputs = self.layer.forward(inputs, **kwargs)\n self.in_tensors = nest.flatten(inputs)\n self.out_tensors = nest.flatten(outputs)\n return self.out_tensors\n\n\nclass LayerList(Layer):\n \"\"\"Layer to stack a group of layers.\"\"\"\n\n def __init__(self, layers, name=None):\n \"\"\"Create a ``LayerList``.\n\n Parameters\n ----------\n layers : Sequence[dragon.vm.tensorlayer.layers.Layer]\n The layers to stack.\n name : str, optional\n The layer name.\n\n \"\"\"\n super(LayerList, self).__init__(name=name)\n self._built = True\n self._all_layers = layers\n for layer in layers:\n if layer._built is False:\n self._built = False\n if layer._built and layer.all_weights is not None:\n if self._all_weights is None:\n self._all_weights = []\n self._all_weights.extend(layer.all_weights)\n\n def build(self, input_shapes):\n \"\"\"Build the layers sequentially.\"\"\"\n inputs = self._input_tensors\n for layer in self._all_layers:\n built = layer._built\n outputs = layer.__call__(inputs)\n if not built and layer.all_weights is not None:\n if self._all_weights is None:\n self._all_weights = []\n self._all_weights.extend(layer.all_weights)\n inputs = outputs\n\n def forward(self, inputs, *args, **kwargs):\n \"\"\"Forward the computation sequentially.\"\"\"\n outputs = inputs\n for layer in self._all_layers:\n outputs = layer.forward(outputs, *args, **kwargs)\n return outputs\n\n @Layer.training.setter\n def training(self, mode):\n \"\"\"Set training mode.\"\"\"\n self.training = mode\n for layer in self._all_layers:\n layer.training = mode\n\n def __getitem__(self, idx):\n if isinstance(idx, slice):\n return LayerList(list(self._all_layers)[idx])\n else:\n return self._all_layers[idx]\n\n def __len__(self):\n return len(self._all_layers)\n\n def __repr__(self):\n tmp_str = 'LayerList' + '(\\n'\n for idx, layer in enumerate(self._all_layers):\n mod_str = layer.__repr__()\n mod_str = string.add_indent(mod_str, 2)\n tmp_str = tmp_str + ' (' + str(idx) + '): ' + mod_str + '\\n'\n tmp_str = tmp_str + ')'\n return tmp_str\n","sub_path":"tensorlayer/core/layers/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":11920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"598984400","text":"# -*- coding: utf-8 -*-\n\"\"\"\nA module to convert dictionary items to Pandas Dataframes.\n\nCreated on Fri Jul 19 09:55:00 2019\n\n@author: sdtaylor\n\"\"\"\n\n\nimport pandas as pd\n\ndef perform_conversion(dictionary_to_convert):\n \"\"\"\n Function to convert the dictionary values, formatted as dictionaries\n to Pandas Dataframes.\n \n Parameters\n ----------\n dictionary_to_convert : dict\n dictionary of the sheets (keys) and the associated data to fill the \n rows as a dictionary where keys are header names and values are the\n row values.\n \n Returns\n -------\n 
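LayerList above is in essence a Sequential container: build each member once, then chain forward calls so each layer consumes its predecessor's output. The same chaining idea as a dependency-free sketch:

class Sequential:
    def __init__(self, fns):
        self.fns = list(fns)

    def __call__(self, x):
        # Each member receives the previous member's output.
        for fn in self.fns:
            x = fn(x)
        return x

model = Sequential([lambda x: x + 1, lambda x: x * 2])
assert model(3) == 8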
dictionary_converted : dict\n dictionary where keys are the sheet names of the excel sheet and\n the values are Pandas Dataframes.\n \"\"\"\n \n # print dictionary to command line\n print(dictionary_to_convert)\n \n dictionary_converted = dict()\n \n # convert to dataframe for export to excel\n for key, value in dictionary_to_convert.items():\n dictionary_converted[key] = pd.DataFrame.from_dict(value)\n \n return dictionary_converted\n","sub_path":"Analyze/convert_to_dataframe.py","file_name":"convert_to_dataframe.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"542897359","text":"\"\"\"\nSYS-611: Buffon's Needle Experiment Example with Antithetic Variables.\n\nThis example performs a Monte Carlo simulation of Buffon's Needle Experiment\nto estimate the probability of a needle of certain length crossing lines\non a floor with certain spacing. This probability is proportional to the\nmathematical constant pi.\n\n@author: Paul T. Grogan \n\"\"\"\n\n# import the python3 behavior for importing, division, and printing in python2\nfrom __future__ import absolute_import, division, print_function\n\n# import the matplotlib pyplot package and refer to it as `plt`\n# see http://matplotlib.org/api/pyplot_api.html for documentation\nimport matplotlib.pyplot as plt\n\n# import the scipy stats package and refer to it as `stats`\n# see http://docs.scipy.org/doc/scipy/reference/stats.html for documentation\nimport scipy.stats as stats\n\n# import the numpy package and refer to it as `np`\n# see http://docs.scipy.org/doc/numpy/reference/ for documentation\nimport numpy as np\n\n# define the line width and needle length for buffon's experiment\nline_width = 3.0\nneedle_length = 2.5\n\n# define a process generator for the event if a needle crosses a line\ndef drop_needle():\n r_1 = np.random.rand()\n r_2 = np.random.rand()\n # generate distance between needle centroid and nearest line from uniform \n # distribution between 0 and line_width/2\n d_1 = r_1*line_width/2\n d_2 = (1-r_1)*line_width/2\n # generate acute angle between needle and line from uniform distribution\n # between 0 and pi/2 radians\n theta_1 = r_2*np.pi/2\n theta_2 = (1-r_2)*np.pi/2\n \n # for each antithetic variable, record 1 if d < needle_length/2*sin(theta)\n # otherwise record 0\n x_1 = 1 if d_1 < needle_length/2*np.sin(theta_1) else 0\n x_2 = 1 if d_2 < needle_length/2*np.sin(theta_2) else 0\n # return the average of the two antithetic variables\n return (x_1+x_2)/2.\n\n# set the random number generator seed to 0\nnp.random.seed(0)\n\n# generate 850 samples\nsamples = [drop_needle() for i in range(850)]\n\n# compute the lower and upper-bounds using a 95% confidence interval\nconfidence_level = 0.05\nz_crit = stats.norm.ppf(1-confidence_level/2)\n\nprint('P(X) = {:.3f} +/- {:.3f} (95% CI)'.format(\n np.average(samples),\n z_crit*stats.sem(samples)\n ))\n\n# compute the exact solution, as solved by calculus\nsolution = 2*needle_length/(line_width*np.pi)\n\n# compute running statistics for mean and confidence interval\nmean_estimate = np.array([np.average(samples[0:i]) for i in range(len(samples))])\nconfidence_int = z_crit*np.array([stats.sem(samples[0:i]) for i in range(len(samples))])\n\n# create a plot to show the mean estimate with 95% confidence interval bounds\nplt.figure()\nplt.plot(range(len(samples)), mean_estimate, \n 'b', label='Mean Estimate')\nplt.plot(range(len(samples)), mean_estimate-confidence_int, \n 'g', label='95% CI 
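For reference, a self-contained illustration of the pd.DataFrame.from_dict conversion that perform_conversion applies to each sheet: a dict of column-name-to-values becomes one frame.

import pandas as pd

sheets = {'summary': {'name': ['a', 'b'], 'count': [1, 2]}}
converted = {key: pd.DataFrame.from_dict(value) for key, value in sheets.items()}
print(converted['summary'])  # two rows, columns 'name' and 'count'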
Lower Bound')\nplt.plot(range(len(samples)), mean_estimate+confidence_int, \n 'r', label='95% CI Upper Bound')\nplt.plot([0, len(samples)], [solution, solution], \n '-k', label='Analytical Solution')\nplt.xlabel('Sample')\nplt.ylabel('Estimate of $P(x)$')\nplt.legend(loc='best')\n\n#%%\n\n# transform the mean estimate to estimate pi using the solution form\npi_estimate = 2*needle_length/(line_width*mean_estimate)\npi_lower_bound = 2*needle_length/(line_width*(mean_estimate+confidence_int))\npi_upper_bound = 2*needle_length/(line_width*(mean_estimate-confidence_int))\n\nprint('pi = {:.3f} +/- {:.3f} (95% CI)'.format(\n pi_estimate[-1],\n pi_upper_bound[-1] - pi_estimate[-1]\n ))\n\n# create a plot to show the pi estimate with 95% confidence interval bounds\nplt.figure()\nplt.plot(range(len(samples)), pi_estimate, \n 'b', label='Mean Estimate')\nplt.plot(range(len(samples)), pi_lower_bound, \n 'g', label='95% CI Lower Bound')\nplt.plot(range(len(samples)), pi_upper_bound, \n 'r', label='95% CI Upper Bound')\nplt.plot([0, len(samples)], [np.pi, np.pi], \n '-k', label='Analytical Solution')\nplt.xlabel('Sample')\nplt.ylabel('Estimate of $\\pi$')\nplt.legend(loc='best')","sub_path":"previous/week4/buffonsNeedleAntithetic.py","file_name":"buffonsNeedleAntithetic.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"192125975","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 29 09:58:03 2020\n\n@author: sowja\n\"\"\"\n# python code to demonstrate working of reduce() \n\n\nfrom tqdm import tqdm, tqdm_notebook\ntqdm_notebook().pandas()\n# importing functools for reduce() \nfrom functools import reduce \nimport pandas as pd\n\ndf = pd.DataFrame({'c1': [1,2,3,4,5,1,2,3,4], 'year': [2016, 2017,2016,2017,2016,2015,2014,2016,2015],\n 'Team': ['MI', 'MI','CSK','CSK','KKR','DC','DC','DDD','KKR'],'c3': [5, 6,3,4,5,6,2,1,10]})\n\ngroups=df.groupby(['year','Team'])\n\nno2015 = groups.filter(lambda x: x['c3'] > 4)\n\ndf = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n'foo', 'bar'],\n'B' : [1, 2, 3, 4, 5, 6],\n'C' : [2.0, 5., 8., 1., 2., 9.]})\ngrouped = df.groupby('A')\ngrouped.filter(lambda x: x['B'].mean() > 3.)\n\nprint(df)\ndf['NewA'] = df.apply(lambda x: x['A']+'bhargav I am alright',axis=1)\n\nprint(df)\n\n#we want to filter those rows where the number of words in the NewA is greater than or equal \n#to than 4.\n\n#new_df = df[len(df['NewA'].split(\" \"))>=4]\n#This will give an error, since there is no split attribute in series.\n\n#One way is to first create a column which contains no of words \n#in the title using apply and then filter on that column.\n#create a new column\ndf['num_words'] = df.apply(lambda x : len(x['NewA'].split(\" \")),axis=1)\n\n#simple filter on new column\nnew_df = df[df['num_words']>=4]\n\n#I would rather prefer\nnew_df = df[df.apply(lambda x : len(x['NewA'].split(\" \"))>=4,axis=1)]\n\n#To show progress of apply\nnew_df = df[df.progress_apply(lambda x : len(x['NewA'].split(\" \"))>=4,axis=1)]\n\n#Incase you need to deal with Price which has , like 13,000\n#df['Price'] = df.apply(lambda x: int(x['Price'].replace(',', '')),axis=1)\n\n","sub_path":"MyCode/LambdaFunctionsIllustrated.py","file_name":"LambdaFunctionsIllustrated.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"385472262","text":"import os\nfrom string import ascii_lowercase as letters\n\nimport numpy as 
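Averaging x_1 and x_2 in drop_needle is the whole antithetic-variates trick: the pair is negatively correlated, so the pair mean has lower variance than two independent draws. A tiny demo of that effect, independent of Buffon's setup, for a monotone function of a uniform draw:

import numpy as np

rng = np.random.default_rng(0)
r = rng.random(100000)
plain = np.exp(r)                             # crude Monte Carlo samples of E[e^U]
antithetic = (np.exp(r) + np.exp(1 - r)) / 2  # each pair reuses one draw
print(plain.var(), antithetic.var())          # the antithetic variance is far smaller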
np\nimport pandas as pd\nfrom scipy import stats\n\nimport InterruptionAnalysis as ia\n\nimport Dyadic\n\nnp.random.seed(12345)\n\n# import reference data and convert time step to 1/10th second\ndata = pd.read_csv('./data/timeseries.csv', index_col = 0)\nnumeric_cols = ['begin', 'end', 'dur', 'lat']\nfor col in numeric_cols:\n data[col] = data[col]/100\n\n# sim parameters for all sims\nnsims = 100\ngIDs = pd.unique(data[\"gID\"])\n\n# prepare subdirectory structure\n# separate sims into subdirectories to speed file lookup\n# if mimic-groups is level 1, there will be `nsims` level 2 directories with 33 level 3 directories each\nsavepath = \"./data/simulations/synthetic-groups-dyadic\"\nif not os.path.isdir(savepath):\n os.mkdir(savepath)\n\n# prepare probability distributions for p and q\ndists = pd.read_csv(\"./data/fits.csv\", index_col = 0)\n\ndist_choice = \"theory\"\nif dist_choice == \"theory\":\n d1 = dists[(dists[\"transition\"] == \"AB\") & (dists[\"dist\"] == \"weibull_min\")]\n d2 = dists[(dists[\"transition\"] == \"BA\") & (dists[\"dist\"] == \"lognorm\")]\n chosen_dists = pd.concat([d1, d2])\nelif dist_choice == \"beta\":\n chosen_dists = dists[dists[\"dist\"] == \"beta\"]\nelse:\n chosen_dists = dists.loc[dists.groupby(\"transition\")[\"ΔAIC\"].idxmin()]\n\nchosen_dists.set_index(\"transition\", inplace = True)\nfitrows = [\"AB\", \"BA\"]\nfits = {}\nfor row in fitrows:\n dist = getattr(stats, chosen_dists.loc[row, \"dist\"])\n args = [chosen_dists.loc[row, \"arg1\"], chosen_dists.loc[row, \"arg2\"]]\n arg = [a for a in args if ~np.isnan(a)]\n loc = chosen_dists.loc[row, \"loc\"]\n scale = chosen_dists.loc[row, \"scale\"]\n if arg:\n fit = dist(loc = loc, scale = scale, *arg)\n else:\n fit = dist(loc = loc, scale = scale)\n fits[row] = fit\n \n# main loop\n# The number of sims here means how many times through simulating each group once\nfor sim in range(nsims):\n print(sim)\n\n # prepare subdirectory for this sim\n simpath = savepath + f\"/sim{sim:03d}\" # sim number with leading zeros\n if not os.path.isdir(simpath):\n os.mkdir(simpath)\n \n # loop through the groups\n for gID in gIDs:\n print(gID)\n # filename for this gID\n filename = f\"{simpath}/{gID}.csv\"\n\n # set parameters for this group\n pIDs = pd.unique(data[data[\"gID\"] == gID][\"pID\"])\n T = round(data[data[\"gID\"] == gID][\"end\"].max())\n N = len(pIDs)\n ns = list(range(N))\n\n p_i = fits[\"AB\"].rvs(size = N)\n q_i = fits[\"BA\"].rvs(size = N)\n P_i = [\n np.array([[1 - p, p],\n [q, 1 - q]]) for p, q in zip(p_i, q_i)\n ]\n\n # run sim itself\n Y = Dyadic.simulation(P_i, T, N, ns, scale = 1e-5)\n X = ia.Y_to_X(Y, ns)\n \n # clean up data frame for later ease of use\n X[\"gID\"] = gID\n\n # save this sim in the right place\n X.to_csv(f\"{filename}\")\n","sub_path":"generate-synthetic-groups-dyadic.py","file_name":"generate-synthetic-groups-dyadic.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"516697355","text":"# -*- coding:utf-8 -*-\n\nimport sys\nimport itchat\nimport logging\nimport threading\nimport collections\nimport re\n\n\ncontext = threading.local()\ncontext.msg = None\n\nmsg = context.msg\n\n\nclass ChatRobot:\n nick_name = \"chatrobot\"\n user_name = \"\"\n\n def __init__(self, conf=None):\n \"\"\"\n init methods.\n initialize listen rule, there are three element in it, `onechat`, `groupchat` and\n `mechat`, onechat means private chat, groupchat means a chatroom, mechat means self\n word 
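Each P_i assembled above is a 2x2 row-stochastic matrix over a silent/speaking state. Dyadic.simulation itself is not shown here, so the following shows only the standard way such a chain is stepped, not the module's actual code:

import numpy as np

rng = np.random.default_rng(1)
P = np.array([[0.9, 0.1],   # row 0: silent  -> (stay silent, start speaking)
              [0.4, 0.6]])  # row 1: speaking -> (stop, keep speaking)

state, trace = 0, []
for _ in range(20):
    state = int(rng.choice(2, p=P[state]))  # next state drawn from the current row
    trace.append(state)
print(trace)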
content.All the rules defined will store in this dict, and in order to reduce \n code logic to set these three value as defaultdict.\n\n login wechat client.it set hotReload as True, so you can login without scan QR image\n agin and agin.\n\n get your information such as nick_name and user_name, nick name is different from user_name\n refer from itchat document and itchat support using user_name to search user information.\n\n initialize logger module.chatbot use python `logging` module to note the important data.\n\n initialize chat context.Chat context store the message object and it's relative independence\n in different threading.\n \"\"\"\n # listen_rule\n # store your listen rules\n # you can add new rule by using `listen` methods or `add_listen_rule` method\n self.listen_rule = {\n \"onechat\": collections.defaultdict(list),\n \"groupchat\": collections.defaultdict(list),\n \"mechat\": collections.defaultdict(list)\n }\n\n # login to wechat client\n if conf is not None:\n login_conf = conf.get('login_conf', {})\n else:\n login_conf = {}\n hot_reload = login_conf.get('hotReload', False)\n status_storage_dir = login_conf.get('statusStorageDir', 'chatbot.pkl')\n enable_cmd_qr = login_conf.get('enableCmdQR', False)\n pic_dir = login_conf.get('picDir', None)\n qr_callback = login_conf.get('qr_callback', None)\n login_callback = login_conf.get('loginCallback', None)\n exit_callback = login_conf.get('exitCallback', None)\n itchat.auto_login(\n hotReload=hot_reload,\n statusStorageDir=status_storage_dir,\n enableCmdQR=enable_cmd_qr,\n picDir=pic_dir,\n qrCallback=qr_callback,\n loginCallback=login_callback,\n exitCallback=exit_callback)\n\n # initialize self information\n # itchat provide `search_friends` methods to search user information by user name\n # if no user name support it return your own infomation, it is useful so save it.\n me = itchat.search_friends()\n self.nick_name = me['nick_name']\n self.user_name = me['user_name']\n\n # initialize logger module\n # it's important to log while the program is running, chatbot use logging module to\n # log the important data, and it send to stout device\n # TODO: log configurable\n if conf is not None:\n logger_conf = conf.get('logger_conf', {})\n else:\n logger_conf = {}\n level = logger_conf.get('level', 'DEBUG')\n log_format = logger_conf.get('format', '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n name = logger_conf.get('name', __name__)\n path = logger_conf.get('path', None)\n\n if level.upper() == \"INFO\":\n level = logging.INFO\n elif level.upper() == \"WARNING\":\n level = logging.WARNING\n elif level.upper() == \"ERROR\":\n level = logging.ERROR\n elif level.upper() == \"FATAL\":\n level = logging.FATAL\n else:\n level = logging.DEBUG\n\n logging.basicConfig(level=level, format=log_format, filename=path)\n self.logger = logging.getLogger(name)\n\n def add_listen_rule(self, key_word, handler, is_one=True, is_self=False, is_group=False, is_at=False, nick_name=None):\n \"\"\"\n add_listen_rule\n add a listen rule to chatbot.\n \"\"\"\n listen_rule = self.listen_rule\n\n rules_box = []\n if is_self:\n rules_box.append(listen_rule[\"mechat\"])\n if is_group:\n rules_box.append(listen_rule[\"groupchat\"])\n if is_one:\n rules_box.append(listen_rule[\"onechat\"])\n\n for rules in rules_box:\n rule = {\n \"handler\": handler,\n \"handlerName\": handler.__name__,\n \"is_at\": is_at\n }\n if nick_name is not None:\n rule['nick_name'] = nick_name\n rules[key_word].append(rule)\n\n def listen(self, key_word, is_one=False, 
is_self=False, is_group=False, is_at=False, nick_name=None):\n \"\"\"\n add listen rule by decorator\n \"\"\"\n if not is_one and not is_self and not is_group:\n is_one = True\n\n def decorator(f):\n self.add_listen_rule(key_word, f, is_one, is_self, is_group, is_at, nick_name)\n return f\n\n return decorator\n\n @staticmethod\n def get_from_user_name(msg, is_group_chat=False):\n \"\"\"\n get msg sender nick_name\n \"\"\"\n if is_group_chat:\n return msg['Actualnick_name'].encode()\n\n friend = itchat.search_friends(user_name=msg[\"from_user_name\"])\n if friend is None:\n return \"未知\"\n else:\n return friend['nick_name']\n\n def get_group_selfname(self, msg):\n \"\"\"\n get your nick_name in a centain group\n \"\"\"\n if msg.get('User').has_key('Self') and msg['User']['Self']['DisplayName'].encode() != '':\n return msg['User']['Self']['DisplayName'].encode()\n else:\n return self.nick_name\n\n def _get_rules(self):\n \"\"\"\n get the rules base on context.\n \"\"\"\n global context\n msg = context.msg\n\n text = msg[\"Text\"].encode()\n if context.is_at:\n prefix = '@' + self.get_group_selfname(msg) + ' '\n text = text.replace(prefix, '')\n self.logger.debug('关键词: ({})'.format(text))\n\n rules = []\n aim_rules = None\n if context.from_user_nick_name == self.nick_name:\n self.logger.debug('检索个人规则词表')\n aim_rules = self.listen_rule['mechat']\n elif context.is_group_chat:\n self.logger.debug('检索群聊规则词表')\n aim_rules = self.listen_rule[\"groupchat\"]\n else:\n self.logger.debug('检索私聊规则词表')\n aim_rules = self.listen_rule[\"onechat\"]\n\n for key, value in aim_rules.items():\n key_com = re.compile(key)\n if sys.version_info.major < 3 and key_com.match(text):\n rules.extend(value)\n elif sys.version_info.major == 3 and key_com.match(text.decode()):\n rules.extend(value)\n return rules\n\n def _handler_one_rule(self, rule):\n \"\"\"\n running a handler rule\n \"\"\"\n self.logger.info(\"触发处理函数: {}\".format(rule['handlerName']))\n global context\n msg = context.msg\n\n if not context.is_group_chat:\n rule['is_at'] = False\n\n if rule['is_at'] == context.is_at and rule.get('nick_name', context.from_user_nick_name) == context.from_user_nick_name:\n handler = rule['handler']\n content = handler()\n\n if type(content) == type(str()):\n self.logger.debug(\"返回信息: {}\".format(content))\n msg.User.send(content)\n elif type(content) == type(tuple()):\n t, arg = content\n if t == \"text\":\n self.logger.debug(\"返回信息: {}\".format(arg))\n msg.User.send(arg)\n elif t == \"image\":\n self.logger.debug(\"返回图片: {}\".format(arg))\n msg.User.send_image(arg)\n else:\n self.logger.debug(\"未支持返回类型: {}\".format(t))\n else:\n self.logger.warning(\"处理函数返回格式错误,错误类型: {}\".format(str(type(content))))\n else:\n self.logger.info(\"处理函数配置项匹配失败\")\n if rule['is_at'] != context.is_at:\n self.logger.debug(\"群聊@属性不匹配\")\n self.logger.debug(\"{} != {}\".format(str(rule['is_at']), str(context.is_at)))\n if rule.get('nick_name', context.from_user_nick_name) != context.from_user_nick_name:\n self.logger.debug(\"对象昵称不匹配\")\n self.logger.debug(\n \"{} != {}\".format(rule.get('nick_name', context.from_user_nick_name), context.from_user_nick_name))\n\n def _handler_diliver(self, msg, is_group_chat):\n \"\"\"\n while msg is comming, check it and return\n \"\"\"\n global context\n context.msg = msg\n context.is_group_chat = is_group_chat\n context.is_at = msg.get('is_at', False)\n context.from_user_nick_name = self.get_from_user_name(msg)\n\n rules = self._get_rules()\n\n self.logger.info(\"触发规则: {} 条\".format(len(rules)))\n\n for rule in 
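Stripped of the itchat plumbing, the listen/dispatch machinery above is a regex-keyed handler registry populated by a decorator. A minimal standalone version of that pattern:

import re
from collections import defaultdict

rules = defaultdict(list)

def listen(pattern):
    def decorator(handler):
        rules[pattern].append(handler)
        return handler
    return decorator

@listen(r'^hello')
def greet():
    return 'hi there'

def dispatch(text):
    # Run every handler whose pattern matches the start of the text.
    return [handler() for pattern, handlers in rules.items()
            if re.match(pattern, text) for handler in handlers]

print(dispatch('hello world'))  # ['hi there']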
rules:\n self._handler_one_rule(rule)\n\n def run(self):\n \"\"\"\n run chatbot\n \"\"\"\n\n @itchat.msg_register(itchat.content.TEXT)\n def trigger_chatone(msg):\n from_user_name = self.get_from_user_name(msg)\n text = msg['Text'].encode()\n self.logger.info('(普通消息){}: {}'.format(from_user_name, text))\n\n t = threading.Thread(target=self._handler_diliver, args=(msg, False))\n t.setDaemon(True)\n t.start()\n\n @itchat.msg_register(itchat.content.TEXT, is_group_chat=True)\n def trigger_chatgroup(msg):\n from_user_name = self.get_from_user_name(msg, is_group_chat=True)\n text = msg['Text'].encode()\n self.logger.info('(群消息){}: {}'.format(from_user_name, text))\n\n t = threading.Thread(target=self._handler_diliver, args=(msg, True))\n t.setDaemon(True)\n t.start()\n\n itchat.run()\n","sub_path":"src/chatrobot.py","file_name":"chatrobot.py","file_ext":"py","file_size_in_byte":10254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"21003240","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 13 12:32:22 2019\n\n@author: nebelgrau\n\"\"\"\n\n# Dice Cup\n\ndice1, dice2 = map(int, input().split())\n\noutcomes = [d1+d2 for d1 in range(1,dice1+1) for d2 in range(1,dice2+1)]\n#all possible combinations between the dice\n\ncombos = set(outcomes)\n#each just once\n\nprobabilities = {}\n\n# check if there's already a combo with a given probability(count)\n# if not, create it, otherwise append the combo to the list\n\nfor combo in combos:\n if outcomes.count(combo) in probabilities.keys():\n probabilities[outcomes.count(combo)].append(combo)\n else:\n probabilities[outcomes.count(combo)] = [combo]\n \nbest_result = max(probabilities.keys())\n\nfor _ in probabilities[best_result]:\n print(_)","sub_path":"DiceCup.py","file_name":"DiceCup.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"160768900","text":"import tensorflow as tf\n\n\nclass TBSummariser:\n\n def __init__(self, scalar_names):\n self.placeholders = {name: tf.placeholder(dtype=tf.float32, name=name) for name in scalar_names}\n self.scalars = {name: tf.summary.scalar(name, placeholder) for name, placeholder in self.placeholders.items()}\n\n self.merged = tf.summary.merge([v for v in self.scalars.values()])\n\n def summarise(self, sess, scalar_values):\n\n feed_dict = {\n self.placeholders[name]: scalar_values[name] for name in self.placeholders\n }\n return sess.run(self.merged, feed_dict=feed_dict)\n","sub_path":"rlpoker/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"263278002","text":"# Teil 1: Optionen\n\ninclude_dateien = [ # wo wird nach .h Dateien gesucht?\n '.' 
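The outcomes.count(...) loop in DiceCup.py above rescans the whole outcome list once per distinct sum; collections.Counter does the tally in one pass and makes the most-probable-sums query direct:

from collections import Counter

dice1, dice2 = 6, 6
counts = Counter(a + b for a in range(1, dice1 + 1) for b in range(1, dice2 + 1))
best = max(counts.values())
print(sorted(total for total, n in counts.items() if n == best))  # [7] for two d6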
# in the local directory\n]\n\nshared_libraries = [\n 'm' # math functions from the standard library\n]\n\ncpp_standard = [ # language standard settings\n '-std=c++98', # ISO/IEC Standard 14882\n '-pedantic' # no extensions allowed\n]\n\nwarnungen = [ # warning settings\n '-Wall', # complain a lot\n '-Wextra', # complain even more\n '-Werror' # warnings are errors\n]\n\n# Part 2: definition of the environment\n\nstd = Environment(\n CXXFLAGS = cpp_standard + warnungen,\n CPPPATH = include_dateien,\n LIBS = shared_libraries\n)\n\n# Part 3: build the program\n\ncpp_dateien = [\n 'square.cpp',\n 'console_input.cpp',\n 'file_controller.cpp',\n 'magic_square_set.cpp',\n 'magic_square.cpp',\n 'main.cpp',\n]\n\nprogramm_name = 'Magic'\n\nstd.Program(programm_name, cpp_dateien) # build the program\n","sub_path":"aufgabe_9/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"221617402","text":"from bs4 import BeautifulSoup\nimport requests\nfrom PIL import Image, ImageChops\nimport numpy as np\n\ndef equal(im1, im2):\n return ImageChops.difference(im1, im2).getbbox() is None\n\ndef is_new(img):\n if len(imgs) == 0:\n imgs.append(img)\n return True\n else:\n dup = False\n for prev_img in imgs:\n bol = equal(prev_img, img)\n if bol:\n dup = True\n if not dup:\n imgs.append(img)\n return not dup\n\ncounter = 0\nimgs = []\n\nfor i in range(30):\n source = requests.get('https://www.stockvault.net/c/animals/cats-and-dogs/?s=l&p={}'.format(i)).text\n \n soup = BeautifulSoup(source, 'lxml')\n \n page_wrap = soup.find_all('div', class_='clearfix', id='wrapper')\n \n content = page_wrap[0].find('div', id='content')\n \n rows = content.find_all('div', class_='section nomargin nopadding')\n \n for row in rows:\n items = row.find_all('div', class_='item')\n for item in items:\n # print(item.a.img)\n image = item.a.img\n image_url = image['src']\n\n try:\n img = Image.open(requests.get(image_url, stream=True).raw)\n except:\n pass\n img_name = 'image_{}.jpg'.format(counter)\n if is_new(img):\n img.save('Images/{}'.format(img_name))\n counter += 1\n print(counter)\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"628369269","text":"from PIL import Image\nimport json\nimport sys\n\nDataPath = 'board.json'\ncol=[\n [0, 0, 0],\n [255, 255, 255],\n [170, 170, 170],\n [85, 85, 85],\n [254, 211, 199],\n [255, 196, 206],\n [250, 172, 142],\n [255, 139, 131],\n [244, 67, 54],\n [233, 30, 99],\n [226, 102, 158],\n [156, 39, 176],\n [103, 58, 183],\n [63, 81, 181],\n [0, 70, 112],\n [5, 113, 151],\n [33, 150, 243],\n [0, 188, 212],\n [59, 229, 219],\n [151, 253, 220],\n [22, 115, 0],\n [55, 169, 60],\n [137, 230, 66],\n [215, 255, 7],\n [255, 246, 209],\n [248, 203, 140],\n [255, 235, 59],\n [255, 193, 7],\n [255, 152, 0],\n [255, 87, 34],\n [184, 63, 39],\n [121, 85, 72]\n]\nif len(sys.argv)<5:\n print(\"transform.py 'add'/'create' ImagePath LeftTopX LeftTopY\")\n exit(0)\nType = sys.argv[1]\nImagePath = sys.argv[2]\nX = int(sys.argv[3])\nY = int(sys.argv[4])\nSpeedType = 0\n# print(Type, ImagePath, X, Y)\nif Type != 'add' and Type != 'create':\n print(\"transform.py ImagePath add/create\")\n exit(0)\nif Type == 'add':\n Type = 0\nelse:\n Type = 1\nif len(sys.argv) >= 6 and sys.argv[5] == 'speed':\n SpeedType = 1\noldimage = 
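The pairwise ImageChops.difference scan in scrape.py above grows quadratically with the number of saved images. A common alternative is to key a set on a digest of the raw bytes; note this catches exact byte-level duplicates only, not perceptually similar images:

import hashlib

seen_digests = set()

def is_new_bytes(raw):
    digest = hashlib.sha256(raw).hexdigest()
    if digest in seen_digests:
        return False
    seen_digests.add(digest)
    return True

assert is_new_bytes(b'abc')
assert not is_new_bytes(b'abc')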
Image.open(ImagePath)\n\ncoldata = []\nLength = len(col)\nfor i in range(Length):\n coldata.append(col[i][0])\n coldata.append(col[i][1])\n coldata.append(col[i][2])\n\n# convert the image into board.json\ndef toboard(im):\n size = im.size\n src = im.load()\n if Type == 0:\n try:\n with open(DataPath,'r') as boardjson:\n board = json.load(boardjson)\n except:\n board = []\n else:\n board = []\n for x in range(size[0]):\n for y in range(size[1]):\n if SpeedType == 1 and src[x,y] ==2:\n continue\n board.append([x + X, y + Y, src[x,y]])\n board = json.dumps(board)\n with open(DataPath,'w+') as f:\n f.write(board)\n\n\n# dithering algorithm\ndef method1():\n palimage = Image.new('P', oldimage.size)\n palimage.putpalette(coldata * (int)(256/Length))\n newimage = oldimage.quantize(palette=palimage)\n toboard(newimage)\n\ndef quantizetopalette(silf, palette, dither=False):\n \"\"\"Convert an RGB or L mode image to use a given P image's palette.\"\"\"\n\n silf.load()\n\n # use palette from reference image\n palette.load()\n if palette.mode != \"P\":\n raise ValueError(\"bad mode for palette image\")\n if silf.mode != \"RGB\" and silf.mode != \"L\":\n raise ValueError(\n \"only RGB or L mode images can be quantized to a palette\"\n )\n im = silf.im.convert(\"P\", 1 if dither else 0, palette.im)\n # the 0 above means turn OFF dithering\n\n # Later versions of Pillow (4.x) rename _makeself to _new\n try:\n return silf._new(im)\n except AttributeError:\n return silf._makeself(im)\n\n# non-dithering algorithm\ndef method2():\n palimage = Image.new('P', oldimage.size)\n palimage.putpalette(coldata * (int)(256/Length))\n newimage = quantizetopalette(oldimage, palimage, dither=False)\n toboard(newimage)\n\nmethod1()\n\n# ↓ gives worse results, do not use ↓\n# method2()","sub_path":"data/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"160322139","text":"from setuptools import find_packages\n\nfrom distutils.core import setup\n\nversion = '1.0.0'\n\nsetup(\n name = \"user_profile\",\n version = \"1.0.0\",\n packages = ['user_profile'],\n author = \"vahid chakoshy\",\n author_email = \"vchakoshy@gmail.com\",\n description = \"user profile for pinterest application like in Django\",\n url = \"http://www.wisgoon.com/\",\n py_modules = [\"user_profile\"],\n #package_dir = {},\n package_dir = {'user_profile': 'user_profile'},\n include_package_data = True,\n zip_safe=False,\n)\n","sub_path":"pypi_install_script/user_profile-1.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"452743675","text":"import matplotlib.pyplot as plt\nimport csv\nimport matplotlib.dates as dates\nimport datetime as dt\nimport matplotlib.dates as mdates\n\ndate = []\nMACD = []\nSignal = []\nwith open('project/btc.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for i in range(0,34):\n next(data)\n for row in data:\n date_str=row[1]\n date_str=date_str.split()[0]\n# print date_str\n date.append(date_str)\n MACD.append(float(row[5]))\n Signal.append(float(row[6]))\n\n\nf=plt.figure(figsize=(12,6))\n\n\nx = [dt.datetime.strptime(d,'%m/%d/%y').date() for d in date]\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%y'))\nplt.gca().xaxis.set_major_locator(mdates.MonthLocator())\nplt.plot(x,MACD,label='MACD')\nplt.plot(x, Signal,label='Signal')\nplt.xlabel('date')\nplt.ylabel('EMA of price')\nplt.title('trending', 
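putpalette in transform.py is fed coldata * (int)(256/Length) because a P-mode image carries 256 palette entries, i.e. 768 flat R,G,B values; a quick check of that arithmetic (it fills the palette exactly only when the colour count divides 256):

coldata = [0, 0, 0] * 32         # 32 colours as a flat R,G,B list (96 ints)
palette = coldata * (256 // 32)  # tile up to 256 palette entries
assert len(palette) == 768       # 256 entries * 3 channels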
fontsize=40)\nplt.legend()\nplt.show()\nf.savefig(\"trending_month.png\", bbox_inches='tight')\n\ndate = []\nprice = []\nwith open('project/btc.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n next(data)\n for row in data:\n date_str=row[1]\n date_str=date_str.split()[0]\n date.append(date_str)\n price.append(float(row[2]))\nf=plt.figure(figsize=(12,6))\nx = [dt.datetime.strptime(d,'%m/%d/%y').date() for d in date]\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%y'))\nplt.gca().xaxis.set_major_locator(mdates.MonthLocator())\nplt.plot(x,price,label='price')\nplt.xlabel('date')\nplt.ylabel('price')\nplt.title('price of BITCOIN', fontsize=40)\nplt.legend()\nplt.show()\nf.savefig(\"price_bitcoin.png\", bbox_inches='tight')","sub_path":"trend.py","file_name":"trend.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"184573003","text":"\"\"\"Interfaces with multimatic sensors.\"\"\"\n\nfrom __future__ import annotations\n\nimport datetime\nimport logging\n\nfrom pymultimatic.model import EmfReport, Report\n\nfrom homeassistant.components.sensor import (\n DEVICE_CLASS_ENERGY,\n DEVICE_CLASS_PRESSURE,\n DEVICE_CLASS_TEMPERATURE,\n DOMAIN,\n STATE_CLASS_MEASUREMENT,\n STATE_CLASS_TOTAL_INCREASING,\n SensorEntity,\n)\nfrom homeassistant.const import ENERGY_WATT_HOUR, TEMP_CELSIUS\nfrom homeassistant.helpers.typing import StateType\nfrom homeassistant.util.dt import utc_from_timestamp\n\nfrom .const import EMF_REPORTS, OUTDOOR_TEMP, REPORTS\nfrom .coordinator import MultimaticCoordinator\nfrom .entities import MultimaticEntity\nfrom .utils import get_coordinator\n\n_LOGGER = logging.getLogger(__name__)\n\nUNIT_TO_DEVICE_CLASS = {\n \"bar\": DEVICE_CLASS_PRESSURE,\n \"ppm\": \"\",\n \"°C\": DEVICE_CLASS_TEMPERATURE,\n}\n\n\nasync def async_setup_entry(hass, entry, async_add_entities):\n \"\"\"Set up the multimatic sensors.\"\"\"\n sensors = []\n outdoor_temp_coo = get_coordinator(hass, OUTDOOR_TEMP, entry.unique_id)\n reports_coo = get_coordinator(hass, REPORTS, entry.unique_id)\n emf_reports_coo = get_coordinator(hass, EMF_REPORTS, entry.unique_id)\n\n if outdoor_temp_coo.data:\n sensors.append(OutdoorTemperatureSensor(outdoor_temp_coo))\n\n if reports_coo.data:\n sensors.extend(ReportSensor(reports_coo, report) for report in reports_coo.data)\n\n if emf_reports_coo.data:\n sensors.extend(\n EmfReportSensor(emf_reports_coo, report) for report in emf_reports_coo.data\n )\n\n _LOGGER.info(\"Adding %s sensor entities\", len(sensors))\n\n async_add_entities(sensors)\n return True\n\n\nclass OutdoorTemperatureSensor(MultimaticEntity, SensorEntity):\n \"\"\"Outdoor temperature sensor.\"\"\"\n\n def __init__(self, coordinator: MultimaticCoordinator) -> None:\n \"\"\"Initialize entity.\"\"\"\n super().__init__(coordinator, DOMAIN, \"outdoor_temperature\")\n\n @property\n def native_value(self) -> StateType:\n \"\"\"Return the state of the entity.\"\"\"\n return self.coordinator.data\n\n @property\n def available(self):\n \"\"\"Return True if entity is available.\"\"\"\n return super().available and self.coordinator.data is not None\n\n @property\n def native_unit_of_measurement(self) -> str | None:\n \"\"\"Return the unit of measurement of this entity, if any.\"\"\"\n return TEMP_CELSIUS\n\n @property\n def name(self) -> str:\n \"\"\"Return the name of the entity.\"\"\"\n return \"Outdoor temperature\"\n\n @property\n def device_class(self) -> str:\n \"\"\"Return the class of this 
device, from component DEVICE_CLASSES.\"\"\"\n return DEVICE_CLASS_TEMPERATURE\n\n @property\n def state_class(self) -> str | None:\n \"\"\"Return the state class of this entity.\"\"\"\n return STATE_CLASS_MEASUREMENT\n\n\nclass ReportSensor(MultimaticEntity, SensorEntity):\n \"\"\"Report sensor.\"\"\"\n\n def __init__(self, coordinator: MultimaticCoordinator, report: Report) -> None:\n \"\"\"Init entity.\"\"\"\n MultimaticEntity.__init__(self, coordinator, DOMAIN, report.id)\n self._report_id = report.id\n self._unit = report.unit\n self._name = report.name\n self._class = UNIT_TO_DEVICE_CLASS.get(report.unit, None)\n self._device_name = report.device_name\n self._device_id = report.device_id\n\n @property\n def report(self):\n \"\"\"Get the current report based on the id.\"\"\"\n return next(\n (\n report\n for report in self.coordinator.data\n if report.id == self._report_id\n ),\n None,\n )\n\n @property\n def native_value(self) -> StateType:\n \"\"\"Return the state of the entity.\"\"\"\n return self.report.value\n\n @property\n def available(self):\n \"\"\"Return True if entity is available.\"\"\"\n return super().available and self.report is not None\n\n @property\n def native_unit_of_measurement(self) -> str | None:\n \"\"\"Return the unit of measurement of this entity, if any.\"\"\"\n return self._unit\n\n @property\n def device_info(self):\n \"\"\"Return device specific attributes.\"\"\"\n return {\n \"identifiers\": {(DOMAIN, self._device_id)},\n \"name\": self._device_name,\n \"manufacturer\": \"Vaillant\",\n \"model\": self.report.device_id,\n }\n\n @property\n def state_class(self) -> str | None:\n \"\"\"Return the state class of this entity, from STATE_CLASSES, if any.\"\"\"\n return STATE_CLASS_MEASUREMENT\n\n @property\n def device_class(self) -> str | None:\n \"\"\"Return the class of this device, from component DEVICE_CLASSES.\"\"\"\n return self._class\n\n @property\n def name(self) -> str | None:\n \"\"\"Return the name of the entity.\"\"\"\n return self._name\n\n\nclass EmfReportSensor(MultimaticEntity, SensorEntity):\n \"\"\"Emf Report sensor.\"\"\"\n\n def __init__(self, coordinator: MultimaticCoordinator, report: EmfReport) -> None:\n \"\"\"Init entity.\"\"\"\n self._device_id = f\"{report.device_id}_{report.function}_{report.energyType}\"\n self._name = f\"{report.device_name} {report.function} {report.energyType}\"\n MultimaticEntity.__init__(self, coordinator, DOMAIN, self._device_id)\n\n @property\n def report(self):\n \"\"\"Get the current report based on the id.\"\"\"\n return next(\n (\n report\n for report in self.coordinator.data\n if f\"{report.device_id}_{report.function}_{report.energyType}\"\n == self._device_id\n ),\n None,\n )\n\n @property\n def native_value(self):\n \"\"\"Return the state of the entity.\"\"\"\n return self.report.value\n\n @property\n def available(self):\n \"\"\"Return True if entity is available.\"\"\"\n return super().available and self.report is not None\n\n @property\n def native_unit_of_measurement(self) -> str | None:\n \"\"\"Return the unit of measurement of this entity, if any.\"\"\"\n return ENERGY_WATT_HOUR\n\n @property\n def device_info(self):\n \"\"\"Return device specific attributes.\"\"\"\n return {\n \"identifiers\": {(DOMAIN, self.report.device_id)},\n \"name\": self.report.device_name,\n \"manufacturer\": \"Vaillant\",\n \"model\": self.report.device_id,\n }\n\n @property\n def device_class(self) -> str | None:\n \"\"\"Return the class of this device, from component DEVICE_CLASSES.\"\"\"\n return 
DEVICE_CLASS_ENERGY\n\n @property\n def name(self) -> str | None:\n \"\"\"Return the name of the entity.\"\"\"\n return self._name\n\n @property\n def last_reset(self) -> datetime.datetime:\n \"\"\"Return the time when the sensor was last reset, if any.\"\"\"\n return utc_from_timestamp(0)\n\n @property\n def state_class(self) -> str:\n \"\"\"Return the state class of this entity.\"\"\"\n return STATE_CLASS_TOTAL_INCREASING\n","sub_path":"custom_components/multimatic/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":7061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"364585612","text":"\n\nfrom xai.brain.wordbase.verbs._vegetate import _VEGETATE\n\n#calss header\nclass _VEGETATED(_VEGETATE, ):\n\tdef __init__(self,): \n\t\t_VEGETATE.__init__(self)\n\t\tself.name = \"VEGETATED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"vegetate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_vegetated.py","file_name":"_vegetated.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"126495763","text":"# -*- coding: utf-8 -*-\n#\n# (c) Copyright 2017-2018 Hewlett Packard Enterprise Development LP\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nManifest = {\n 'Name': 'interface_tx_rx_stats_monitor',\n 'Description': 'Interface tx/rx statistics monitoring agent',\n 'Version': '2.0',\n 'Author': 'Aruba Networks'\n}\n\n\nclass Agent(NAE):\n\n def __init__(self):\n # algorithm for dynamic threshold calculation\n self.alg = MaxAlgorithm(continuous_learning_window=\"10m\")\n\n # rx packets\n uri1 = '/rest/v1/system/interfaces/*?attributes=statistics.rx_packets'\n rate_m1 = Rate(uri1, \"10 seconds\")\n self.m1 = Monitor(rate_m1, 'Rx Packets (packets per second)')\n self.r1 = Rule('Rule for Monitor Interface rx Packets')\n\n title1 = Title(\"Baseline for Interface rx Packets\")\n self.baseline1 = Baseline(self.m1, algorithm=self.alg, title=title1,\n high_threshold_factor=2,\n low_threshold_factor=1.2,\n initial_learning_time='1d')\n self.r1.condition('{} > {}', [self.m1, self.baseline1])\n self.r1.clear_condition('{} < {}', [self.m1, self.baseline1])\n self.r1.action(\"ALERT_LEVEL\", AlertLevel.CRITICAL)\n self.r1.clear_action(\"ALERT_LEVEL\", AlertLevel.NONE)\n\n # rx packets dropped\n uri2 = '/rest/v1/system/interfaces/*?attributes=statistics.rx_dropped'\n self.m2 = Monitor(\n uri2,\n 'Rx Packets Dropped (packets)')\n\n # tx packets\n uri3 = '/rest/v1/system/interfaces/*?attributes=statistics.tx_packets'\n rate_m3 = Rate(uri3, \"10 seconds\")\n self.m3 = Monitor(rate_m3, 'Tx Packets (packets per second)')\n self.r3 = Rule('Rule for Monitor Interface tx Packets')\n title3 = Title(\"Baseline for Interface tx Packets\")\n self.baseline3 = Baseline(self.m3, algorithm=self.alg, title=title3,\n high_threshold_factor=2,\n low_threshold_factor=1.2,\n initial_learning_time='1d')\n self.r3.condition('{} > {}', [self.m3, 
self.baseline3])\n self.r3.clear_condition('{} < {}', [self.m3, self.baseline3])\n self.r3.action(\"ALERT_LEVEL\", AlertLevel.CRITICAL)\n self.r3.clear_action(\"ALERT_LEVEL\", AlertLevel.NONE)\n\n # tx packets dropped\n uri4 = '/rest/v1/system/interfaces/*?attributes=statistics.tx_dropped'\n self.m4 = Monitor(\n uri4,\n 'Tx Packets Dropped (packets)')\n","sub_path":"agents/Interface/8320/interface_tx_rx_stats_monitor.2.0.py","file_name":"interface_tx_rx_stats_monitor.2.0.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"528771913","text":"import sys\nimport os\nfrom deepclaw.driver.arms.franka.FrankaController import FrankaController\n\nif __name__ == '__main__':\n _root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n sys.path.append(_root_path)\n os.chdir(_root_path)\n print('work_dir: ', _root_path)\n FC = FrankaController('./Grasping_Franka/config/franka.yaml')\n\n # print('My Cartesian Pose: ', FC.getCartesianPose())\n\n allState = FC.get_state()\n print('My state: ', allState)\n # move_p position\n print('My current position in base: ', allState['TCP_Pose'])\n # move_j position\n print('My current joint space: ', FC.getJoint())\n","sub_path":"Driver/Franka_status.py","file_name":"Franka_status.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"176571832","text":"# -*- coding: utf-8 -*-\nfrom osv import fields, osv\nfrom tools.translate import _\n\nclass projectScrumRelease(osv.osv):\n _name = 'project.scrum.release'\n \n _columns = {\n 'name': fields.char(\"Name\", size=128, required=True),\n 'goal': fields.text(\"Goal\"),\n 'project_id': fields.many2one('project.project', \"Project\", domain=[('is_scrum', '=', True)], required=True),\n 'date_start': fields.date('Starting Date'),\n 'date_stop': fields.date('Ending Date'),\n 'delivery_date_estimated': fields.date(\"Estimated date of delivery\"),\n 'delivery_date_effective': fields.date(\"Effective date of delivery\"),\n }\n\nclass projectProjectInehrit(osv.osv):\n _inherit = 'project.project'\n _columns = {\n 'release_ids': fields.one2many('project.scrum.release', 'project_id', \"Releases\", readonly=True),\n }\n\n","sub_path":"project_scrum/project_scrum_release.py","file_name":"project_scrum_release.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"584559484","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('djangoStudent', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='students',\n name='pol',\n field=models.CharField(choices=[('M', 'Мужской'), ('F', 'Женский')], verbose_name='пол', default='M', max_length=5),\n ),\n ]\n","sub_path":"djangoStudent/migrations/0002_auto_20170109_2127.py","file_name":"0002_auto_20170109_2127.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"159642240","text":"### Multiple inheritance easily makes code confusing, so it is best avoided (* this relates to Method Resolution Order, a.k.a. MRO):\r\nfrom pprint import pprint\r\nclass One():\r\n str1 = 'data of class One'\r\nclass Two():\r\n str2 = 'data of class Two'\r\nclass Main(One,Two):\r\n def OutPut(self):\r\n print('result of the third class after multiple inheritance:')\r\n 
print(self.str1)\r\n print(self.str2)\r\nP = Main()\r\npprint(Main.mro()) ### inspect the resolution order\r\nP.OutPut()\r\nP = None\r\n### Related page: https://www.jianshu.com/p/71c14e73c9d9\r\n### Beginner books distinguish calling the parent class directly from using the super method:\r\n### With multiple inheritance, calling parents directly runs every parent in order (shared parents run repeatedly), which can re-assign the code's data\r\n### With super, later classes in the call order (usually the parents) run first (so a shared parent is not called again), and then the earlier classes run, from back to front:\r\nclass Fir():\r\n def __init__(self):\r\n print('called the level-1 parent class')\r\nclass Sec(Fir):\r\n def __init__(self):\r\n super().__init__()\r\n print('called the first level-2 subclass')\r\nclass Thi(Fir):\r\n def __init__(self):\r\n super().__init__()\r\n print('called the second level-2 subclass')\r\nclass For(Sec,Thi):\r\n def __init__(self):\r\n super().__init__()\r\n print('called the level-3 subclass')\r\npprint(For.mro())\r\nP = For()\r\n","sub_path":"类和对象/继承另类功能(多重继承).py","file_name":"继承另类功能(多重继承).py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"549584096","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 24 22:15:11 2018\n\n@author: Lidor\n\"\"\"\n\nfrom List_Item import List_Item\nfrom Shop_Item import Shop_Item\n\nclass Shopping_List:\n \"\"\"Represents a shopping list.\n Attributes: shopping list id, customer name, items, total price\"\"\"\n def __init__(self, sl_id, customer_name):\n \"Constructor for shopping list\"\n self.sl_id=sl_id\n self.customer_name=customer_name\n self.items=[]\n self.price_total=0\n \n def get_id(self):\n \"Return this shopping list id\"\n return self.sl_id\n \n def get_customer_name(self):\n \"Return customer name for this shopping list\"\n return self.customer_name\n \n def total_price(self):\n \"Return total price of this shopping list\"\n return self.price_total\n \n def __add__(self, item):\n \"Adds item/s to shopping list\"\n item_instance=List_Item(1, 'request')\n given_items_list=[]\n flag=False #to check if item exists in this item list\n if issubclass(type(item), type(item_instance)): #received 1 item\n given_items_list.append(item)\n else: #received multiple items\n given_items_list=item\n for i in given_items_list: #for given items\n for j in self.items: #for exist items\n if i.item_id==j.item_id:\n j.qty+=i.qty\n j.request=i.request\n flag=True\n if flag==False:\n self.items.append(i)\n flag=False #initial flag for next iteration\n self.price_total+=i.qty*i.unit_price\n \n def item_summary(self):\n \"Returns all products and quantities in shopping list\"\n res_list=[]\n for i in self.items:\n t=(i.item_name,i.qty)\n res_list.append(t)\n return res_list\n \n def is_empty(self):\n \"Returns True if shopping list is empty. Else - False\"\n return len(self.items)==0\n \n def __gt__(self, other):\n \"Returns True if this shopping list is greater than other. 
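A compact standard-library check of the cooperative super() ordering described above: each class appears exactly once in the MRO, and the calls unwind from the back of the order to the front:

class A:
    def ping(self):
        return ['A']

class B(A):
    def ping(self):
        return super().ping() + ['B']

class C(A):
    def ping(self):
        return super().ping() + ['C']

class D(B, C):
    def ping(self):
        return super().ping() + ['D']

assert [k.__name__ for k in D.__mro__] == ['D', 'B', 'C', 'A', 'object']
assert D().ping() == ['A', 'C', 'B', 'D']  # the shared parent A runs only once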
Else - False\"\n if self.price_total>other.price_total:\n return True\n if self.price_total<other.price_total:\n return False\n sum_self=self.num_of_items_total()\n sum_other=other.num_of_items_total()\n if sum_self>sum_other:\n return True\n if sum_self<sum_other:\n return False\n if len(self.items)>len(other.items):\n return True\n else:\n return False\n \n def num_of_items_total(self):\n \"Returns total number of items in shopping list\"\n sum=0\n for i in self.items:\n sum+=i.qty\n return sum\n \n def __repr__(self):\n \"Returns a string, representing shopping list\"\n res='*' * 30 + '\\n'\n res+='List id: ' + str(self.get_id()) + '\\n'\n res+='Number of Products: ' + str(len(self.items)) + '\\n'\n res+='Number of Items: ' + str(self.num_of_items_total()) + '\\n'\n res+='Total Price: ' + str(self.price_total)\n res+='\\n' + '*' * 30\n return res","sub_path":"assign.8/Shopping_List.py","file_name":"Shopping_List.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"500152431","text":"from scipy.cluster.hierarchy import linkage, dendrogram\nfrom sklearn.externals import joblib\nimport matplotlib.pyplot as plt\n\n\ndef cls_ward():\n vec = joblib.load('country_vec.pkl')\n country = joblib.load('country_dic.pkl')\n result = linkage(vec, method='ward')\n plt.figure(num=None, figsize=(16, 9), dpi=300)\n dendrogram(result, labels=list(country.values()))\n plt.show()\n\n\nif __name__ == '__main__':\n cls_ward()","sub_path":"hotate/chapter10/knock98.py","file_name":"knock98.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"189198133","text":"from __future__ import absolute_import, unicode_literals\n\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError, transaction\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\n\nfrom ..base import BaseTestCase\nfrom ..views import regular_view\n\n\n@override_settings(DEBUG_TOOLBAR_PANELS=['debug_toolbar.panels.profiling.ProfilingPanel'])\nclass ProfilingPanelTestCase(BaseTestCase):\n\n def setUp(self):\n super(ProfilingPanelTestCase, self).setUp()\n self.panel = self.toolbar.get_panel_by_id('ProfilingPanel')\n\n def test_regular_view(self):\n self.panel.process_view(self.request, regular_view, ('profiling',), {})\n self.panel.process_response(self.request, self.response)\n self.panel.generate_stats(self.request, self.response)\n self.assertIn('func_list', self.panel.get_stats())\n self.assertIn('regular_view', self.panel.content)\n\n def test_insert_content(self):\n \"\"\"\n Test that the panel only inserts content after generate_stats and\n not the process_response.\n \"\"\"\n self.panel.process_view(self.request, regular_view, ('profiling',), {})\n self.panel.process_response(self.request, self.response)\n # ensure the panel does not have content yet.\n self.assertNotIn('regular_view', self.panel.content)\n self.panel.generate_stats(self.request, self.response)\n # ensure the panel renders correctly.\n self.assertIn('regular_view', self.panel.content)\n\n\n@override_settings(DEBUG=True,\n DEBUG_TOOLBAR_PANELS=['debug_toolbar.panels.profiling.ProfilingPanel'])\nclass ProfilingPanelIntegrationTestCase(TestCase):\n\n def test_view_executed_once(self):\n self.assertEqual(User.objects.count(), 0)\n\n response = self.client.get('/new_user/')\n self.assertContains(response, 'Profiling')\n self.assertEqual(User.objects.count(), 1)\n\n with self.assertRaises(IntegrityError):\n with transaction.atomic():\n response = self.client.get('/new_user/')\n 
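The price / item-count / product-count cascade in __gt__ above is a lexicographic comparison, which functools.total_ordering plus a key tuple can express with less branching; a sketch of the idea, not the assignment's required interface:

from functools import total_ordering

@total_ordering
class ComparableList:
    def __init__(self, price, n_items, n_products):
        self.key = (price, n_items, n_products)

    def __eq__(self, other):
        return self.key == other.key

    def __gt__(self, other):
        # Tuples compare left to right, mirroring the tie-breaking above.
        return self.key > other.key

assert ComparableList(10, 5, 2) > ComparableList(10, 4, 9)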
self.assertEqual(User.objects.count(), 1)\n","sub_path":"tests/panels/test_profiling.py","file_name":"test_profiling.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"216119891","text":"import torch\n\nfrom ..bbox import TemplatePseudoSamplerNobbox, build_assigner\nfrom ..utils import multi_apply\n\n\ndef template2delta(proposals, scales, gt, means, stds, use_out_scale):\n assert proposals.shape[0] == gt.shape[0]\n\n proposals = proposals.view(proposals.shape[0], -1, 2)\n proposals = proposals.float()\n gt = gt.float()\n\n px = proposals[:, :, 0]\n py = proposals[:, :, 1]\n\n gx = gt[:, 0::3]\n gy = gt[:, 1::3]\n gv = gt[:, 2::3]\n\n if use_out_scale:\n dx = (gx - px) / scales\n dy = (gy - py) / scales\n else:\n pw = torch.max(proposals[:, :, 0], -1)[0] - torch.min(proposals[:, :, 0], -1)[0] + 1.0\n ph = torch.max(proposals[:, :, 1], -1)[0] - torch.min(proposals[:, :, 1], -1)[0] + 1.0\n dx = (gx - px) / pw.view(-1, 1)\n dy = (gy - py) / ph.view(-1, 1)\n\n inds = gv == 0\n dx[inds] = 0\n dy[inds] = 0\n\n deltas = torch.stack([dx, dy], dim=-1)\n deltas = deltas.view(deltas.shape[0], -1)\n\n means = deltas.new_tensor(means).unsqueeze(0)\n stds = deltas.new_tensor(stds).unsqueeze(0)\n deltas = deltas.sub_(means).div_(stds)\n\n return deltas\n\n\ndef template_target_nobbox(anchor_list,\n anchor_scale_list,\n valid_flag_list,\n gt_keypoints_list,\n img_metas,\n target_means,\n target_stds,\n anchor_infos,\n use_out_scale,\n cfg,\n gt_labels_list=None,\n label_channels=1,\n sampling=True,\n unmap_outputs=True):\n num_imgs = len(img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list[i] = torch.cat(anchor_list[i])\n valid_flag_list[i] = torch.cat(valid_flag_list[i])\n anchor_scale_list[i] = torch.cat(anchor_scale_list[i])\n\n if gt_labels_list is None:\n gt_labels_list = [None for _ in range(num_imgs)]\n (all_labels, all_label_weights, all_reg_targets, all_reg_weights,\n pos_inds_list, neg_inds_list) = multi_apply(\n template_target_single,\n anchor_list,\n anchor_scale_list,\n valid_flag_list,\n gt_keypoints_list,\n gt_labels_list,\n img_metas,\n target_means=target_means,\n target_stds=target_stds,\n anchor_infos=anchor_infos,\n use_out_scale=use_out_scale,\n cfg=cfg,\n sampling=sampling,\n unmap_outputs=unmap_outputs)\n # no valid anchors\n if any([labels is None for labels in all_labels]):\n return None\n # sampled anchors of all images\n num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n\n\n # split targets to a list w.r.t. 
multiple levels\n labels_list = images_to_levels(all_labels, num_level_anchors)\n label_weights_list = images_to_levels(all_label_weights, num_level_anchors)\n reg_targets_list = images_to_levels(all_reg_targets, num_level_anchors)\n reg_weights_list = images_to_levels(all_reg_weights, num_level_anchors)\n\n num_pos_list = []\n num_neg_list = []\n for l, w in zip(labels_list, label_weights_list):\n num_pos = torch.sum(l > 0)\n num_all = torch.sum(w > 0)\n num_neg = num_all - num_pos\n num_pos_list.append(num_pos.float())\n num_neg_list.append(num_neg.float())\n\n return (labels_list, label_weights_list, reg_targets_list,\n reg_weights_list, num_total_pos, num_total_neg, num_pos_list, num_neg_list)\n\n\ndef images_to_levels(target, num_level_anchors):\n target = torch.stack(target, 0)\n level_targets = []\n start = 0\n for n in num_level_anchors:\n end = start + n\n level_targets.append(target[:, start:end].squeeze(0))\n start = end\n return level_targets\n\n\ndef template_target_single(flat_anchors,\n flat_anchors_scales,\n valid_flags,\n gt_keypoints,\n gt_labels,\n img_meta,\n target_means,\n target_stds,\n anchor_infos,\n use_out_scale,\n cfg,\n sampling=True,\n unmap_outputs=True):\n inside_flags = template_inside_flags(flat_anchors, valid_flags,\n img_meta['img_shape'][:2],\n cfg.allowed_border)\n if not inside_flags.any():\n return (None, ) * 6\n # assign gt and sample anchors\n anchors = flat_anchors[inside_flags, :]\n anchors_scales = flat_anchors_scales[inside_flags, :]\n\n if sampling:\n assert 0\n else:\n template_assigner = build_assigner(cfg.assigner)\n assign_result = template_assigner.assign(anchors, anchors_scales, anchor_infos, gt_keypoints, gt_labels)\n template_sampler = TemplatePseudoSamplerNobbox()\n sampling_result = template_sampler.sample(assign_result, anchors, anchors_scales,\n gt_keypoints.view(gt_keypoints.shape[0], -1))\n\n num_valid_anchors = anchors.shape[0]\n template_targets = torch.zeros_like(anchors)\n template_weights = torch.zeros_like(anchors)\n labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long)\n label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n if len(pos_inds) > 0:\n pos_template_targets = template2delta(sampling_result.pos_templates,\n sampling_result.pos_templates_scales,\n sampling_result.pos_gt_keypoints,\n target_means, target_stds, use_out_scale)\n template_targets[pos_inds, :] = pos_template_targets\n t_v = sampling_result.pos_gt_keypoints[:, 2::3].clone()\n t_v[t_v > 0.5] = 1.0\n t_v[t_v < 0.5] = 0.0\n t_v = torch.stack([t_v, t_v], dim=-1).reshape(t_v.shape[0], -1)\n template_weights[pos_inds, :] = t_v\n if gt_labels is None:\n assert 0\n else:\n labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]\n if cfg.pos_weight <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = cfg.pos_weight\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n # map up to original set of anchors\n if unmap_outputs:\n num_total_anchors = flat_anchors.size(0)\n labels = unmap(labels, num_total_anchors, inside_flags)\n label_weights = unmap(label_weights, num_total_anchors, inside_flags)\n template_targets = unmap(template_targets, num_total_anchors, inside_flags)\n template_weights = unmap(template_weights, num_total_anchors, inside_flags)\n\n return (labels, label_weights, template_targets, template_weights, pos_inds,\n neg_inds)\n\n\ndef template_inside_flags(flat_anchors, valid_flags, img_shape,\n 
allowed_border=0):\n img_h, img_w = img_shape[:2]\n if allowed_border >= 0:\n inside_flags = valid_flags & \\\n (flat_anchors[:, 0] >= -allowed_border).type(torch.uint8) & \\\n (flat_anchors[:, 1] >= -allowed_border).type(torch.uint8) & \\\n (flat_anchors[:, 2] < img_w + allowed_border).type(torch.uint8) & \\\n (flat_anchors[:, 3] < img_h + allowed_border).type(torch.uint8)\n else:\n inside_flags = valid_flags\n return inside_flags\n\n\ndef unmap(data, count, inds, fill=0):\n \"\"\" Unmap a subset of item (data) back to the original set of items (of\n size count) \"\"\"\n if data.dim() == 1:\n ret = data.new_full((count, ), fill)\n ret[inds] = data\n else:\n new_size = (count, ) + data.size()[1:]\n ret = data.new_full(new_size, fill)\n ret[inds, :] = data\n return ret\n","sub_path":"mmdet/core/anchor/template_target_nobbox.py","file_name":"template_target_nobbox.py","file_ext":"py","file_size_in_byte":8389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"473152399","text":"# This program is written in Python3\n# Author : Masazumi Katoh\n# coding : UTF-8\n# Last Update : 2017/12/1\n\n# to use MCP3008\nfrom gpiozero import MCP3008\n# to check whether path exists or not\nfrom os import path\n# to get date and time\nfrom datetime import datetime\nfrom datetime import timedelta\n# to sleep\nfrom time import sleep\n# to write data to csv file\nimport csv\n# to run function per fixed time\nimport signal\n\n# initial setting\n# sampling rate [Hz]\n# maximum rate is 60[Hz] @ Raspberry Pi 3\nsampling_rate = 10\n# sampling period [sec]\nsampling_period = timedelta(seconds = 1. / sampling_rate)\n# length of value list\nlength = 6\n# list to save sensor values\nvalue_list = [0 for i in range(length)]\n# threshold value[V]\nthreshold = 1.\n# flag to save judge_threshold\nthreshold_flag = False\n\n# date\ndate = datetime.now().strftime(\"%Y%m%d\")\n# logfile's name\nlogfile = date + \"_log.csv\"\n\n# channel to use MCP3008\nchannel = 0\n# voltage range [V]\nV0 = 3.3\n\ndef get_sensor_value(channel, value_list, length):\n\tinput_value = MCP3008(channel = channel)\n\tpresent_value = V0 * input_value.value\n\n\tfor i in range(length - 1):\n\t\tvalue_list[i] = value_list[i+1]\n\n\tvalue_list[length - 1] = present_value\n\n\treturn value_list\n\ndef smoothing(value_list, length):\n\tsmoothed_value = sum(value_list) / length\n\n\treturn smoothed_value\n\ndef judge_threshold(smoothed_value, threshold):\n\tif (smoothed_value >= threshold):\n\t\tthreshold_flag = True\n\telse:\n\t\tthreshold_flag = False\n\n\treturn threshold_flag\n\ndef calc_timedelta(standard_time, sampling_period):\n\t# calculate timedelta\n\tcurrent_time = datetime.now()\n\tdif = current_time - standard_time\n\ttimedelta = (dif % sampling_period).total_seconds()\n\n\treturn timedelta\n\ndef write_to_csv(logfile, smoothed_value, threshold_flag):\n\t# open csv file to record data\n\tif path.exists(logfile):\n\t\tf = open(logfile, \"a\")\n\t\twriter = csv.writer(f)\n\telse:\n\t\tf = open(logfile, \"a\")\n\t\twriter = csv.writer(f)\n\t\twriter.writerow([\"Time\", \"Value\", \"Over Threshold\"])\n\n\trecord_time = datetime.now().strftime(\"%H:%M:%S.%f\")\n\twriter.writerow([record_time, \"%.3f\" %(smoothed_value), threshold_flag])\n\tprint(record_time, \"%.3f\" %(smoothed_value), threshold_flag)\n\n\tf.close()\n\n# get standard time\nstandard_time = datetime.now()\n\nwhile True:\n\tt1 = datetime.now()\n\tvalue_list = get_sensor_value(channel, value_list, length)\n\tt2 = 
datetime.now()\n\tsmoothed_value = smoothing(value_list, length)\n\tt3 = datetime.now()\n\tthreshold_flag = judge_threshold(smoothed_value, threshold)\n\tt4 = datetime.now()\n\twrite_to_csv(logfile, smoothed_value, threshold_flag)\n\tt5 = datetime.now()\n\twait_time = sampling_period.total_seconds() - calc_timedelta(standard_time, sampling_period)\n\tt6 = datetime.now()\n\tprint(\"get_sensor_value : \", t2 - t1)\n\tprint(\"smoothing : \", t3 - t2)\n\tprint(\"judge_threshold : \", t4 - t3)\n\tprint(\"write_to_csv : \", t5 - t4)\n\tprint(\"wait_time : \", t6 - t5)\n\tprint(\"total :\",t6 - t1)\n\tsleep(wait_time)\n","sub_path":"Analog/main/main_Ver.1.py","file_name":"main_Ver.1.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"363441740","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0002_auto_20160622_1017'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='post',\n name='img',\n field=models.ImageField(upload_to='%Y/%m/%d/', verbose_name='配图', default='1'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='post',\n name='title',\n field=models.CharField(max_length=50, verbose_name='文章标题'),\n ),\n ]\n","sub_path":"blog/migrations/0003_auto_20160622_1524.py","file_name":"0003_auto_20160622_1524.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"610247253","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os.path\n\nn = 5\n\ncategories = []\ndfs = []\n\nprocs = [\"HW-\", \"Htt\", \"HW+\", \"W-ZZ\", \"W+ZZ\"]\n\ni = 0\nfor nem in range(n + 1):\n for nep in range(n - nem + 1):\n for nmm in range(n - nem - nep + 1):\n for nmp in range(n - nem - nep - nmm + 1):\n if sum([nem, nep, nmm, nmp]) == n and abs(nep + nmp - nem - nmm) <= 1:\n print(i)\n i = i + 1\n print(\"Finding ways to get {} e-, {} e+, {} m-, {} m+:\".format(nem, nep, nmm, nmp))\n categories.append(nep*\"$e^+$\"+nem*\"$e^-$\"+nmp*\"$\\mu^+$\"+nmm*\"$\\mu^-$\")\n fname = \"{}em{}ep{}mm{}mp.csv\".format(nem, nep, nmm, nmp)\n if os.path.isfile(fname):\n df = pd.read_csv(fname)\n df = df[df.label.isin(procs)]\n dfs.append(df)\n else:\n dfs.append(pd.DataFrame())\n\n# for i, df in enumerate(dfs):\n # if len(df) == 0:\n # continue\n # print(i)\n # print(categories[i])\n # print(df)\n\n# df1 = dfs[0] + dfs[2] + dfs[18] + dfs[20]\n# df2 = dfs[1] + dfs[8] + dfs[19] + dfs[23]\n# df3 = dfs[9] + dfs[11]\n# df4 = dfs[10] + dfs[17]\n\ndf1 = dfs[0] + dfs[2] + dfs[18] + dfs[20] + dfs[9] + dfs[11]\ndf2 = dfs[1] + dfs[8] + dfs[19] + dfs[23] + dfs[10] + dfs[17]\n\ndf1.label = dfs[0].label\ndf2.label = dfs[1].label\n\nprint(df1)\nprint(df2)\n\nplt.figure()\nplt.ylabel(r'effective cross section [fb${}^{-1}$]')\nplt.bar([\"WH\", \"WZZ\", \"ttH\"], (df2.xsec_times_br + df1.xsec_times_br), width=0.5, label =r'$q=-1$')\nplt.bar([\"WH\", \"WZZ\", \"ttH\"], df1.xsec_times_br, width=0.5, label =r'$q=+1$')\n\nplt.legend(loc=\"upper right\")\nax = plt.gca()\nax.tick_params(labeltop=False, 
labelright=True)\nplt.savefig(\"5-leptons.pdf\")\nplt.savefig(\"5-leptons.png\")\n","sub_path":"python/5-leptons_plot.py","file_name":"5-leptons_plot.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"291521962","text":"#!/usr/bin/python\nfrom datetime import datetime, time, timedelta\nfrom django.core.management.base import BaseCommand\nfrom optparse import make_option\n# explicit imports so Session and settings do not rely on the wildcard imports below\nfrom django.conf import settings\nfrom django.contrib.sessions.models import Session\nfrom redistricting.models import *\nfrom redistricting.utils import *\n\nclass Command(BaseCommand):\n    \"\"\"\n    This command prints the number of active users in the system over a period of time\n    \"\"\"\n    args = None\n    help = 'Print the number of active users in the system over a period of time'\n    option_list = BaseCommand.option_list + (\n        make_option('-m', '--minutes', dest='minutes', default='5', action='store', help='Number of minutes'),\n    )\n\n    def handle(self, *args, **options):\n        \"\"\"\n        Print the number of active users\n        \"\"\"\n        minutes = int(options.get('minutes'))\n        users = 0\n        for session in Session.objects.all():\n            decoded = session.get_decoded()\n            if 'activity_time' in decoded and (decoded['activity_time'] - timedelta(0,0,0,0,settings.SESSION_TIMEOUT)) > (datetime.now() - timedelta(0,0,0,0,minutes)):\n                users += 1\n\n        self.stdout.write('Number of active users over the last %d minute(s): %d\\n' % (minutes, users))\n","sub_path":"django/publicmapping/redistricting/management/commands/numusers.py","file_name":"numusers.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"181907858","text":"from odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\n\n\nclass LateEarlyTimeInterval(models.Model):\n    _name = 'late.early.time.interval'\n    _order = 'id asc'\n\n    resource_calendar_id = fields.Many2one('resource.calendar')\n    time_interval = fields.Char(compute='_compute_time_interval')\n    first_operator = fields.Selection([('<', '<'), ('<=', '<=')])\n    first_operand = fields.Float()\n    second_operator = fields.Selection([('<', '<'), ('<=', '<=')])\n    second_operand = fields.Float()\n    late_early_penalty_line_ids = fields.One2many('late.early.penalty.line', 'late_early_time_interval_id')\n\n    @api.depends('first_operator', 'first_operand', 'second_operator', 'second_operand')\n    def _compute_time_interval(self):\n        float_to_time = lambda x: '{0:02.0f}:{1:02.0f}'.format(*divmod(x * 60, 60))\n        for rec in self:  # a compute method must assign the field on every record in self\n            rec.time_interval = ''\n            if rec.first_operator:\n                rec.time_interval = \"%s%st\" % (str(float_to_time(rec.first_operand)), str(rec.first_operator))\n            if rec.second_operator:\n                rec.time_interval += \"%s%s\" % (str(rec.second_operator), str(float_to_time(rec.second_operand)))\n\n    @api.constrains('first_operand', 'second_operand')\n    def _check_first_second_operand(self):\n        for rec in self:\n            if not rec.first_operand and not rec.second_operand:\n                raise ValidationError(\"First & Second operand can't both be zero\")\n            if rec.first_operand and rec.second_operator:\n                if rec.second_operand <= rec.first_operand:\n                    raise ValidationError(\"Second operand can't be smaller than first operand\")\n            elif rec.first_operand < 0 or rec.second_operand < 0:\n                raise ValidationError(\"Can't use negative numbers in time interval (There is no time in 
negative)\")","sub_path":"centione_hr_late_early_absence/models/late_early_time_interval.py","file_name":"late_early_time_interval.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"555306676","text":"#!/usr/bin/env python\n# coding: utf-8\n#Give the directory at the place of Directory_path which has your file\n# In[2]:\n\n\nimport phonenumbers,csv\nfrom phonenumbers import carrier\nfrom phonenumbers import geocoder\nfrom tabulate import tabulate\n\ndef phonenumber_scanner(phone_numbers):\n info = [[\"Number\",\"Country\",\"Network\"]]\n for phonnumber in range(0,len(phone_numbers)):\n number = phonenumbers.parse(phone_numbers[phonnumber])\n description = geocoder.description_for_number(number,\"en\")\n supplier = carrier.name_for_number(number,\"en\")\n info.append([phone_numbers[phonnumber],description,supplier])\n data = str(tabulate(info,headers = \"firstrow\",tablefmt = \"fancy_grid\"))\n print(data)\n with open('Directory_path(target)','a+') as results:\n writer = csv.writer(results)\n with open('Directory_path(source)','r') as csvfile:\n reader = csv.reader(csvfile)\n record = list(reader)\n if len(record) == 0:\n writer.writerow(info[0])\n for records in range(1,len(info)):\n writer.writerow(info[records])\n else:\n for records in range(1,len(info)):\n writer.writerow(info[records])\n\n\nphone_numbers = []\n#If the input is from any csv files\nwith open('Directory_path(source)','r') as csvfile: \n reader = csv.DictReader(csvfile)\n for record in reader:\n phone_numbers.append(record['Phone'])\n\n#If the input needs to be given manually\n#Enter the input as 1 to stop giving inputs\nwhile(1):\n phone_number = input(\"Enter Phone Number(s) :\")\n if(phone_number == str(1)):\n break\n else:\n phone_numbers.append(phone_number)\nphonenumber_scanner(phone_numbers)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"PhoneNetwork_finder.py","file_name":"PhoneNetwork_finder.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"342503224","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\nuntitled.py\n\nCreated by Loki on 2013-08-18.\nCopyright (c) 2013 CreativeCreations LLC.\nAll rights reserved.\n\n\nDescription:\n\"\"\"\n\n## Authorship:\n__author__ = \"loki\"\n__copyright__ = \"CreativeCreationsLLC\"\n__credits__ = [\"Loki\", \"Johnny\"]\n__version__ = \"1.0.1\"\n__email__ = \"jgdipalma@creativecreationsllc.com\"\n__status__ = \"Alpha\"\n__name__ = \"urlandfileutility\"\n\n## Imports:\n\nclass OpenUtility(object):\n \"\"\"docstring for OpenUtility\"\"\"\n def __init__(self, name, type, ext, state, loc, permission, excutable, timestamp):\n super(OpenUtility, self).__init__()\n self.type = type\n self.ext = ext\n self.state = state\n self.location = location\n self.permission = permission\n self.excutable = excutable\n","sub_path":"Modules/urlandfileutility.py","file_name":"urlandfileutility.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"297780580","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n @author hsjfans \n\"\"\"\nimport json\n\nimport pandas as pd\nimport re\n\nfile_path = \"/Volumes/doc/xxx.xlsx\"\n\n\n# head[\"info\"] = {\ndef gen_head(topic):\n # \"name\": \"fast_repair\",\n # \"schema\": \"https://schema.getpostman.com/json/collection/v2.1.0/collection.json\"\n head = {\n 
\"name\": topic,\n \"auth\": \"hua\",\n \"description\": topic,\n }\n head[\"requests\"] = []\n return head\n\n\nclass Api:\n def __init__(self, topic, api_name, api_url, api_params, api_response, desc, idx):\n \"\"\"\n 定义结构体\n :param topic: 主题\n :param api_name: 接口名称\n :param api_url: 接口链接\n :param api_params: 参数\n :param api_response: 返回值\n :param desc: 描述\n :param idx: index\n \"\"\"\n self.topic = topic\n self.api_name = api_name\n self.api_url = api_url\n self.api_params = api_params\n self.api_response = api_response\n if desc == \"Non\":\n desc = \"\"\n self.desc = desc\n self.idx = idx\n\n def to_standard_item(self):\n \"\"\"\n 生成标准类型\n :return:\n \"\"\"\n item = {}\n item[\"name\"] = self.api_name\n item[\"description\"] = self.desc\n item[\"dataMode\"] = \"raw\"\n item[\"method\"] = \"post\"\n item[\"data\"] = []\n item[\"url\"] = self.api_url\n item[\"queryParams\"] = []\n item[\"auth\"] = \"hua\"\n # item[\"headers\"] = \"Content-Type: application/json\\n\"\n item[\"headerData\"] = {\n \"key\": \"Content-Type\",\n \"name\": \"Content-Type\",\n \"type\": \"text\",\n \"value\": \"application/json\"\n }\n item[\"pathVariableData\"] = \"\"\n item[\"rawModeData\"] = self.api_params\n return item\n\n def parse_api_url(self):\n items = re.split(r'[(://)|/|.]+', self.api_url)\n protocol = items[0]\n host = [items[1], items[2], items[3]]\n path = items[4:]\n return protocol, host, path\n\n\nclass Iter:\n\n def __init__(self, file_path=file_path):\n self.file_path = file_path\n self.api = []\n self.topics = []\n self.index = 0\n self.result = []\n\n def start(self):\n self.__build()\n return self\n\n def __build(self):\n \"\"\"\n 开始\n :return:\n \"\"\"\n self.__load_api()\n self.__change_to_api()\n\n def __load_api(self):\n \"\"\"result\n 加载文档\n :return:\n \"\"\"\n xl = pd.ExcelFile(self.file_path)\n self.topics = xl.sheet_names\n for topic in self.topics:\n self.api.append(xl.parse(sheet_name=topic))\n\n def _parse_dataSet(self, dataSet: pd.DataFrame, topic, body):\n \"\"\"\n 解析文档\n :return:\n \"\"\"\n raise NotImplementedError(\"该方法必须被重写\")\n\n def __change_to_api(self):\n \"\"\"\n 转化为结构体列表\n :return: []Api\n \"\"\"\n for idx, topic in enumerate(self.topics):\n dataSet = self.api[idx]\n if len(dataSet) > 5:\n dataSet = dataSet.ix[:, :5]\n body = gen_head(topic)\n self._parse_dataSet(dataSet, topic, body)\n self.result.append(body)\n\n\n\n\n\n\nclass XbIter(Iter):\n\n def _parse_dataSet(self, dataSet: pd.DataFrame, topic, body):\n \"\"\"\n 解析\n :param dataSet:\n :param models:\n :param topic\n :return:\n \"\"\"\n if len(dataSet.columns) == 5:\n dataSet.columns = [\"api_name\", \"api_url\", \"api_params\", \"api_response\", \"desc\"]\n # 删掉Non 行\n dataSet = dataSet.dropna(axis=0, how='all')\n dataSet = dataSet.fillna(\"\")\n for item in dataSet.to_dict(orient='records'):\n self.index += 1\n api = Api(topic, item[\"api_name\"], item[\"api_url\"], item[\"api_params\"], item[\"api_response\"],\n item[\"desc\"], self.index)\n body[\"requests\"].append(api.to_standard_item())\n\n\nif __name__ == '__main__':\n p = XbIter().start() # 加载\n for idx, item in enumerate(p.result):\n with open(\"./result/\" + p.topics[idx] + \".json\", 'w') as f:\n f.write(json.dumps(item))\n","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"514517207","text":"#CSCI 1133 Homework 5\n#Sid Lin\n#Problem 5C\n\ndef factorial(n):\n if n <= 1:\n return 1 #base case\n else:\n return n * 
factorial(n-1) #n multiplied by the previous term\n\ndef sinApprox(angle, term):\n\n negNum = (-1)**(term+1)\n if (term == 0):\n return 0 #base case of term less than 1\n else:\n oddNum = 2*term - 1 #alternates between - and +\n #print(angle, term, negNum, oddNum) debug\n approx = negNum * ((angle**oddNum) / factorial(oddNum)) #taylor series\n #print(approx) debug\n return approx + sinApprox(angle, term -1) #return taylor series of the terms\n #plus the next taylor series\n\n\n\n\nrad = float(input(\"Enter the angle to approximate in radians:\"))\nnum = int(input(\"Enter the number of terms to compute:\"))\n\nresult = sinApprox(rad, num)\nprint((\"sin {0} is approximately {1}.\").format(rad, result))\n","sub_path":"Eleven Thirty Three/hw5/linx1052_5C.py","file_name":"linx1052_5C.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"431437931","text":"from django.conf import settings\nfrom django import forms\nfrom contact.models import Contact\nfrom crispy_forms.helper import FormHelper\nimport urllib.parse as urllib\nimport urllib.request as urllib2\nimport json, codecs\n\n\nclass ContactForm(forms.ModelForm):\n class Meta:\n model = Contact\n fields = ['name', 'email', 'subject', 'message']\n widgets = {\n 'name': forms.TextInput(attrs={'placeholder': 'Nome'}),\n 'email': forms.TextInput(attrs={'placeholder': 'Email'}),\n 'subject': forms.TextInput(attrs={'placeholder': 'Assunto'}),\n 'message': forms.Textarea(attrs={'placeholder': 'Mensagem'}),\n }\n\n def __init__(self, *args, **kwargs):\n # make the request object available to the form object\n self.request = kwargs.pop('request', None)\n super(ContactForm, self).__init__(*args, **kwargs)\n\n self.fields['name'].label = ''\n self.fields['email'].label = ''\n self.fields['subject'].label = ''\n self.fields['message'].label = ''\n\n self.helper = FormHelper()\n self.helper.form_show_labels = False\n\n def clean(self):\n super(ContactForm, self).clean()\n\n # test the google recaptcha\n url = \"https://www.google.com/recaptcha/api/siteverify\"\n values = {\n 'secret': settings.RECAPTCHA_SECRET_KEY,\n 'response': self.request.POST.get(u'g-recaptcha-response', None),\n 'remoteip': self.request.META.get(\"REMOTE_ADDR\", None),\n }\n data = urllib.urlencode(values)\n binary_data = data.encode('utf-8')\n req = urllib2.Request(url, binary_data)\n response = urllib2.urlopen(req)\n reader = codecs.getreader('utf-8')\n result = json.load(reader(response))\n\n # result[\"success\"] will be True on a success\n if not result[\"success\"]:\n raise forms.ValidationError('A validação reCAPTCHA falhou. 
Por favor tente de novo.')\n\n        return self.cleaned_data\n","sub_path":"contact/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"102112620","text":"#coding=utf-8\n\n\"\"\"\n@author: evilXu\n@file: factor_data.py\n@time: 2017/11/14 16:12\n@description: \n\"\"\"\n\nimport pandas as pd\nfrom datetime import datetime,date,timedelta\nimport shutil\nimport os\n\nclass FactorData():\n    def __init__(self,fname,path=\"\",defaultInitDate = date(1990,1,1)):\n        '''\n        One data file per year.\n        path: directory that holds this factor's data files\n        '''\n        # print(\"FactorData\",path,fname)\n        self._factorPath = os.path.join(path,fname)\n        self._name = fname\n        self._defaultInitDate = defaultInitDate if defaultInitDate is not None else date(1990,1,1)\n        self.create()\n\n    @property\n    def name(self):\n        return self._name\n\n    def create(self):\n        if not os.path.exists(self._factorPath):\n            os.mkdir(self._factorPath)\n\n    def reset(self):\n        if os.path.exists(self._factorPath):\n            shutil.rmtree(self._factorPath)\n        os.mkdir(self._factorPath)\n\n    def getLatestDate(self):\n        # df = pd.DataFrame()\n        _fileList = []\n        for i in os.listdir(self._factorPath):\n            _absPath = os.path.join(self._factorPath, i)\n            if os.path.isfile(_absPath):\n                _fileList.append(_absPath)\n        _fileList = sorted(_fileList)  # read the files in chronological order\n        if len(_fileList) > 0:\n            df1 = pd.read_hdf(_fileList[-1])\n            return df1.index[-1].date()\n        else:\n            return self._defaultInitDate\n\n    def load(self,startDt = None,endDt = None):\n        df = pd.DataFrame()\n        if startDt is None or endDt is None:\n            return df\n        _fileList = []\n        _startDt = startDt\n        _endDt = endDt\n        _startYear = str(_startDt.year)\n        _endYear = str(_endDt.year)\n        for i in os.listdir(self._factorPath):\n            _absPath = os.path.join(self._factorPath, i)\n            if os.path.isfile(_absPath) and i >= _startYear and i<= _endYear:\n                _fileList.append(_absPath)\n        _fileList = sorted(_fileList)  # read the files in chronological order\n        for _file in _fileList:\n            df1 = pd.read_hdf(_file)\n            df = df.append(df1)\n        return df.loc[_startDt:_endDt]\n\n    def append(self,datas=pd.DataFrame()):\n        if len(datas) < 1:\n            return\n        a = datas.index\n        _lastYear = a[0].year\n        years = [_lastYear]\n        idxs = [0]\n        _idx = 0\n        for day in a[1:]:\n            _idx += 1\n            if day.year != _lastYear:\n                idxs.append(_idx)\n                years.append(day.year)\n                _lastYear = day.year\n        idxs.append(len(a))\n        # print(idxs, years)\n        for i in range(len(years)):\n            self._appendAyear(years[i],datas.iloc[idxs[i]:idxs[i + 1]])\n\n    def _appendAyear(self, year, datas=pd.DataFrame()):\n        if len(datas) < 1:\n            return\n        _file = os.path.join(self._factorPath, str(year))\n        if os.path.exists(_file):\n            df = pd.read_hdf(_file)\n            df = df.append(datas, verify_integrity=True)\n            df.to_hdf(_file,key=\"root\", mode='w')\n        else:\n            datas.to_hdf(_file,key=\"root\", mode='w')\n        return\n\nclass DependingData():\n    def __init__(self,ucontext):\n        self._ucontext = ucontext\n        self.dependency = set(ucontext.dependency)\n\n    def getDependingData(self,fname,startdt,enddt):\n        if fname not in self.dependency:\n            raise NotImplementedError(\" not registered as dependency\")\n        modconfig = self._ucontext.modconfig\n        return FactorData(fname=fname,path=modconfig.factor_data_path,defaultInitDate=modconfig.factor_data_init_date)\\\n            .load(startdt,enddt)\n\nclass FactorDataInterface():\n    '''\n    Implementation behind the getFactor API\n    '''\n    def __init__(self,path=\"\",defaultInitDate=datetime(2017, 1, 1).date(),endDt = datetime.now().date()):\n        self._path = path\n        self._defaultInitDate = defaultInitDate\n        self._startDt_cache = 
self._defaultInitDate\n self._endDt_cache = endDt\n self._datas = {}\n\n def getData(self,fname=\"\",startDt=None,endDt=None):\n if fname in self._datas:\n return self._datas.get(fname).loc[startDt:endDt]\n else:\n datas = FactorData(fname,self._path,self._defaultInitDate).load(self._startDt_cache, self._endDt_cache)\n self._datas[fname] = datas\n return datas.loc[startDt:endDt]\n\nif __name__ == \"__main__\":\n obj = FactorData(\"market_value_log\",\"E:\\\\evilAlpha\\\\test\" )#)\"Z:\\\\factor_datas\"\n data = obj.load(datetime(2017, 1,25), datetime(2018, 12, 31))\n print(data)\n\n # obj = FactorData(\"testF1\",\"E:\\\\evilAlpha\\\\rqalpha\\\\test\")\n # print(obj.name)\n # import random\n # code_cnt = 3000\n # day_cnt = 20\n # df = pd.DataFrame([[random.random() for j in range(code_cnt)] for i in range(day_cnt)],\n # columns=list([\"%06d.XSHE\"%i for i in range(code_cnt)]),\n # index=[datetime(2015, 12, 1) + timedelta(days=i) for i in range(day_cnt)])\n # print(df.info())\n # # obj.reset()\n # obj.append(df)\n # # data = obj.load(datetime(2014,1,15),datetime(2017,12,31))\n # # print(data)","sub_path":"rqalpha/mod/rqalpha_mod_alphaStar_factors/factor_data.py","file_name":"factor_data.py","file_ext":"py","file_size_in_byte":5153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"508535504","text":"import argparse\nimport json\nfrom typing import Text, Optional\nimport os\nimport logging\n\nimport questionary\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import Session\nfrom tqdm import tqdm\n\nfrom rasa.cli.utils import print_error, print_success\nfrom rasa.core.domain import Domain\nfrom rasa.core.tracker_store import TrackerStore, InMemoryTrackerStore, SQLTrackerStore\nfrom rasa.core.trackers import DialogueStateTracker\nfrom rasa.core.utils import AvailableEndpoints\n\nfrom rasax.community.database import ConversationStatistic\nfrom rasax.community.services.event_service import EventService\nimport rasax.community.database.utils as db_utils\nimport rasax.community.sql_migrations as sql_migrations\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"This script migrates Rasa (Core) tracker stores to Rasa X.\n\nWhat it can do:\n- migrate any persistent tracker stores to a Rasa X compatible database.\n- migrate any persistent tracker store to a SQL tracker store \n- migrate any persistent tracker store to a SQLite tracker store which is compatible \n with the local version of Rasa X which can be installed from `pip`.\n\nHow to use it:\n\nRun the script with `python migrate_tracker_store_to_rasa_x.py`.\nThe script will prompt you for all the required information.\n\n\"\"\"\n\n\ndef _create_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Script to migrate from a Rasa Core tracker store to Rasa X.\",\n )\n parser.add_argument(\n \"--max-trackers\",\n type=int,\n default=None,\n help=\"Number of trackers to migrate. 
By default this migrates all trackers.\",\n    )\n\n    return parser\n\n\ndef _get_path_to_old_endpoints_config() -> Optional[Text]:\n    return questionary.text(\n        \"Please provide the path to your endpoints \"\n        \"configuration which \"\n        \"specifies the credentials for your old tracker store:\",\n        default=\"endpoints.yml\",\n    ).ask()\n\n\ndef _get_is_local() -> bool:\n    return questionary.confirm(\n        \"Do you want to migrate to the local version of Rasa X?\"\n    ).ask()\n\n\ndef _get_path_to_new_endpoints_config() -> Optional[Text]:\n    return questionary.text(\n        \"Please provide the path to your endpoints \"\n        \"configuration which \"\n        \"specifies the credentials for your new tracker store:\",\n        default=\"new_endpoints.yml\",\n    ).ask()\n\n\ndef _migrate_tracker_store_to_rasa_x(\n    old_endpoints_file: Text,\n    new_endpoints_file: Optional[Text],\n    is_local: bool,\n    max_number_of_trackers: Optional[int],\n) -> None:\n    old_tracker_store = _get_tracker_store_from_endpoints_config(old_endpoints_file)\n\n    reuse_old_tracker_store = old_endpoints_file == new_endpoints_file\n    if reuse_old_tracker_store:\n        print(\n            \"Old and new endpoints file is the same. \"\n            \"I will skip migrating the tracker store and only migrate the events to Rasa X.\"\n        )\n\n    # Initialize Rasa X tracker store in any case\n    rasa_x_tracker_store = _get_rasa_x_tracker_store(new_endpoints_file)\n\n    # Disable warnings regarding not existing slots\n    logging.getLogger(\"rasa.core.trackers\").setLevel(logging.CRITICAL)\n\n    if not reuse_old_tracker_store and rasa_x_tracker_store.keys():\n        should_migrate = questionary.confirm(\n            \"Found existing trackers in your Rasa X tracker store. Do you \"\n            \"still want to migrate the new trackers?\"\n        ).ask()  # .ask() actually runs the prompt; the bare Question object is always truthy\n\n        if not should_migrate:\n            exit(1)\n\n    db_session = db_utils.get_database_session(is_local)\n    sql_migrations.run_migrations(db_session)\n    event_service = EventService(db_session)\n\n    sender_ids = old_tracker_store.keys()\n\n    if max_number_of_trackers:\n        sender_ids = sender_ids[:max_number_of_trackers]\n\n    print_success(\"Start migrating {} trackers.\".format(len(sender_ids)))\n\n    nr_skipped_trackers = 0\n\n    for sender_id in tqdm(sender_ids):\n        tracker = old_tracker_store.retrieve(sender_id)\n\n        if not reuse_old_tracker_store:\n            if rasa_x_tracker_store.retrieve(sender_id):\n                nr_skipped_trackers += 1\n                logging.debug(\n                    \"Tracker for sender '{}' already exists. Skipping the \"\n                    \"migration for it.\".format(sender_id)\n                )\n\n            else:\n                # Migrate tracker store to new tracker store format\n                rasa_x_tracker_store.save(tracker)\n\n        # Replay events of tracker\n        _replay_tracker_events(tracker, event_service)\n\n    # Set latest event id so that the `SQLiteEventConsumer` only consumes not already\n    # migrated events\n    set_latest_event_id(db_session, rasa_x_tracker_store)\n\n    print_success(\n        \"Finished migrating trackers ({} were skipped since they were \"\n        \"already migrated).\".format(nr_skipped_trackers)\n    )\n\n\ndef _get_tracker_store_from_endpoints_config(endpoints_file: Text) -> TrackerStore:\n    if (\n        not endpoints_file\n        or not os.path.isfile(endpoints_file)\n        or not os.path.exists(endpoints_file)\n    ):\n        print_error(\n            \"File '{}' was not found. 
Please specify a valid file with \"\n            \"'--endpoints <file>'.\".format(endpoints_file)\n        )\n        exit(1)\n\n    endpoints = AvailableEndpoints.read_endpoints(endpoints_file)\n\n    tracker_store = TrackerStore.find_tracker_store(\n        Domain.empty(), endpoints.tracker_store\n    )\n\n    if not tracker_store or isinstance(tracker_store, InMemoryTrackerStore):\n        print_error(\n            \"No valid tracker store config given. Please provide a valid \"\n            \"tracker store configuration as it is described here: \"\n            \"https://rasa.com/docs/core/0.14.4/tracker_stores/\"\n        )\n        exit(1)\n\n    return tracker_store\n\n\ndef _get_rasa_x_tracker_store(endpoints_file: Optional[Text]) -> TrackerStore:\n    if endpoints_file and os.path.exists(endpoints_file):\n        return _get_tracker_store_from_endpoints_config(endpoints_file)\n    else:\n        return SQLTrackerStore(Domain.empty(), db=\"tracker.db\")\n\n\ndef _replay_tracker_events(\n    tracker: DialogueStateTracker, event_service: EventService\n) -> None:\n    \"\"\"Migrates the `events`, `logs`, `sessions` collections.\"\"\"\n\n    for event in tracker.events:\n        event_dict = event.as_dict()\n        # add sender id to event\n        event_dict[\"sender_id\"] = tracker.sender_id\n        stringified_event = json.dumps(event_dict)\n        # Update events + most of conversations metadata\n        _ = event_service.save_event(stringified_event)\n\n\ndef set_latest_event_id(\n    db_session: Session, rasa_x_tracker_store: SQLTrackerStore\n) -> None:\n    (max_event_id,) = rasa_x_tracker_store.session.query(\n        func.max(SQLTrackerStore.SQLEvent.id)\n    ).first()\n\n    existing = db_session.query(ConversationStatistic).first()\n\n    if existing:\n        existing.latest_event_id = max_event_id\n        db_session.commit()\n\n    logging.debug(\"Set max event id to '{}'.\".format(max_event_id))\n\n\nif __name__ == \"__main__\":\n    parser = _create_argument_parser()\n    args = parser.parse_args()\n\n    print_success(\n        \"Welcome to Rasa X 🚀 \\n\\nThis script will migrate your old tracker \"\n        \"store to the new SQL based Rasa X tracker store.\"\n    )\n    print_success(\"Let's start!\\n\")\n\n    path_to_old_endpoints_file = _get_path_to_old_endpoints_config()\n    is_local = _get_is_local()\n    path_to_new_endpoints_file = None\n    if not is_local:\n        path_to_new_endpoints_file = _get_path_to_new_endpoints_config()\n        _ = questionary.confirm(\n            \"You decided to migrate to a dockerized version of Rasa X. \"\n            \"To migrate to a Rasa X SQL database, you have to provide the database \"\n            \"credentials. \"\n            \"You can do so by setting the environment variables \"\n            \"'DB_DRIVER' (e.g. 'postgresql'), \"\n            \"'DB_USER' (e.g. 'admin'), \"\n            \"'DB_PASSWORD' (e.g. 'password'), \"\n            \"'DB_HOST' (e.g. 'localhost'), \"\n            \"'DB_PORT' (e.g. '5432'), \"\n            \"'DB_DATABASE' (e.g. 'rasa'). 
Have you done that?\"\n )\n\n _migrate_tracker_store_to_rasa_x(\n path_to_old_endpoints_file,\n path_to_new_endpoints_file,\n is_local,\n args.max_trackers,\n )\n","sub_path":"migrate_tracker_store_to_rasa_x.py","file_name":"migrate_tracker_store_to_rasa_x.py","file_ext":"py","file_size_in_byte":8307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"516840347","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2017-07-18 14:35:34\n# @Author : Li Hao (howardlee_h@outlook.com)\n# @Link : https://github.com/SAmmer0\n# @Version : $Id$\n\n'''\n行情类因子\n__version__: 1.0.0\n修改日期:2017-07-20\n修改内容:\n 初始化,添加基本因子\n'''\n__version__ = '1.0.0'\n\nimport datatoolkits\nimport dateshandle\nimport fdgetter\nimport pandas as pd\nfrom ..utils import check_indexorder, Factor, check_duplicate_factorname\nfrom ..query import query\n# --------------------------------------------------------------------------------------------------\n# 常量和功能函数\nNAME = 'quote'\n\n# --------------------------------------------------------------------------------------------------\n# 功能函数\n\n\ndef get_factor_dict():\n res = dict()\n for f in factor_list:\n res[f.name] = {'rel_path': NAME + '\\\\' + f.name, 'factor': f}\n return res\n\n\n# --------------------------------------------------------------------------------------------------\n# 获取行情相关数据\n\n\ndef get_quote(data_type):\n '''\n 母函数,用于生成获取给定行情数据的函数\n '''\n sql = '''\n SELECT S.TradingDay, data_type, M.Secucode\n FROM QT_DailyQuote S, SecuMain M\n WHERE\n S.InnerCode = M.InnerCode AND\n M.SecuMarket in (83, 90) AND\n S.TradingDay <= CAST(\\'{end_time}\\' as datetime) AND\n S.TradingDay >= CAST(\\'{start_time}\\' as datetime) AND\n M.SecuCategory = 1\n ORDER BY S.TradingDay ASC, M.Secucode ASC\n '''\n price_filter = ['openprice', 'highprice', 'lowprice']\n if data_type.lower() not in price_filter:\n transed_sql = sql.replace('data_type', 'S.' + data_type)\n cols = ('time', 'data', 'code')\n else:\n transed_sql = sql.replace('data_type', 'S.PrevClosePrice, S.' 
+ data_type)\n        cols = ('time', 'prevclose', 'data', 'code')\n\n    def _inner(universe, start_time, end_time):\n        data = fdgetter.get_db_data(transed_sql, cols=cols, start_time=start_time,\n                                    end_time=end_time, add_stockcode=False)\n        data['code'] = data.code.apply(datatoolkits.add_suffix)\n        if len(data.columns) == 4:\n            data.loc[data.data == 0, 'data'] = data['prevclose']\n            data.drop('prevclose', inplace=True, axis=1)\n        data = data.pivot_table('data', index='time', columns='code')\n        data = data.loc[:, sorted(universe)]\n        assert check_indexorder(data), 'Error, data order is mixed!'\n        return data\n    return _inner\n\n\n# Close price\nclose_price = Factor('CLOSE', get_quote('ClosePrice'), pd.to_datetime('2017-07-20'))\n# Open price\nopen_price = Factor('OPEN', get_quote('OpenPrice'), pd.to_datetime('2017-07-20'))\n# High price\nhigh_price = Factor('HIGH', get_quote('HighPrice'), pd.to_datetime('2017-07-20'))\n# Low price\nlow_price = Factor('LOW', get_quote('LowPrice'), pd.to_datetime('2017-07-20'))\n# Trading volume\nto_volume = Factor('TO_VOLUME', get_quote('TurnoverVolume'), pd.to_datetime('2017-07-20'),\n                   desc='unit: shares')\n# Trading value\nto_value = Factor('TO_VALUE', get_quote('TurnoverValue'), pd.to_datetime('2017-07-20'),\n                  desc='unit: yuan')\n\n# --------------------------------------------------------------------------------------------------\n# Adjustment factor\n\n\ndef get_adjfactor(universe, start_time, end_time):\n    '''\n    Price-adjustment factor for each stock\n    '''\n    sql = '''\n    SELECT A.ExDiviDate, A.RatioAdjustingFactor, M.SecuCode\n    FROM QT_AdjustingFactor A, SecuMain M\n    WHERE\n        A.InnerCode = M.InnerCode AND\n        M.secuMarket in (83, 90) AND\n        M.SECUCATEGORY = 1\n    ORDER BY M.SecuCode ASC, A.ExDiviDate ASC\n    '''\n    data = fdgetter.get_db_data(sql, cols=('time', 'data', 'code'), add_stockcode=False)\n    data['code'] = data.code.apply(datatoolkits.add_suffix)\n    by_code = data.groupby('code')\n    tds = dateshandle.get_tds(start_time, end_time)\n    data = by_code.apply(datatoolkits.map_data, days=tds, fromNowOn=True,\n                         fillna={'code': lambda x: x.code.iloc[0], 'data': lambda x: 1})\n    data = data.reset_index(drop=True)\n    data = data.pivot_table('data', index='time', columns='code')\n    data = data.loc[:, sorted(universe)]\n    assert check_indexorder(data), 'Error, data order is mixed!'\n    return data\n\n\nadj_factor = Factor('ADJ_FACTOR', get_adjfactor, pd.to_datetime('2017-07-21'))\n# --------------------------------------------------------------------------------------------------\n# Share counts\n\n\ndef get_shares(share_type):\n    '''\n    Factory: returns a fetcher for the given share-count field\n    '''\n    sql = '''\n    SELECT S.share_type, S.EndDate, M.SecuCode\n    FROM SecuMain M, LC_ShareStru S\n    WHERE M.CompanyCode = S.CompanyCode AND\n        M.SecuMarket in (83, 90) AND\n        M.SecuCategory = 1\n    '''\n    transed_sql = sql.replace('share_type', share_type)\n\n    def _inner(universe, start_time, end_time):\n        data = fdgetter.get_db_data(transed_sql, cols=('data', 'time', 'code'), add_stockcode=False)\n        data['code'] = data.code.apply(datatoolkits.add_suffix)\n        by_code = data.groupby('code')\n        tds = dateshandle.get_tds(start_time, end_time)\n        data = by_code.apply(datatoolkits.map_data, days=tds, fromNowOn=True,\n                             fillna={'code': lambda x: x.code.iloc[0]})\n        data = data.reset_index(drop=True)\n        data = data.pivot_table('data', index='time', columns='code')\n        data = data.loc[:, sorted(universe)]\n        assert check_indexorder(data), 'Error, data order is mixed!'\n        return data\n    return _inner\n\n\n# Floating shares\nfloat_shares = Factor('FLOAT_SHARE', get_shares('NonResiSharesJY'), pd.to_datetime('2017-07-21'))\n# Total shares\ntotal_shares = Factor('TOTAL_SHARE', get_shares('TotalShares'), 
pd.to_datetime('2017-07-21'))\n# --------------------------------------------------------------------------------------------------\n# Market value: total and floating market cap\n\n\ndef get_mktvalue(share_factor_name):\n    '''\n    Factory: returns a market-cap calculator\n    '''\n    def _inner(universe, start_time, end_time):\n        share_data = query(share_factor_name, (start_time, end_time))\n        close_data = query('CLOSE', (start_time, end_time))\n        assert len(share_data) == len(close_data), \"Error, basic data length does not match! \" + \\\n            \"share data = {sd_len}, while close data = {cd_len}\".format(sd_len=len(share_data),\n                                                                        cd_len=len(close_data))\n        res = share_data * close_data\n        res = res.loc[:, sorted(universe)]\n        return res\n    return _inner\n\n\ntotal_mktvalue = Factor('TOTAL_MKTVALUE', get_mktvalue('TOTAL_SHARE'), pd.to_datetime('2017-07-24'),\n                        desc=\"computed with the close price\", dependency=['TOTAL_SHARE', 'CLOSE'])\nfloat_mktvalue = Factor('FLOAT_MKTVALUE', get_mktvalue('FLOAT_SHARE'), pd.to_datetime('2017-07-24'),\n                        desc=\"computed with the close price\", dependency=['FLOAT_SHARE', 'CLOSE'])\n\n# --------------------------------------------------------------------------------------------------\n# Backward-adjusted price\n\n\ndef get_adjclose(universe, start_time, end_time):\n    '''\n    Backward-adjusted close price\n    '''\n    adj_factor = query('ADJ_FACTOR', (start_time, end_time))\n    close_data = query('CLOSE', (start_time, end_time))\n    assert len(adj_factor) == len(close_data), \"Error, basic data length does not match! \" + \\\n        \"adj_factor data = {sd_len}, while close data = {cd_len}\".format(sd_len=len(adj_factor),\n                                                                         cd_len=len(close_data))\n    res = adj_factor * close_data\n    res = res.loc[:, sorted(universe)]\n    return res\n\n\nadj_close = Factor('ADJ_CLOSE', get_adjclose, pd.to_datetime('2017-07-24'),\n                   dependency=['CLOSE', 'ADJ_FACTOR'])\n\n# --------------------------------------------------------------------------------------------------\n# Daily return\n\n\ndef get_dailyret(universe, start_time, end_time):\n    '''\n    Daily returns, computed from the backward-adjusted close\n    '''\n    new_start = pd.to_datetime(start_time) - pd.Timedelta('30 day')\n    data = query('ADJ_CLOSE', (new_start, end_time))\n    data = data.pct_change()\n    mask = data.index >= start_time\n    data = data.loc[mask, sorted(universe)]\n    return data\n\n\ndaily_ret = Factor('DAILY_RET', get_dailyret, pd.to_datetime('2017-07-24'),\n                   dependency=['ADJ_CLOSE'])\n# --------------------------------------------------------------------------------------------------\n# Turnover rate\n\n\ndef get_torate(universe, start_time, end_time):\n    '''\n    Turnover rate: daily trading volume divided by floating shares\n    '''\n    volume = query('TO_VOLUME', (start_time, end_time))\n    float_shares = query('FLOAT_SHARE', (start_time, end_time))\n    res = volume / float_shares\n    res = res.loc[:, sorted(universe)]\n    return res\n\nto_rate = Factor('TO_RATE', get_torate, pd.to_datetime('2017-07-24'),\n                 dependency=['TO_VOLUME', 'FLOAT_SHARE'])\n\n# --------------------------------------------------------------------------------------------------\nfactor_list = [close_price, open_price, high_price, low_price, to_value, to_volume, adj_factor,\n               float_shares, total_shares, total_mktvalue, float_mktvalue, adj_close,\n               daily_ret, to_rate]\ncheck_duplicate_factorname(factor_list, __name__)\n","sub_path":"fmanager/factors/basicfactors/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":9568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"611398889","text":"# 1214 Multifactorial\nn1=input().split()\nn=int(n1[0])\nk=int(n1[1])\nres=1\nwhile n>0:\n\n    res*=n\n    if res>10**18:\n        res=('overflow')\n        break\n 
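# step down by k: n, n-k, n-2k, ... are the factors of the multifactorial\n    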
n-=k\nprint(res)\n","sub_path":"1000-1999/1214.py","file_name":"1214.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"287222589","text":"import socket\r\n\r\nHOST, PORT = \"localhost\", 9999\r\ndata = \"dasdsad \"\r\n\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\ntry:\r\n # Connect to server and send data\r\n sock.connect((HOST, PORT)) \r\n d = str(data + \"\\n\")\r\n sock.sendall(bytes(\"hi world\",\"utf-8\"))\r\n\r\n # Receive data from the server and shut down\r\n received = sock.recv(1024)\r\nfinally:\r\n sock.close()\r\n\r\nprint(\"Sent: {}\".format(data))\r\nprint(\"Received: {}\".format(received))","sub_path":"network_client.py","file_name":"network_client.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"218318311","text":"import unittest\nimport copy\n\nimport unipac\n\nclass TestToolchain(unittest.TestCase):\n TEST_DATA_0 = {\n \"name\" : \"toolchain\",\n \"steps\" : [\n {\n \"name\" : \"configure\",\n \"commands\" : [\n {\n \"name\" : \"basic_configure\",\n \"arguments\" : [\n {\n \"args\" : [\"echo\", \"configure\"]\n }\n ]\n }\n ]\n },\n\n {\n \"name\" : \"build\",\n \"commands\" : [\n {\n \"name\" : \"basic_build\",\n \"arguments\" : [\n {\n \"args\" : [\"echo\", \"build\"]\n }\n ]\n }\n ]\n },\n\n {\n \"name\" : \"package\",\n \"commands\" : [\n {\n \"name\" : \"basic_package\",\n \"arguments\" : [\n {\n \"args\" : [\"echo\", \"package\"]\n }\n ]\n }\n ]\n }\n\n ]\n }\n\n TEST_NEW_STEP = {\n \"name\" : \"new_step\",\n \"commands\" : [\n {\n \"name\" : \"basic_new_step\",\n \"arguments\" : [\n {\n \"args\" : [\"echo\", \"new_step\"]\n }\n ]\n }\n ]\n }\n \n \n def test_read_name(self):\n test_data = copy.deepcopy(self.TEST_DATA_0)\n toolchain = unipac.Toolchain(test_data)\n\n self.assertEquals(\"toolchain\", toolchain.name)\n\n def test_write_name(self):\n test_data = copy.deepcopy(self.TEST_DATA_0)\n toolchain = unipac.Toolchain(test_data)\n\n toolchain.name = \"new_name\"\n self.assertEquals(\"new_name\", test_data[\"name\"])\n\n def test_read_steps(self):\n test_data = copy.deepcopy(self.TEST_DATA_0)\n toolchain = unipac.Toolchain(test_data)\n\n self.assertEquals(tuple(self.TEST_DATA_0[\"steps\"]), toolchain.steps)\n\n def test_write_steps_with_data(self):\n test_data = copy.deepcopy(self.TEST_DATA_0)\n toolchain = unipac.Toolchain(test_data)\n\n toolchain.steps = [self.TEST_NEW_STEP]\n\n self.assertEquals(tuple([self.TEST_NEW_STEP]), toolchain.steps)\n\n def test_write_steps_with_command_sequence(self):\n test_data = copy.deepcopy(self.TEST_DATA_0)\n toolchain = unipac.Toolchain(test_data)\n\n toolchain.steps = [unipac.CommandSequence(self.TEST_NEW_STEP)]\n\n self.assertEquals(tuple([self.TEST_NEW_STEP]), toolchain.steps)","sub_path":"tests/test_toolchain.py","file_name":"test_toolchain.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"200216312","text":"# -*- coding: utf-8 -*-\n\nfrom django.views import View\nfrom django.http import JsonResponse, HttpResponse\nfrom django.template import loader\nfrom django.shortcuts import get_object_or_404, render\nfrom django.db.models import Q\nfrom django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie\nfrom django.utils.decorators import method_decorator\n\nfrom blog.util import markdown\nfrom blog.models 
import (Article, Tweet, REDIS_CACHE,\n                         CACHE_KEY_PREFIX, TWEET_KEY_PREFIX)\n\n\ndef redis_cache(func):\n    def wrapper(self, request, *args, **kwargs):\n        if not request.user.is_authenticated:\n            cache_key = CACHE_KEY_PREFIX + kwargs.get('url', 'index')\n            cache = REDIS_CACHE.get(cache_key)\n            if cache:\n                return HttpResponse(cache)\n        return func(self, request, *args, **kwargs)\n    return wrapper\n\n\nclass TitleFlow(View):\n    template = 'index.html'\n\n    @method_decorator(ensure_csrf_cookie)\n    @redis_cache\n    def get(self, request):\n        articles = Article.objects.filter(status=Article.PUBLISH) \\\n            .order_by('-create_time')\n        html = loader.render_to_string(\n            template_name=self.template,\n            context={'articles': articles, 'user': request.user}\n        )\n        if not request.user.is_authenticated:\n            cache_key = CACHE_KEY_PREFIX + 'index'\n            REDIS_CACHE.set(cache_key, html, ex=60 * 60)\n        return HttpResponse(html)\n\n\nclass Content(View):\n    template = 'article.html'\n\n    @method_decorator(ensure_csrf_cookie)\n    @redis_cache\n    def get(self, request, url):\n        article = get_object_or_404(Article, ~Q(status=Article.BLOCK), url=url)\n        article.content = markdown(article.content)\n\n        previous = article.get_adjacent_article('previous')\n        next_ = article.get_adjacent_article('next')\n\n        html = loader.render_to_string(\n            template_name=self.template,\n            context={'article': article, 'user': request.user,\n                     'next_url': previous.url if previous else None,\n                     'previous_url': next_.url if next_ else None}\n        )\n        if not request.user.is_authenticated:\n            cache_key = CACHE_KEY_PREFIX + url\n            REDIS_CACHE.set(cache_key, html, ex=60 * 60)\n        return HttpResponse(html)\n\n\nclass StatCount(View):\n    stat_name = ''\n\n    @method_decorator(csrf_protect)\n    def post(self, request, url):\n        article = get_object_or_404(Article, url=url)\n        if not self.stat_name:\n            raise NotImplementedError()\n        value = getattr(article, self.stat_name)\n        if not request.POST.get('fetch'):\n            value += 1\n        setattr(article, self.stat_name, value)\n        article.save(update_fields=[self.stat_name])\n        return JsonResponse({'data': value})\n\n\nclass FaveStat(StatCount):\n    stat_name = 'fave'\n\n\nclass PageViewStat(StatCount):\n    stat_name = 'page_view'\n\n\ndef view_tweets(request):\n    if not request.user.is_authenticated:\n        cache = REDIS_CACHE.get(TWEET_KEY_PREFIX)\n        if cache:\n            return HttpResponse(cache)\n    tweets = Tweet.objects.all().order_by('-id')[:10]\n    for tweet in tweets:\n        tweet.content = markdown(tweet.content)\n    html = loader.render_to_string(\n        template_name='tweet.html',\n        context={'tweets': tweets, 'user': request.user}\n    )\n    if not request.user.is_authenticated:\n        REDIS_CACHE.set(TWEET_KEY_PREFIX, html, ex=60*60)\n    return HttpResponse(html)\n\n\ndef view_lab(request):\n    return render(request, 'lab.html')\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"252523264","text":"#!/usr/bin/python3.4\n# coding: utf-8\n\n\"\"\"\nProgram : PServeurSocket.py version 1.0\nDate : 15-03-2018\nAuthor : Arnaud JULLIEN\n\"\"\"\n\nimport socket\nimport sys\nfrom threading import Thread\nimport time\nfrom CAcqPuissance import CAcqPuissance\nfrom socket import error as SocketError\nimport errno\n \nTCP_IP = ''\nTCP_PORT = 2019\nBUFFER_SIZE = 4096\n\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsocket.bind((TCP_IP, TCP_PORT))\nsocket.listen(7)\nprint(\"Server started. 
Waiting for a client to connect...\")\n\nclass Receive(Thread): # receives a message from a client\n    \n    def __init__(self):\n        Thread.__init__(self)\n        self.socket_client = socket_client\n    \n    \n    def run(self):\n        \n        \"\"\"\n        Receive the client's message. If it starts with '%:' it carries the power\n        value to send to the wind turbine; if it starts with 'id' it carries the id\n        of the scenario to look up in the database.\n        \"\"\" \n\n        while True:\n            try:\n                message_recu = self.socket_client.recv(BUFFER_SIZE)\n            except BlockingIOError:\n                print(\"Exception in Receive\")\n                pass\n            else:\n                message_recu = message_recu.decode()\n                message_decode = message_recu[0]+message_recu[1]\n                print(message_decode)\n                if message_decode == \"%:\":\n                    intensite = message_recu.replace(\"%:\",\"\")\n                    print(intensite)\n                    return intensite\n                elif message_decode == \"id\":\n                    id_scenario = message_recu.replace(\"id\",\"\")\n                    str(id_scenario)\n                    id_scenario = id_scenario.split(\"/\")\n                    print(id_scenario)\n                    print(\"id scenario: {}\".format(id_scenario[0]))\n                    print(\"id_eolienne: {}\".format(id_scenario[1]))\n                    return id_scenario\n                else:\n                    print(\"Invalid data\")\n    \n    \n    \n    \nclass Send(Thread):\n    \n    def __init__(self):\n        Thread.__init__(self)\n        self.__mesPuis = CAcqPuissance()\n    \n    \n    def run(self):\n        \n        i = 3\n        while True:\n            time.sleep(1)\n            msg = self.__mesPuis.mesurerPuissance()\n            print(msg)\n            msgs = \"{}/{}\".format(i,self.__mesPuis.mesurerPuissance()/100)\n            msgs = msgs.encode('UTF-8')\n            i += 1\n            if socket_client.send(msgs):\n                print(\"ok: {}\".format(msgs))\n            else:\n                print(\"not ok\")\n                print(msgs)\n\nif __name__ == \"__main__\":\n    mesPuis = CAcqPuissance()\n    val = mesPuis.mesurerPuissance()\n    #msgs = \"10/5.5\"#+str(self.__mesPuis.mesurerPuissance())\n    msgs = \"10/{}\".format(val)\n    msgs = msgs.encode('UTF-8')\n    print(msgs) \n    \n    print(\"Listening on port {}\".format(TCP_PORT))\n    socket_client, infos = socket.accept()\n    print(\"Client {} connected!\".format(infos))\n    \n    Receive_thread = Receive()\n    Send_thread = Send()\n    \n    Receive_thread.start()\n    Send_thread.start()\n","sub_path":"06_taches_finale/02_arnaud_jullien/Projet_eolienne/Prog_ok/CServeurSocket.py","file_name":"CServeurSocket.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"734450","text":"import numpy as np\nimport math\nfrom scipy.optimize import fsolve \n#\n# TODO : make init method for scafield\n# Sod shocktube\ndef initSod(mesh):\n\n    neq = 4 \n    gamma =1.4\n\n    initEuler = []\n    for i in range(neq):\n        initEuler.append(np.zeros(len(mesh.centers()))) #test use zeros instead\n\n    rhoL = 1.0\n    uL = 0.0\n    pL = 1.0\n    eL = pL / ((gamma-1.0)*rhoL)\n    EL = eL + 0.5 * uL**2\n\n    rhoR = 0.125\n    uR = 0.0\n    pR = 0.1\n    eR = pR / ((gamma-1.0)*rhoR)\n    ER = eR + 0.5 * uR**2\n\n    x = mesh.centers()\n    xhalf = 0.5 * (x[0]+x[-1])\n\n    for c in range(len(x)):\n        if x[c] < xhalf:\n            initEuler[0][c] = rhoL\n            initEuler[1][c] = rhoL*uL\n            initEuler[2][c] = rhoL*EL\n            initEuler[3][c] = 0.0\n        else:\n            initEuler[0][c] = rhoR\n            initEuler[1][c] = rhoR*uR\n            initEuler[2][c] = rhoR*ER\n            initEuler[3][c] = 0.0\n\n\n    return initEuler\n\ndef exactSod(mesh,tf): # tf is the endtime\n\n    neq = 4 \n    gamma = 1.4\n\n    exactEulerPdata = []\n    for i in range(neq):\n        exactEulerPdata.append(np.zeros(len(mesh.centers()))) #test use zeros instead\n\n    gamma = 1.4 \n    gm1 = gamma - 1.0\n    gp1 = gamma + 1.0\n    g2 = 2.0*gamma \n\n    mu = math.sqrt( gm1/gp1)\n    
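# mu and beta are the classic Sod shock-tube constants; defined here but not referenced again below\n    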
beta = gm1/g2\n\n    # Initial conditions (left state 1, right state 5)\n    rho1 = 1.0\n    u1 = 0.0\n    p1 = 1.0\n    e1 = p1 / (gm1*rho1)\n    E1 = e1 + 0.5 * u1**2\n\n    rho5 = 0.125\n    u5 = 0.0\n    p5 = 0.1\n    e5 = p5 / (gm1*rho5)\n    E5 = e5 + 0.5 * u5**2\n\n    # speed of sound\n    c1 = math.sqrt(gamma*p1/rho1)\n    c5 = math.sqrt(gamma*p5/rho5)\n\n    # location of the discontinuity at time t = 0\n    x = mesh.centers()\n    xi = 0.5 * (x[0]+x[-1])\n\n    def f(p):\n        z = (p/p5-1.0)\n        fact = gm1/g2 * (c5/c1) * z / math.sqrt(1.0 + gp1/g2 * z)\n        fact = (1.0 - fact)**(g2/gm1)\n        fp = p1 * fact - p\n        return fp\n\n    # fsolve returns a one-element array; unwrap it to a scalar root\n    p4 = fsolve(f, 0.5*(p1+p5))[0]\n\n    # resolve post-shock density and velocity\n    z = (p4/p5-1.0)\n    gmfac1 = 0.5*gm1/gamma\n    gmfac2 = 0.5*gp1/gamma\n\n    fac = math.sqrt(1.0 + gmfac2 * z)\n\n    u4 = c5 * z / (gamma * fac)\n    rho4 = rho5 * (1.0 + gmfac2 * z) / (1.0 + gmfac1 * z)\n\n    # shock speed\n    w = c5 * fac\n\n    # compute the values at the foot of the rarefaction wave\n    p3 = p4\n    u3 = u4\n    rho3 = rho1 * (p3/p1)**(1.0/gamma)\n\n    # compute the positions of the waves\n    c3 = math.sqrt(gamma*p3/rho3)\n\n    xsh = xi + w * tf        # shock position\n    xcd = xi + u3 * tf       # contact discontinuity position\n    xft = xi + (u3-c3) * tf  # rarefaction foot position\n    xhd = xi - c1 * tf       # rarefaction head position\n\n    for c in range(len(x)):\n        if x[c] < xhd:\n            e1 = p1 / ((gamma-1.0)*rho1)\n            exactEulerPdata[0][c] = rho1\n            exactEulerPdata[1][c] = u1\n            exactEulerPdata[2][c] = e1\n            exactEulerPdata[3][c] = p1\n        elif x[c] < xft:\n            u2 = 2.0 / gp1 * ( c1 + (x[c]-xi) / tf )\n            fac = 1.0 - 0.5 * gm1 * u2 / c1\n            rho2 = rho1 * fac**(2.0/gm1)\n            p2 = p1 * fac**(2.0*gamma / gm1)\n            e2 = p2 / ((gamma-1.0)*rho2)\n            exactEulerPdata[0][c] = rho2\n            exactEulerPdata[1][c] = u2\n            exactEulerPdata[2][c] = e2\n            exactEulerPdata[3][c] = p2\n        elif x[c] < xcd:\n            e3 = p3 / ((gamma-1.0)*rho3)\n            exactEulerPdata[0][c] = rho3\n            exactEulerPdata[1][c] = u3\n            exactEulerPdata[2][c] = e3\n            exactEulerPdata[3][c] = p3\n        elif x[c] < xsh:\n            e4 = p4 / ((gamma-1.0)*rho4)\n            exactEulerPdata[0][c] = rho4\n            exactEulerPdata[1][c] = u4\n            exactEulerPdata[2][c] = e4\n            exactEulerPdata[3][c] = p4\n        else:\n            e5 = p5 / ((gamma-1.0)*rho5)\n            exactEulerPdata[0][c] = rho5\n            exactEulerPdata[1][c] = u5\n            exactEulerPdata[2][c] = e5\n            exactEulerPdata[3][c] = p5\n\n    return exactEulerPdata\n","sub_path":"lessons/euler-shocktube/sod.py","file_name":"sod.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"363880514","text":"# returns a sequence of row/column toggles to solve the puzzle\n# - Examples:\n#   - [3, 7]\n#   - [0, 5, 4, 7]\ndef find_solution(puzzle):\n    n = len(puzzle)\n    target = tuple([tuple([1 for _ in range(n)]) for _ in range(n)])\n\n    # flip every cell of row v (if v < n) or of column v-n (if v >= n)\n    def toggle(a, v):\n        if v < n:\n            return tuple([tuple([a[i][j] if i != v else 1 - a[i][j] for j in range(n)]) for i in range(n)])\n        else:\n            v -= n\n            return tuple([tuple([a[i][j] if j != v else 1 - a[i][j] for j in range(n)]) for i in range(n)])\n\n    # breadth-first search over board states; each level applies one more toggle\n    q = [[tuple([tuple([puzzle[i][j] for j in range(n)]) for i in range(n)]), []]]\n    visited = {q[0][0]}\n    while q:\n        newQ = []\n        for p, path in q:\n            if p == target:\n                return path\n            for i in range(2 * n):\n                newP = toggle(p, i)\n                if newP not in visited:\n                    visited.add(newP)\n                    newQ.append([newP, path + [i]])\n        q = newQ\n    return []\n\n\nprint(find_solution([[0, 0, 0],\n                     [0, 1, 0],\n                     [0, 0, 0]]))\n
","sub_path":"codewar/2021/Solve_the_Grid_Binary_Toggling_Puzzle.py","file_name":"Solve_the_Grid_Binary_Toggling_Puzzle.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"414817179","text":"import redis\r\nimport pandas as pd\r\n\r\nr = redis.Redis(db=0)\r\n# letter -> point value; a word's total score keys the Redis set of candidates\r\nletterpoints = { \"E\": 1,\r\n                 \"A\": 2,\r\n                 \"R\": 3,\r\n                 \"I\": 4,\r\n                 \"O\": 5,\r\n                 \"T\": 6,\r\n                 \"N\": 7,\r\n                 \"S\": 8,\r\n                 \"L\": 9,\r\n                 \"C\": 10,\r\n                 \"U\": 11,\r\n                 \"D\": 12,\r\n                 \"P\": 13,\r\n                 \"M\": 14,\r\n                 \"H\": 15,\r\n                 \"G\": 16,\r\n                 \"B\": 17,\r\n                 \"F\": 18,\r\n                 \"Y\": 19,\r\n                 \"W\": 20,\r\n                 \"K\": 21,\r\n                 \"V\": 22,\r\n                 \"X\": 23,\r\n                 \"Z\": 24,\r\n                 \"J\": 25,\r\n                 \"Q\": 26\r\n}\r\nnew = []\r\nword = input(\"Enter your word:\")\r\nif word:\r\n    # strip any parenthesised or dashed suffix, then punctuation and digits\r\n    z = word.split('(')[0]\r\n    z = z.split('-')[0]\r\n    z = z.split('–')[0]\r\n    z = ''.join(e for e in z if e.isalnum())\r\n    z = ''.join([i for i in z if not i.isdigit()])\r\n    WordCount = len(z)\r\n    org = z\r\n    score = 0\r\n    for j in z:\r\n        j = j.upper()\r\n        try:\r\n            score = score + letterpoints[j]\r\n        except KeyError:\r\n            score = score + 0\r\n\r\n    # all candidates whose letters add up to the same score\r\n    members = r.smembers(\"ANAGRAM:{}\".format(score))\r\n\r\n    listing = []\r\n    for x in members:\r\n        listing.append(x.decode(\"utf-8\"))\r\n    keys, values = zip(*(s.split(\":\") for s in listing))\r\n    df = pd.DataFrame()\r\n    df['Description'] = keys\r\n    df['value'] = values\r\n    key = []\r\n    for x in df['value']:\r\n        z = x.split('(')[0]\r\n        z = z.split('-')[0]\r\n        z = z.split('–')[0]\r\n        z = ''.join(e for e in z if e.isalnum())\r\n        z = ''.join([i for i in z if not i.isdigit()])\r\n\r\n        if WordCount == len(z):\r\n            key.append(x + \":\" + str(len(z)) + \":\" + z)\r\n            new_doc = {}\r\n            new_doc['original'] = x\r\n            new_doc['not_original'] = z.lower()\r\n            new_doc['match'] = x + \":\" + str(len(z)) + \":\" + z\r\n            new.append(new_doc)\r\n\r\n    # true anagrams have identical sorted letters\r\n    org = ''.join(sorted(org.lower()))\r\n    result = []\r\n    for kill in new:\r\n        jugs = ''.join(sorted(kill['not_original']))\r\n        if jugs == org:\r\n            result.append(kill)\r\n    # Activ Health Enhance\r\n    key = [ sub['match'] for sub in result ]\r\n    # print(str(key))\r\n    Redisreturnedlist = []\r\n    for x in key:\r\n        Redisreturnedlist.append(x.split(\":\"))\r\n\r\n    flat_list = [item for sublist in Redisreturnedlist for item in sublist]\r\n\r\n    # every third element of the flattened list is an original value\r\n    requiredvalues = flat_list[::3]\r\n\r\n    # join a list of strings into a single string\r\n    def listToString(s):\r\n        return ''.join(s)\r\n\r\n    listToStringval = listToString(requiredvalues)\r\n\r\n    if not requiredvalues:\r\n        print(\"Entered word not available in our database\")\r\n    else:\r\n        print(\"Correct options available based on your search\")\r\n        if len(requiredvalues) > 1:\r\n            print(df[df.value.isin(requiredvalues)].to_string(index=False))\r\n        else:\r\n            print(df[df.value == listToStringval].to_string(index=False))\r\n\r\nelse:\r\n    print(\"Please enter a non-blank value\")","sub_path":"User Stories/User Story 14/UserStory14.py","file_name":"UserStory14.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}