diff --git "a/1192.jsonl" "b/1192.jsonl" new file mode 100644--- /dev/null +++ "b/1192.jsonl" @@ -0,0 +1,706 @@ +{"seq_id":"463459463","text":"from fastapi import APIRouter\n\nfrom app.database.models import LinkMap\n\nrouter = APIRouter()\n\n\n@router.post(\"/\", response_model=LinkMap)\nasync def create_link_map(link_map: LinkMap) -> LinkMap:\n return await link_map.save()\n\n\n@router.get(\"/\", response_model=list[LinkMap])\nasync def get_link_maps(sid: int, uid: int) -> list[LinkMap]:\n return await LinkMap.objects.all(sid=sid, uid=uid)\n\n\n@router.delete(\"/{id}\", response_model=LinkMap)\nasync def remove_link_map(id: int) -> LinkMap:\n item_db = await LinkMap.objects.get(pk=id)\n\n return {\"deleted_rows\": await item_db.delete()}\n","sub_path":"api/app/routers/v1/link_map.py","file_name":"link_map.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"130461363","text":"# -*- coding: UTF-8 -*-\n\nimport datetime\n\nclass ReportKeys:\n \"\"\"\n Key of the metrics report.\n\n :ivar name: Name of the key.\n :ivar python_type: Python type of the key.\n :ivar abbreviation: Shorter name for the key.\n :ivar lb: Lower bound on the key value to be acceptable.\n :ivar ub: Upper bound on the key value to be acceptable.\n :ivar more_is_better: Boolean true if more is better.\n \"\"\"\n\n def __init__(self, name: str, python_type: type = str, abbreviation: str = None, lb = None, ub = None,\n more_is_better: bool = True):\n \"\"\"\n\n :param name: Name of the key.\n :param python_type: Python type of the key.\n :param abbreviation: Shorter name for the key.\n :param lb: Lower bound on the key value to be acceptable.\n :param ub: Upper bound on the key value to be acceptable.\n :param more_is_better: Boolean true if more is better.\n \"\"\"\n self.name = name\n self.python_type = python_type\n self.abbreviation = abbreviation\n self.lb = lb\n self.ub = ub\n self.more_is_better = more_is_better\n\n def abbreviate(self):\n \"\"\"\n Get the abbreviation of this key.\n\n :return: String.\n :rtype: str\n \"\"\"\n return self.abbreviation if self.abbreviation is not None else self.name\n\n def to_file_name(self, new_spaces=\"_\"):\n \"\"\"\n Get a file name from the key, lower case with underscores.\n\n :param new_spaces: Replacement for spaces.\n :return: String.\n \"\"\"\n base_name = \"\".join([c for c in self.name.lower() if c.isalpha() or c.isdigit() or c == \" \"])\n base_name = base_name.replace(\" \", \" \") # Replace double spaces\n base_name = base_name.replace(\" \", new_spaces)\n return base_name\n\n def __str__(self):\n return self.name\n\n def __lt__(self, other):\n return str(self) < str(other)\n\n def __hash__(self):\n return id(self)\n\n# Result key constants\nCODE_PATH = ReportKeys('Code path', str)\nREPORT_DATE = ReportKeys('Report date', datetime.datetime, abbreviation=\"Date\")\nLINES_OF_CODE = ReportKeys('Lines of code', int, abbreviation=\"Lines\", lb=1)\nCOMMENT_RATE = ReportKeys('Documentation rate', float, abbreviation=\"/* */\", lb=0.0, ub=0.45)\nTESTS_COVERAGE = ReportKeys('Tests coverage', float, abbreviation=\"Tests\", lb=0.2, ub=1.0)\nMAINTAINABILITY_INDEX = ReportKeys('Maintainability index', float, abbreviation=\"MI\", lb=0.1 ,ub=0.5)\nAVERAGE_CYCLOMATIC_COMPLEXITY = ReportKeys('Cyclomatic complexity - average', float, abbreviation=\"CC~\",\n more_is_better=False, lb=10, ub=35)\nMAX_CYCLOMATIC_COMPLEXITY = ReportKeys('Cyclomatic complexity - maximum', int, 
abbreviation='CC^',\n                                       more_is_better=False, lb=10, ub=35)\nMAX_CYCLOMATIC_COMPLEXITY_FUNCTION = ReportKeys('Max cyclomatic complexity function', str)\nCODE_STYLE = ReportKeys('Code style', float, abbreviation=\"Style\", lb=0.1, ub=0.95)\n","sub_path":"metrics/report_keys.py","file_name":"report_keys.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"322023342","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 26 15:44:31 2019\r\n\r\n@author: Jrainbow\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom skimage.measure import (label, regionprops, find_contours)\r\n#from skimage.measure import regionprops\r\nfrom skimage.io import imread\r\nfrom skimage.transform import (hough_line, hough_line_peaks,\r\n                               probabilistic_hough_line)\r\nfrom skimage import feature\r\nfrom scipy import ndimage\r\n#import skimage\r\n\r\ndef pad_with(vector, pad_width, iaxis, kwargs):\r\n    pad_value = kwargs.get('padder', 0)\r\n    vector[:pad_width[0]] = pad_value\r\n    vector[-pad_width[1]:] = pad_value\r\n    return vector\r\n\r\nsegment = '6_8'\r\n\r\nimg_path = rf\"O:\\tiled\\SJ7394\\buildings_masks\\buildings_{segment}.tif\"\r\nrgb_path = rf\"O:\\tiled\\SJ7394\\{segment}.tif\"\r\n\r\nimg = cv2.imread(img_path, 0)\r\nbgr = cv2.imread(rgb_path)\r\nrgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\r\n\r\n#padded = np.pad(img, 1, pad_with)\r\n\r\npadded = img\r\n\r\n#cv2.imshow(\"original\",img)\r\n#cv2.waitKey(0)\r\nlabelled_img = label(padded)\r\n#plt.imshow(img)\r\n#plt.imshow(labelled_img)\r\n\r\nnumBuildings = len(np.unique(labelled_img)) - 1  # exclude the background label 0\r\nprint(f'{numBuildings} buildings found')\r\n\r\nblank = np.zeros_like(padded)\r\noutput = np.zeros_like(padded)\r\n\r\n\r\ncandidate = 6\r\n\r\nprops = regionprops(labelled_img)\r\nprop = props[candidate]\r\nregion = np.where(labelled_img == candidate)\r\nblank[region] = 1\r\n\r\n\r\n# draw contours\r\ncontours = find_contours(blank, 0.8)\r\n\r\nfig, ax = plt.subplots(1,1, figsize=(15,15))\r\nax.imshow(output)\r\n\r\nfor n, contour in enumerate(contours):\r\n    ax.plot(contour[:, 1], contour[:, 0], linewidth=3)\r\n    ax.set_axis_off()\r\nfig.savefig('images/building_outline_1.png', bbox_inches='tight', pad_inches=0) \r\n\r\n","sub_path":"building_polygonizer_v0.py","file_name":"building_polygonizer_v0.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"582642922","text":"# -*- coding: utf-8 -*-\nimport datetime as dt\nimport random\nimport time\n\nclass Headers:\n    def __init__(self, num, time_start, time_end, app):\n        self.num = num\n        time_start = time_start if time_start else dt.datetime.now().strftime(\"%Y%m%d\")\n        time_end = time_end if time_end else dt.datetime.now().strftime(\"%Y%m%d\")\n        self.time_start = dt.datetime.strptime(str(time_start), '%Y%m%d')\n        self.time_end = dt.datetime.strptime(str(time_end), '%Y%m%d')\n        self.inc_ms, self.time_range = self._calc_increment_ms()\n        self.app = app\n\n    def _calc_increment_ms(self):\n        if self.time_start == self.time_end:\n            time_ms_range = 24 * 60 * 60 * 1000 - 1000\n        else:\n            time_ms_range = (self.time_end + dt.timedelta(days=1) - self.time_start).total_seconds() * 1000 - 1000\n        return time_ms_range / self.num, time_ms_range\n\n    def __iter__(self):\n        for seq_num in range(1, self.num + 1):\n            log_ts = 
self.time_start + dt.timedelta(milliseconds=self.inc_ms * seq_num)\n            log_ts_str = log_ts.strftime(\"%Y-%m-%d %H:%M:%S\")\n            yield \"{} {} {} {} {} \".format(log_ts_str, \"127.0.0.1\", \"(1.2)\", self.app + \" : \", str(seq_num).zfill(11))\n\n\nclass BatchHeaders(Headers):\n    def __init__(self, num, time_start, time_end, batch, app):\n        super().__init__(num, time_start, time_end, app)\n        self.batch = batch\n\n    def __iter__(self):\n        seq_num = 0\n        for _ in range(1, self.num + 1):\n            log_ts = self.time_start + dt.timedelta(milliseconds=self.inc_ms * seq_num)\n            log_ts_next = self.time_start + dt.timedelta(milliseconds=self.inc_ms * (seq_num + 1))\n            if log_ts.hour != log_ts_next.hour:\n                for _ in range(1, self.batch):\n                    seq_num += 1\n                    if seq_num > self.num:\n                        break\n                    log_ts_str = log_ts.strftime(\"%Y-%m-%d %H:%M:%S\")\n                    yield \"{} {} {} {} {} \".format(log_ts_str, \"127.0.0.1\", \"(1.2)\", \"APP DELAY : \", str(seq_num).zfill(11))\n            else:\n                seq_num += 1\n                if seq_num > self.num:\n                    break\n                log_ts_str = log_ts.strftime(\"%Y-%m-%d %H:%M:%S\")\n                yield \"{} {} {} {} {} \".format(log_ts_str, \"127.0.0.1\", \"(1.2)\", \"APP : \", str(seq_num).zfill(11))\n\n\nclass RandomHeaders(Headers):\n    def __iter__(self):\n        random.seed(time.time())  # time.clock() was removed in Python 3.8\n        for seq_num in range(1, self.num + 1):\n            log_ts = self.time_start + dt.timedelta(milliseconds=random.randint(0, self.time_range))\n            log_ts_str = log_ts.strftime(\"%Y-%m-%d %H:%M:%S\")\n            yield \"{} {} {} {} {} \".format(log_ts_str, \"127.0.0.1\", \"(1.2)\", \"APP : \", str(seq_num).zfill(11))\n\n\nclass SyslogHeaders(Headers):\n    def __iter__(self):\n        for seq_num in range(1, self.num + 1):\n            log_ts = self.time_start + dt.timedelta(milliseconds=self.inc_ms * seq_num)\n            log_ts_str = log_ts.strftime(\"%b %d %H:%M:%S\")\n            yield \"{} {} {} {} {} \".format(\"<34>\", log_ts_str, \"127.0.0.1\", \"Syslog : \", str(seq_num).zfill(11))\n\n\nclass InfiniteSyslog:\n    def __init__(self, offset=0):\n        self.seq_num = 0\n        self.seq_num += offset\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        self.seq_num += 1\n        log_ts_str = dt.datetime.now().strftime(\"%b %d %H:%M:%S\")\n        return \"{} {} {} {} {} \".format(\"<34>\", log_ts_str, \"127.0.0.1\", \"Syslog : \", str(self.seq_num).zfill(11))\n\n","sub_path":"devtool/log/head.py","file_name":"head.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"374038214","text":"import base64\r\nimport pickle\r\nimport sys\r\nimport globus_sdk\r\n\r\npickled_tokens = base64.b64decode(sys.argv[1])\r\n\r\ntokens = pickle.loads(pickled_tokens)\r\nassert isinstance(tokens, dict)\r\n\r\ntransfer_access_token = tokens['tokens']['transfer.api.globus.org']['access_token']\r\ntransfer_authorizer = globus_sdk.AccessTokenAuthorizer(transfer_access_token)\r\n\r\ntc = globus_sdk.TransferClient(authorizer=transfer_authorizer)\r\n\r\nsource_endpoint_id = \"ebf55996-33bf-11e9-9fa4-0a06afd4a22e\"\r\nsource_path = \"/personal/rick/galaxy-training-data/\"\r\n\r\ndest_endpoint_id = \"bfe3af54-5fcc-11e9-bf34-0edbf3a4e7ee\"\r\ndest_path = \"/\"\r\n\r\nlabel = \"My tutorial transfer\"\r\n\r\n# TransferData() automatically gets a submission_id for once-and-only-once submission\r\ntdata = globus_sdk.TransferData(tc, source_endpoint_id, dest_endpoint_id, label=label)\r\ntdata.add_item(source_path, dest_path, recursive=True)\r\n\r\ntc.endpoint_autoactivate(source_endpoint_id)\r\ntc.endpoint_autoactivate(dest_endpoint_id)\r\n\r\nsubmit_result = tc.submit_transfer(tdata)\r\nprint(\"Task ID:\", 
submit_result[\"task_id\"])","sub_path":"src/tools/fileTransferTool/fileTransfer.py","file_name":"fileTransfer.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"219471383","text":"import tensorflow as tf\nimport numpy as np\nimport math\nimport time\nimport os\nimport glob\nimport cv2\nimport datetime\nimport scipy as sp\nfrom model_v2 import ESPCN\nfrom utils import (\n input_setup,\n checkpoint_dir,\n read_data,\n checkimage,\n imsave,\n imread,\n load_data,\n preprocess,\n modcrop\n)\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\"epoch\", 250, \"Number of epoch\")\nflags.DEFINE_integer(\"steps_per_epoch\", 250, \"Steps per epoch\")\nflags.DEFINE_integer(\"image_size\", 32, \"The size of image input\")\nflags.DEFINE_integer(\"c_dim\", 3, \"The size of channel\")\nflags.DEFINE_boolean(\"is_train\", True, \"if training\")\nflags.DEFINE_integer(\"train_mode\", 1, \"0: Spatial Transformer 1: VESPSCN No MC\\\n 2: VESPCN 3: Bicubic (No Training Required) 4: SRCNN \\\n 5: Multi-Dir mode for testing mode 2 6: Multi-Dir mode \\\n for testing mode 1\")\nflags.DEFINE_integer(\"scale\", 3,\n \"the size of scale factor for pre-processing input image\")\nflags.DEFINE_integer(\"stride\", 100, \"the size of stride\")\nflags.DEFINE_string(\"checkpoint_dir\", \"checkpoint\",\n \"Name of checkpoint directory\")\nflags.DEFINE_float(\"learning_rate\", 1e-4, \"The learning rate\")\nflags.DEFINE_integer(\"batch_size\", 8, \"the size of batch\")\nflags.DEFINE_string(\"result_dir\", \"result\", \"Name of result directory\")\nflags.DEFINE_string(\"test_img\", \"\", \"test_img\")\nflags.DEFINE_boolean(\"load_existing_data\", True,\n \"True iff existing hf data is loaded for training/testing\")\nflags.DEFINE_string(\"job_name\", \"\", \"ps/worker\")\nflags.DEFINE_integer(\"task_index\", 0, \"task index\")\nflags.DEFINE_string(\"ps_hosts\", \"\", \"ps-task hosts in cluster\")\nflags.DEFINE_string(\"worker_hosts\", \"\", \"worker-task hosts in cluster\")\n\n\ndef prepare_data(config):\n\n # Prepares data if load_existing_data is False\n if not config.load_existing_data:\n input_setup(config)\n\n # Loads data from data_dir\n print('Loading data...')\n data_dir = checkpoint_dir(config)\n input_, label_, paths_ = read_data(data_dir, config)\n\n # Shuffles training data\n print('Shuffling data...')\n numData = np.arange(input_.shape[0])\n np.random.shuffle(numData)\n input_ = input_[numData]\n label_ = label_[numData]\n\n # Prepares frame sets for feeding into different spatial\n # transformers if training mode is 2\n if FLAGS.train_mode == 2:\n print(\"Preparing frames sets for spatial transformers...\")\n\n curr_prev_imgs = input_[:, :, :, 0:(2 * config.c_dim)]\n curr_next_imgs = np.concatenate((input_[:, :, :,\n 0:config.c_dim],\n input_[:, :, :,\n (2 * config.c_dim):\n (3 * config.c_dim)]),\n axis=3)\n\n curr_prev_imgs = tf.cast(curr_prev_imgs, tf.float32)\n curr_next_imgs = tf.cast(curr_next_imgs, tf.float32)\n label_ = tf.cast(label_, tf.float32)\n\n # Provides data in batch one at a time to tf.train.batch\n input_queue = tf.train.slice_input_producer([curr_prev_imgs, curr_next_imgs, label_], shuffle=False)\n x1, x2, y = tf.train.batch(input_queue, batch_size=FLAGS.batch_size)\n return x1, x2, y\n\n elif FLAGS.train_mode == 4:\n\n # Upscales input data using bicubic interpolation\n print('Upscaling training data using Bicubic Interpolation...')\n\n input_new = []\n for i in 
range(len(input_)):\n input_new.append(sp.misc.imresize(input_[i],\n (config.image_size * config.scale,\n config.image_size * config.scale), interp='bicubic'))\n input_ = np.array(input_new)\n\n input_ = tf.cast(input_, tf.float32)\n label_ = tf.cast(label_, tf.float32)\n\n # Provides data in batch one at a time to tf.train.batch\n input_queue = tf.train.slice_input_producer([input_, label_], shuffle=False)\n x1, y = tf.train.batch(input_queue, batch_size=FLAGS.batch_size)\n return x1, y\n\n else:\n input_ = tf.cast(input_, tf.float32)\n label_ = tf.cast(label_, tf.float32)\n\n # Provides data in batch one at a time to tf.train.batch\n input_queue = tf.train.slice_input_producer([input_, label_], shuffle=False)\n x1, y = tf.train.batch(input_queue, batch_size=FLAGS.batch_size)\n return x1, y\n\n\ndef stop_fn(step_context):\n step_context.request_stop()\n return None\n\n\ndef run_train_epochs(cfg, espcn, server):\n\n hooks = [tf.train.StopAtStepHook(last_step=(FLAGS.steps_per_epoch +\n (len(server.enqueue_ops)*len(server.resource_dict['worker'])) + 1)),\n tf.train.CheckpointSaverHook(checkpoint_dir=FLAGS.checkpoint_dir, save_steps=50, saver=tf.train.Saver())]\n # hooks = []\n # The MonitoredTrainingSession takes care of session initialization,\n # restoring from a checkpoint, saving to a checkpoint, and closing when done\n # or an error occurs.\n # master=\"grpc://\" + worker_hosts[FLAGS.task_index]\n # if_chief: 制定task_index为0的任务为主任务,用于负责变量初始化、做checkpoint、保存summary和复原\n # 定义计算服务器需要运行的操作。在所有的计算服务器中有一个是主计算服务器。\n # 它除了负责计算反向传播的结果,它还负责输出日志和保存模型\n\n print(\"chkpt dir: {}\".format(FLAGS.checkpoint_dir))\n print(\"hks: {}\".format(hooks))\n print(\"cgf: {}\".format(cfg))\n print(\"tsk indx T/F: {}\".format(FLAGS.task_index == 0))\n #os.chdir('C:\\\\Users\\\\XL\\\\Desktop\\\\spyn-poc')\n\n #print(\"current dir: \" + os.getcwd())\n #checkpoint_path = os.path.join(os.getcwd(), r'\\checkpoint')\n #print(\"Checkpoint path: {}\".format(checkpoint_path))\n\n with tf.train.MonitoredTrainingSession(checkpoint_dir=None,\n hooks=hooks,\n master=server.target,\n config=cfg,\n is_chief=(server.task_index == 0)\n ) as sess:\n while not sess.should_stop() and tf.train.global_step(sess, espcn.global_step) < FLAGS.steps_per_epoch:\n espcn.train(FLAGS, sess)\n\n if server.use_done_queues:\n server.signal_done(sess)\n\n sess.run_step_fn(stop_fn)\n #print('Final step: {}'.format(tf.train.global_step(sess, espcn.global_step)))\n\n\n\ndef run_ps(server): # ===================================================================================== -> Checking if the flags are valid\n\n server.join()\n\n\ndef run_worker(server):\n\n # Checks if train mode is 3 and training is on\n if FLAGS.train_mode == 3 and FLAGS.is_train:\n print('Error: Bicubic Mode does not require training')\n exit(1)\n elif FLAGS.train_mode == 5 and FLAGS.is_train:\n print('Error: Multi-Dir testing mode for Mode 2 does not require training')\n exit(1)\n elif FLAGS.train_mode == 6 and FLAGS.is_train:\n print('Error: Multi-Dir testing mode for Mode 1 does not require training')\n exit(1)\n\n with tf.device(server.device):\n\n print(server.device)\n print(FLAGS.train_mode)\n\n # Prepares data based on is_train and train_mode\n DataList = []\n\n if FLAGS.train_mode == 2:\n xx1, xx2, yy = prepare_data(FLAGS)\n DataList = [xx1, xx2, yy]\n else:\n xx1, yy = prepare_data(FLAGS)\n DataList = [xx1, yy]\n\n espcn = ESPCN(\n image_size=FLAGS.image_size,\n is_train=FLAGS.is_train,\n train_mode=FLAGS.train_mode,\n scale=FLAGS.scale,\n c_dim=FLAGS.c_dim,\n 
batch_size=FLAGS.batch_size,\n load_existing_data=FLAGS.load_existing_data,\n device=server.device,\n learn_rate=FLAGS.learning_rate,\n data_list=DataList)\n\n if server.use_done_queues:\n server.prepare_signal_ops()\n\n # 通过设置log_device_placement选项来记录operations 和 Tensor 被指派到哪个设备上运行\n config = tf.ConfigProto( # ================================================================================ -> Setting a configuration for the device\n allow_soft_placement=True,\n log_device_placement=False,\n device_filters=[\"/job:ps\", \"/job:worker/task:%d\" % FLAGS.task_index]\n )\n\n run_train_epochs(config, espcn, server)\n","sub_path":"scratch_dir/temp/main_v2.py","file_name":"main_v2.py","file_ext":"py","file_size_in_byte":8718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"33796016","text":"# Import the NetCDF Python interface\nfrom netCDF4 import Dataset \n# Import the Matplotlib package\nimport matplotlib \nimport matplotlib.pyplot as plt # Collection of functions that make matplotlib work like MATLAB\nfrom mpl_toolkits.basemap import Basemap # Import the Basemap toolkit\nfrom matplotlib.colors import LinearSegmentedColormap # Linear interpolation for color maps\nfrom matplotlib.patches import Rectangle # Library to draw rectangles on the plot\n#Import the Numpy package\nimport numpy as np \nfrom numpy.ma import masked_array\n# Add the GDAL library\nfrom osgeo import gdal\n# Import local functions\nfrom remap import remap # Import the Remap function\nfrom cpt_convert import loadCPT # Import the CPT convert function\nfrom headerNetcdf import getBand,convertDate # Import band and convert date function\nfrom os import listdir\nfrom os.path import isfile, join,splitext\nimport sys # Import the \"system specific parameters and functions\" module\nimport datetime # Library to convert julian day to dd-mm-yyyy\n\n\ndef plot(sistemas_convectivos_nc,glm_nc,local,extent,resolution=2):\n \n if(local=='Brasil'):\n degrees = 10\n label_fontsize = 50\n elif(local=='Sudeste'):\n degrees = 5\n label_fontsize = 8\n else:\n degrees = 2 \n resolution = 0.8 \n label_fontsize = 8\n \n g16glm = Dataset(glm_nc,'r')\n nc = Dataset(sistemas_convectivos_nc)\n\n # Get the latitude and longitude image bounds\n geo_extent = nc.variables['geospatial_lat_lon_extent']\n min_lon = float(geo_extent.geospatial_westbound_longitude)\n max_lon = float(geo_extent.geospatial_eastbound_longitude)\n min_lat = float(geo_extent.geospatial_southbound_latitude)\n max_lat = float(geo_extent.geospatial_northbound_latitude)\n\n # Choose the visualization extent (min lon, min lat, max lon, max lat)\n #extent = [-45.0, -24.5, -39.0, -20.7] \n\n # Choose the image resolution (the higher the number the faster the processing is)\n #resolution = 0.8 #2\n\n # Calculate the image extent required for the reprojection\n H = nc.variables['goes_imager_projection'].perspective_point_height\n x1 = nc.variables['x_image_bounds'][0] * H \n x2 = nc.variables['x_image_bounds'][1] * H \n y1 = nc.variables['y_image_bounds'][1] * H \n y2 = nc.variables['y_image_bounds'][0] * H \n\n # Call the reprojection funcion\n grid = remap(sistemas_convectivos_nc, extent, resolution, x1, y1, x2, y2)\n tipos = [\"todos\",\"event\",\"group\",\"flash\"]\n for formato in range(4):\n glm_variables = [False,False,False,False]\n glm_variables[formato] = True\n #print(glm_variables)\n # Read the data returned by the function ==============================================================\n # If it is an IR channel subtract 273.15 
to convert to ° Celsius\n        data = grid.ReadAsArray() - 273.15\n\n        # Make pixels outside the footprint invisible\n        data[data <= -180] = np.nan\n\n        # Define the size of the saved picture =================================================================\n        DPI = 150\n        fig = plt.figure(figsize=(data.shape[1]/float(DPI), data.shape[0]/float(DPI)), frameon=False, dpi=DPI)\n        ax = plt.Axes(fig, [0., 0., 1., 1.])\n        ax.set_axis_off()\n        fig.add_axes(ax)\n        ax = plt.axis('off')\n\n        # Plot the Data =======================================================================================\n\n        # Create the basemap reference for the Rectangular Projection\n        bmap = Basemap(llcrnrlon=extent[0], llcrnrlat=extent[1], urcrnrlon=extent[2], urcrnrlat=extent[3], epsg=4326)\n\n        # Draw the countries and Brazilian states shapefiles\n        bmap.readshapefile('/home/cendas/GOES16_WS_Rodrigo/GLM/src/BRA_adm1','BRA_adm1',linewidth=0.50,color='#000000')\n\n        if(local=='Brasil'):\n            bmap.readshapefile('/home/cendas/GOES16_WS_Rodrigo/GLM/src/ne_10m_coastline','ne_10m_coastline',linewidth=0.50,color='#000000')\n\n        #ne_10m_coastline\n        # Draw parallels and meridians\n        if(not(local=='Brasil')):\n            bmap.drawparallels(np.arange(-90.0, 90.0, degrees), linewidth=0.3, dashes=[4, 4], color='white', labels=[True,False,False,True], fmt='%g', labelstyle=\"+/-\", size=8)\n            bmap.drawmeridians(np.arange(0.0, 360.0, degrees), linewidth=0.3, dashes=[4, 4], color='white', labels=[True,False,False,True], fmt='%g', labelstyle=\"+/-\", size=8)\n        else: \n            bmap.drawparallels(np.arange(-90.0, 90.0, degrees), linewidth=0.3, dashes=[4, 4], color='white', labels=[True,False,False,True], fmt='%g', labelstyle=\"+/-\", size=30)\n            bmap.drawmeridians(np.arange(0.0, 360.0, degrees), linewidth=0.3, dashes=[4, 4], color='white', labels=[True,False,False,True], fmt='%g', labelstyle=\"+/-\", size=30)\n        #Split the dataset with temperatures above and below -80°C \n        temp = -80\n        tempAbove = masked_array(data, data < temp)  # mask pixels colder than temp\n\n        # Converts a CPT file to be used in Python \n\n        cptSquareRoot = loadCPT('/home/cendas/GOES16_WS_Rodrigo/GLM/src/Square Root Visible Enhancement.cpt')\n        # Makes a linear interpolation\n        cpt_convert_SquareRoot = LinearSegmentedColormap('cpt', cptSquareRoot) \n\n        # Plot the GOES-16 channel with the converted CPT colors (you may alter the min and max to match your preference)\n        plot_SquareRoot = bmap.imshow(tempAbove, origin='upper', cmap=cpt_convert_SquareRoot, vmin=-80, vmax=100) \n\n        # ===================== LEGENDA ==========================\n\n        # Get the unit based on the channel. If channels 1 through 6 is Albedo. 
If channels 7 to 16 is BT.\n Unit = \"Cloud Top Temperature [°C]\"\n\n # Choose a title for the plot\n Title = \" Geostationary Lightning Mapper (GLM) - GOES Satellite\"\n Latitude = \"Latitude\"\n Longitude = \"Longitude\"\n\n # Add a black rectangle in the bottom to insert the image description\n lon_difference = (extent[2] - extent[0]) # Max Lon - Min Lon\n # Add the image description inside the black rectangle\n lat_difference = (extent[3] - extent[1]) # Max lat - Min lat\n if(not(local=='Brasil')):\n #Labels and its positions #\n plt.text(extent[0] + lon_difference * 0.5, extent[3] + lat_difference * 0.035,Title, horizontalalignment='center', color = 'black', size=9)\n \n plt.text(extent[0] + lon_difference * 0.5, extent[3] + lat_difference * 0.065,\" \", horizontalalignment='center', color = 'black', size=10)\n \n plt.text(extent[0] + lon_difference * 0.5, extent[1] - lat_difference * 0.11,Longitude, horizontalalignment='center',color = 'black', size=10)\n \n plt.text(extent[0] + lon_difference * 0.5, extent[1] - lat_difference * 0.15,\" \", horizontalalignment='center', color = 'black', size=18) \n \n plt.text(extent[0] - lon_difference * 0.15, extent[1] + lat_difference * 0.5 ,Latitude, verticalalignment ='center', rotation = \"vertical\", color = 'black', size=10) \n \n else:\n plt.text(extent[0] + lon_difference * 0.5, extent[3] + lat_difference * 0.035,Title, horizontalalignment='center', color = 'black', size=40)\n \n plt.text(extent[0] + lon_difference * 0.5, extent[3] + lat_difference * 0.065,\" \", horizontalalignment='center', color = 'black', size=10)\n \n plt.text(extent[0] + lon_difference * 0.5, extent[1] - lat_difference * 0.11,Longitude, horizontalalignment='center',color = 'black', size=40)\n \n plt.text(extent[0] + lon_difference * 0.5, extent[1] - lat_difference * 0.15,\" \", horizontalalignment='center', color = 'black', size=18) \n \n plt.text(extent[0] - lon_difference * 0.15, extent[1] + lat_difference * 0.5 ,Latitude, verticalalignment ='center', rotation = \"vertical\", color = 'black', size=40) \n \n # ========================================\n if(glm_variables[0]):#Todos\n # Get Events, Group and flash from Glm file\n event_lat = g16glm.variables['event_lat'][:]\n event_lon = g16glm.variables['event_lon'][:]\n\n group_lat = g16glm.variables['group_lat'][:]\n group_lon = g16glm.variables['group_lon'][:]\n\n flash_lat = g16glm.variables['flash_lat'][:]\n flash_lon = g16glm.variables['flash_lon'][:]\n\n\n # Plot events as large blue dots\n event_x, event_y = bmap(event_lon, event_lat)\n bmap.plot(event_x, event_y, 'bo', markersize=7,label='Events')\n\n # Plot groups as medium green dots\n group_x, group_y = bmap(group_lon, group_lat)\n bmap.plot(group_x, group_y, 'go', markersize=3,label='Group')\n\n # Plot flashes as small red dots\n flash_x, flash_y = bmap(flash_lon, flash_lat)\n bmap.plot(flash_x, flash_y, 'ro', markersize=1,label='Flash')\n plt.legend(fontsize=label_fontsize,loc=4)\n else:\n if(glm_variables[1]):\n # Get Events from Glm file\n event_lat = g16glm.variables['event_lat'][:]\n event_lon = g16glm.variables['event_lon'][:]\n # Plot events as large blue dots\n event_x, event_y = bmap(event_lon, event_lat)\n bmap.plot(event_x, event_y, 'bo', markersize=7,label='Events')\n if(glm_variables[2]):\n # Get Group from Glm file\n group_lat = g16glm.variables['group_lat'][:]\n group_lon = g16glm.variables['group_lon'][:]\n # Plot groups as medium green dots\n group_x, group_y = bmap(group_lon, group_lat)\n bmap.plot(group_x, group_y, 'go', 
markersize=3,label='Group')\n if(glm_variables[3]):\n # Get Flash from Glm file\n flash_lat = g16glm.variables['flash_lat'][:]\n flash_lon = g16glm.variables['flash_lon'][:] \n # Plot flashes as small red dots\n flash_x, flash_y = bmap(flash_lon, flash_lat)\n bmap.plot(flash_x, flash_y, 'ro', markersize=1,label='Flash')\n plt.legend(fontsize=label_fontsize,loc=4)\n #plt.show()\n Start = glm_nc[glm_nc.find('_s')+1:glm_nc.find(\"_e\")-3] \n year = int(Start[1:5])\n dayjulian = int(Start[5:8]) - 1 # Subtract 1 because the year starts at \"0\"\n dayconventional = datetime.datetime(year,1,1) + datetime.timedelta(dayjulian) # Convert from julian to conventional\n month = dayconventional.strftime('%m')\n day = dayconventional.strftime('%d')\n hour = int(Start[8:12])\n plt.savefig('/home/cendas/GOES16_WS_Rodrigo/GLM/Output/'+local+'/glm_'+ str(year)+str(month)+str(day)+'_'+ str(hour)+'_'+ tipos[formato]+'.png', dpi=DPI, pad_inches=0, transparent=True,bbox_inches='tight')\n plt.close()\n \n\n\ndef get_proper_glm(mypath,sispath): \n #mypath = \"/home/cendas/Temperature-Analysis/samples/\"\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f)) and splitext(f)[1] == '.nc']\n intervalo = sispath[sispath.find('_s')+1:sispath.find(\"_e\")-3]\n #print(intervalo)\n glmfiles = []\n for ncfile in onlyfiles:\n if(intervalo in ncfile):\n glmfiles.append(ncfile)\n glmfiles.sort()\n return (mypath+glmfiles[0])\n\n#Channel 13)\n#sis_path = \"OR_ABI-L2-CMIPF-M6C13_G16_s20203031740189_e20203031749508_c20203031750004.nc\"\n\n#GLM\n#lm_path = 'OR_GLM-L2-LCFA_G16_s20203031800000_e20203031800204_c20203031800226.nc'\nglm_folder = '/home/cendas/GOES16_WS_Rodrigo/Samples/GLM_Samples/'\nsis_path = sys.argv[1]\ntry:\n glm_path = get_proper_glm(glm_folder,sis_path)\n print(glm_path)\n #locais = {'Brasil': [-74, -34.0, -34.0, 5.30], 'Sudeste': [-53.15, -25.5, -39.0, -14.0], 'RJ': [-45.0, -24.5, -39.0, -20.7], 'Bocaina':[-47 ,-25,-42,-20], 'RegiaoSerrana': [-46,-26,-40,-19], 'RegiaoNorte': [-44,-27,-38,-21]}\n local = 'Sudeste'\n extent = [-53.15, -25.5, -39.0, -14.0]\n plot(sis_path,glm_path,local,extent)\n #for local in locais:\n # plot(sis_path,glm_path,local,locais[local])\n \n \n # If the file doesn't exists, it will create one.\n with open('/home/cendas/GOES16_WS_Rodrigo/GLM/Output/Sudeste/G16_Log.txt', 'a') as log:\n log.write(sis_path.replace('\\\\\\\\', '\\\\') + '\\n')\nexcept:\n pass ","sub_path":"GLM/src/Glm_sudeste.py","file_name":"Glm_sudeste.py","file_ext":"py","file_size_in_byte":12392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"258365666","text":"#!/usr/bin/env python3\n\"\"\"kafka-store\n\nUsage:\n ./main [options]\n ./main (-h | --help)\n\nOptions:\n -h --help Show this screen.\n -v --verbose Enable verbose output\n\n --gcloud-url Google store url for files\n --mysql-url MySQL server to write updates to\n --broker-list Kafka broker list\n --topic Consumer topic\n --group Consumer group\n --source Consumer source\n --offset-reset Reset offset on start\n\"\"\"\nimport logging\nfrom docopt import docopt\n\nfrom .handler import KafkaStoreHandler\nfrom .store import (\n GCloudStore,\n MySQLStore,\n)\nfrom smyte_pylib.kafka.loop import KafkaLoop\n\nUPLOAD_BYTES = 1024 * 1024 * 64\nUPLOAD_MAX_AGE_MS = 45 * 60 * 1000\n\n# Try fetch some Smyte-specific initialization configuration, but fall back to\n# sane defaults if they're missing\ntry:\n from pylib.config import setup_config\n from pylib.log import configure_logging\n from pylib.kafka10 
import get_bootstrap_servers\nexcept ImportError:\n setup_config = lambda paths: None\n get_bootstrap_servers = lambda group: None\n def configure_logging(level):\n logging.getLogger().setLevel(level)\n\ndef main():\n arguments = docopt(__doc__)\n\n if setup_config:\n setup_config([])\n\n configure_logging(\n logging.DEBUG if arguments['--verbose'] else logging.INFO\n )\n\n group = arguments.get('--group')\n assert arguments.get('--topic')\n assert group\n\n config = {\n 'queued.max.messages.kbytes': 10 * 1024,\n }\n\n broker_list = arguments.get('--broker-list')\n if broker_list is None and get_bootstrap_servers:\n broker_list = get_bootstrap_servers(group)\n if broker_list is not None:\n config['metadata.broker.list'] = broker_list\n\n loop = KafkaLoop(\n topic=arguments.get('--topic'),\n group=arguments['--group'],\n auto_commit=False,\n offset_reset=arguments.get('--offset-reset') or 'error',\n consumer_config=config,\n )\n\n stores = []\n if arguments['--gcloud-url']:\n stores.append(\n GCloudStore(arguments['--gcloud-url'])\n )\n if arguments['--mysql-url']:\n stores.append(\n MySQLStore(arguments['--mysql-url'])\n )\n\n loop.run(KafkaStoreHandler(\n loop,\n stores=stores,\n max_age_ms=UPLOAD_MAX_AGE_MS,\n max_size=UPLOAD_BYTES,\n ))\n\nif __name__ == '__main__':\n main()\n","sub_path":"kafka_store/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"444811056","text":"from imagine import app, db\nfrom flask import render_template, request, redirect, url_for\nfrom .forms import UserEmailForm, ContactForm\nfrom .models import Email, Contact\n\n@app.route('/', methods=['GET', 'POST'])\ndef search():\n form = UserEmailForm(request.form)\n # Add some sanitizing thing.\n if request.method == 'POST' and form.validate():\n db.session.add(Email(email=str(form.email.data)))\n db.session.commit()\n\n return render_template('index.html', success=True)\n return render_template('index.html', form=form)\n\n\n@app.route('/contact', methods=['GET', 'POST'])\ndef contact():\n\tform = ContactForm(request.form)\n\tif request.method == 'POST' and form.validate():\n\t\tdb.session.add(Contact(name=str(form.name.data), email=str(form.email.data), occupation=str(form.occupation.data), idea=str(form.idea.data), information=str(form.information.data)))\n\t\tdb.session.commit()\n\n\t\treturn render_template('index.html', success=True)\n\treturn render_template('contact_us.html')","sub_path":"imagine/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"478533361","text":"#!/usr/bin/env python\nimport transducer_score\nimport sys\nimport os\n\nbatch = int(sys.argv[1])-1\n\nlanguages = ['basque', 'english', 'tagalog', 'irish']\nfolds = range(10)\n\ncount = 0\n\nfor l in languages:\n for f in folds:\n if count == batch:\n os.system('mkdir -p /export/a10/kitsing/tmp/ryanout-{}-{}/dev'.format(l,f))\n os.system('mkdir -p /export/a10/kitsing/tmp/pkl-{}-{}'.format(l,f))\n import glob\n pretrained = max(glob.glob('results/{}-{}*/transducer_fixed.pkl'.format(l,f)))\n results = transducer_score.main(train_fn='res/wicentowski_split/{}-10fold/{}/dev.uniq'.format(l,f),\n dev_fn='res/wicentowski_split/{}-10fold/{}/dev.uniq'.format(l,f),\n test_fn='res/wicentowski_split/{}-10fold/{}/dev.uniq'.format(l,f), \n folder='/export/a10/kitsing/tmp/pkl-{}-{}'.format(l,f),\n 
ryanout='/export/a10/kitsing/tmp/ryanout-{}-{}/dev/'.format(l,f),\n pretrained_param_pklfile=pretrained,\n nepochs=-1,\n perform_training=0,\n perform_testing=1,\n crunching=1,\n )\n count += 1\n","sub_path":"single_test_2.dev.py","file_name":"single_test_2.dev.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"28599186","text":"import webob\nimport webob.exc\n\nimport core\nimport musicdb.model.db\nimport musicdb.view.label\n\nclass LabelEditForm(core.Resource):\n\t@core.xslt\n\tdef GET(self, request, labelID):\n\t\tdb = musicdb.model.db.DB()\n\t\tl = db.label.get_by_id(labelID)\n\t\tlx = db.label.get()\n\t\treturn webob.Response(body=musicdb.view.label.label_form(lx, l))\n\n\t@core.authenticated('EditData')\n\tdef POST(self, request, labelID):\n\t\tdb = musicdb.model.db.DB()\n\t\tif 'delete' in request.POST:\n\t\t\tdb.label.delete(labelID)\n\t\t\traise webob.exc.HTTPOk()\n\t\telif 'update' in request.POST:\n\t\t\tdb.label.update(labelID, \n request.POST['LabelName'], \n request.POST['ParentID'] if request.POST['ParentID'] != u'\\u2002' else None)\n\t\t\traise webob.exc.HTTPOk()\n\t\telse:\n\t\t\traise webob.exc.HTTPNotImplemented(labelID + ': ' + repr(request.POST))\n\nclass LabelAddForm(core.Resource):\n\t@core.xslt\n\tdef GET(self, request):\n\t\tdb = musicdb.model.db.DB()\n\t\tlx = db.label.get()\n\t\treturn webob.Response(body=musicdb.view.label.label_form(lx))\n\n\t@core.authenticated('EditData')\n\tdef POST(self, request):\n\t\tdb = musicdb.model.db.DB()\n\t\tdb.label.add(\n request.POST['LabelName'], \n request.POST['ParentID'] if request.POST['ParentID'] != u'\\u2002' else None)\n\t\traise webob.exc.HTTPCreated()\n\nclass LabelRoot(core.Resource):\n\t@core.xslt\n\tdef GET(self, request):\n\t\tdb = musicdb.model.db.DB()\n\t\tl = db.label.get()\n\t\treturn webob.Response(body=musicdb.view.label.label_list(l))\n\n","sub_path":"musicdb/ctrl/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"146047756","text":"from tinydb import Query, TinyDB, where\ndb = TinyDB('db.json')\n\ndef get_books_dict():\n return db.all()\n\ndef add_or_update(id, name):\n Book = Query()\n search_result = db.search(Book.id == id)\n assert(len(search_result) <= 1)\n if not search_result:\n db.insert({'id': id, 'name': name})\n return True\n else:\n db.update({'name': name}, Book.id == id)\n return False\n\ndef remove_entry(id):\n db.remove(where('id') == id)\n\n","sub_path":"gdrivesync/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"406818590","text":"class Solution:\n def rob(self, nums: [int]) -> int:\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n\n dp = [0] * len(nums)\n for idx, num in enumerate(nums):\n if idx == 0:\n dp[idx] = num\n continue\n\n if idx == 1:\n dp[idx] = max(dp[0], num)\n continue\n\n dp[idx] = max(dp[idx - 2] + num, dp[idx - 1]) \n\n return max(dp[-1], dp[-2]) if len(dp) >= 2 else dp[-1]\n\nprint(Solution().rob([1,2]))\n","sub_path":"Solutions/198HouseRobber.py","file_name":"198HouseRobber.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"360918174","text":"import pymysql\nimport time\nimport 
contextlib\n\n\n# 定义上下文管理器,连接后自动关闭连接\n@contextlib.contextmanager\ndef mysql(host='123.206.227.74', port=3306, user='root', password='exue2017', db='sit_exue_resource', charset='utf8'):\n conn = pymysql.connect(host=host, port=port, user=user, passwd=password, db=db, charset=charset)\n cur = conn.cursor(cursor=pymysql.cursors.DictCursor)\n try:\n yield cur\n finally:\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef get_max(sql):\n with mysql() as cur:\n cur.execute(sql)\n return int(cur.fetchone().get('data'))\n\n\ndef get_subject_info(subject_key):\n with mysql() as cur:\n cur.execute('select * from t_res_subject WHERE summary_key = \"{subject_key}\"'.format(subject_key=subject_key))\n return cur.fetchone()\n\n\nmax_book_id = get_max(\"SELECT MAX(book_id) as data from t_res_book\") + 100\nmax_unit_id = get_max(\"SELECT MAX(unit_id) as data from t_res_units\") + 2000\nmax_chapter_id = get_max(\"SELECT MAX(chapter_id) as data from t_res_chapter\") + 4000\ncreate_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\nsql_book = \"INSERT INTO t_res_book (`book_id`, `book_name`, `book_version`, `edition_id`, `subject_code`, `subject_name`, `create_time`, `finish`) \" \\\n \"VALUES ('{book_id}', '{book_name}', '{book_version}', '{edition_id}', '{subject_code}', '{subject_name}', \" \\\n \"'{create_time}', '1');\"\nsql_grade_book = \"INSERT INTO t_res_graduate_book (`book_id`, `grade`, `semester`, `create_time`) \" \\\n \"VALUES ('{book_id}', '{grade}', '2', '{create_time}');\"\nsql_unit = \"INSERT INTO t_res_units (`unit_id`, `unit_name`, `book_id`, `create_time`) \" \\\n \"VALUES ('{unitId}', '{unitName}', '{bookId}', '{create_time}');\"\nsql_chapter = \"INSERT INTO t_res_chapter (`chapter_id`, `chapter_name`, `unit_id`, `book_id`, `summary_key`, `create_time`, `finish`) \" \\\n \"VALUES ('{chapter_id}', '{chapter_name}', '{unit_id}', '{book_id}', '{summary_key}', '{create_time}', '1');\"\n\n\ndef insert_sql(index, summary_key, grade, book_name, book_version, edition_id):\n f_write = open(\"%s_sql.txt\" % book_name, mode=\"w\", encoding=\"utf8\")\n book_id = max_book_id + index\n unit_id = max_unit_id + index * 50\n chapter_id = max_chapter_id + index * 100\n subject = get_subject_info(subject_key=summary_key)\n f_write.write(\n sql_book.format(book_id=book_id, book_name=book_name, book_version=book_version, edition_id=edition_id,\n subject_code=subject.get('summary_code'), subject_name=subject.get('summary_name'),\n create_time=create_time) + '\\n')\n f_write.write(sql_grade_book.format(book_id=book_id, grade=grade, create_time=create_time) + '\\n') # 年级与课本关系\n f = open(book_name + \".txt\", mode=\"r\", encoding=\"utf8\")\n unit = []\n data = f.readlines()\n f.close()\n for index, line in enumerate(data):\n if \"_*_\" in line:\n unit.append(index)\n unit.append(len(data))\n for index, item in enumerate(unit[:-1]):\n f_write.write(sql_unit.format(unitId=unit_id, unitName=data[item].strip().replace('_*_', ''),\n bookId=book_id, create_time=create_time) + '\\n') # 单元\n chapters = data[item + 1:unit[index + 1]]\n for chapter in chapters:\n f_write.write(sql_chapter.format(chapter_id=chapter_id, chapter_name=chapter.lstrip().strip(),\n unit_id=unit_id, book_id=book_id, summary_key=summary_key,\n create_time=create_time) + '\\n')\n chapter_id += 1\n unit_id += 1\n\n\ndef main():\n # 序号,学科key,年级数字,书名,出版社,出版社id\n insert_sql(1, \"yw\", \"1\", \"语文S版(新版)一年级下\", \"语文出版社\", \"155199\")\n insert_sql(2, \"yw\", \"1\", \"语文S版(新版)一年级上\", \"语文出版社\", \"155199\")\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"工作/添加课本/0607/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"46892388","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\nimport requests\n\n\n\n#*\n\n\n\n\ndef pixivToName(pixivurl):\n\theaders={'http-equiv':'Content-Type',\\\n\t'content':'text/html',\\\n\t'charset':'utf-8',\\\n\t'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36))'}\n\tr = requests.get(pixivurl,headers=headers)\n\tprint(u''.join(r.text))\n\n#\tprint r.encoding\n\n\ntesturl=\"https://www.pixiv.net/member_illust.php?mode=medium&illust_id=62743924\"\npixivToName(testurl)\n","sub_path":"src/pixivToName.py","file_name":"pixivToName.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"374120539","text":"import os\nimport pytest\nimport types\nimport virtool.jobs.build_index\n\n\n@pytest.fixture\ndef fake_otus():\n return [\n {\n \"_id\": \"foo\",\n \"isolates\": [\n {\n \"id\": \"foo_1\",\n \"default\": True,\n \"sequences\": [\n {\n \"_id\": \"1\",\n \"sequence\": \"AGAGGATAGAGACACA\"\n },\n {\n \"_id\": \"2\",\n \"sequence\": \"GGGTAGTCGATCTGGC\"\n }\n ]\n },\n {\n \"id\": \"foo_2\",\n \"default\": False,\n \"sequences\": [\n {\n \"_id\": \"3\",\n \"sequence\": \"TTTAGAGTTGGATTAC\",\n \"default\": True\n },\n {\n \"_id\": \"4\",\n \"sequence\": \"AAAGGAGAGAGAAACC\",\n \"default\": True\n }\n ]\n },\n ]\n },\n {\n \"_id\": \"bar\",\n \"isolates\": [\n {\n \"id\": \"bar_1\",\n \"default\": True,\n \"sequences\": [\n {\n \"_id\": \"5\",\n \"sequence\": \"TTTGAGCCACACCCCC\"\n },\n {\n \"_id\": \"6\",\n \"sequence\": \"GCCCACCCATTAGAAC\"\n }\n ]\n }\n ]\n }\n ]\n\n\n@pytest.fixture\ndef mock_job(tmpdir, mocker, request, dbs, test_db_connection_string, test_db_name):\n tmpdir.mkdir(\"references\").mkdir(\"foo\")\n\n settings = {\n \"data_path\": str(tmpdir),\n \"db_name\": test_db_name\n }\n\n dbs.references.insert_one({\n \"_id\": \"foo\",\n \"data_type\": \"genome\"\n })\n\n dbs.jobs.insert_one({\n \"_id\": \"foobar\",\n \"task\": \"build_index\",\n \"args\": {\n \"index_id\": \"bar\",\n \"ref_id\": \"foo\"\n },\n \"proc\": 2,\n \"mem\": 8\n })\n\n queue = mocker.Mock()\n\n job = virtool.jobs.build_index.Job(\n test_db_connection_string,\n test_db_name,\n settings,\n \"foobar\",\n queue\n )\n\n job.init_db()\n\n return job\n\n\n@pytest.mark.parametrize(\"data_type\", [\"genome\", \"barcode\"])\ndef test_check_db(dbs, data_type, tmpdir, mock_job):\n \"\"\"\n Test that method provides the required parameters and that `data_type` is derived correctly.\n\n \"\"\"\n dbs.references.update_one({\"_id\": \"foo\"}, {\n \"$set\": {\n \"data_type\": data_type\n }\n })\n\n mock_job.check_db()\n\n assert mock_job.params == {\n \"data_type\": data_type,\n \"index_id\": \"bar\",\n \"index_path\": os.path.join(str(tmpdir), \"references/foo/bar\"),\n \"ref_id\": \"foo\",\n \"reference_path\": os.path.join(str(tmpdir), \"references/foo\")\n }\n\n\ndef test_mk_index_dir(dbs, tmpdir, mock_job):\n \"\"\"\n Test that index dir is created successfully.\n\n \"\"\"\n mock_job.check_db()\n assert not os.path.exists(mock_job.params[\"index_path\"])\n\n # Path exists after `mk_index_dir` runs.\n mock_job.mk_index_dir()\n assert os.path.exists(mock_job.params[\"index_path\"])\n\n\ndef test_get_patched_otus(mocker, dbs):\n m = 
mocker.patch(\"virtool.db.sync.patch_otu_to_version\", return_value=(None, {\"_id\": \"foo\"}, None))\n\n manifest = {\n \"foo\": 2,\n \"bar\": 10,\n \"baz\": 4\n }\n\n settings = {\n \"data_path\": \"foo\"\n }\n\n patched_otus = virtool.jobs.build_index.get_patched_otus(\n dbs,\n settings,\n manifest\n )\n\n assert isinstance(patched_otus, types.GeneratorType)\n\n assert list(patched_otus) == [\n {\"_id\": \"foo\"},\n {\"_id\": \"foo\"},\n {\"_id\": \"foo\"}\n ]\n\n m.assert_has_calls([\n mocker.call(dbs, settings, \"foo\", 2),\n mocker.call(dbs, settings, \"bar\", 10),\n mocker.call(dbs, settings, \"baz\", 4)\n ])\n\n\n@pytest.mark.parametrize(\"data_type\", [\"genome\", \"barcode\"])\ndef test_get_sequences_from_patched_otus(data_type, mocker, snapshot, dbs, fake_otus):\n sequence_otu_dict = dict()\n\n sequences = virtool.jobs.build_index.get_sequences_from_patched_otus(\n fake_otus,\n data_type,\n sequence_otu_dict\n )\n\n assert isinstance(sequences, types.GeneratorType)\n\n snapshot.assert_match(list(sequences))\n snapshot.assert_match(sequence_otu_dict)\n\n\ndef test_remove_unused_index_files(tmpdir):\n \"\"\"\n Test that all and only non-active indexes are removed.\n\n \"\"\"\n active_index_ids = [\n \"foo\",\n \"baz\"\n ]\n\n index_ids = [\n \"foo\",\n \"bar\",\n \"baz\",\n \"boo\"\n ]\n\n for index_id in index_ids:\n tmpdir.mkdir(index_id).join(\"test.fa\").write(\"hello world\")\n\n for index_id in index_ids:\n assert os.listdir(os.path.join(str(tmpdir), index_id)) == [\"test.fa\"]\n\n virtool.jobs.build_index.remove_unused_index_files(\n str(tmpdir),\n active_index_ids\n )\n\n assert set(os.listdir(str(tmpdir))) == set(active_index_ids)\n\n for index_id in active_index_ids:\n assert os.listdir(os.path.join(str(tmpdir), index_id)) == [\"test.fa\"]\n\n\ndef test_write_sequences_to_file(snapshot, tmpdir):\n sequences = [\n {\n \"_id\": \"foo\",\n \"sequence\": \"ATTGAGAGATAGAGACAC\"\n },\n {\n \"_id\": \"bar\",\n \"sequence\": \"GGGTACGAGTTTCTATCG\"\n },\n {\n \"_id\": \"baz\",\n \"sequence\": \"GGCTTCGGACTTTTTTCG\"\n }\n ]\n\n path = os.path.join(str(tmpdir), \"output.fa\")\n\n virtool.jobs.build_index.write_sequences_to_file(path, sequences)\n\n with open(path, \"r\") as f:\n snapshot.assert_match(f.read())\n","sub_path":"tests/jobs/test_build_index.py","file_name":"test_build_index.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"490368266","text":"# Roll No.:2018201\r\n# Name:Tushar\r\n# Section:A\r\n# Group:1\r\n#CSE-IP HW-2\r\n#K-Map minimization\r\n\r\n\r\n\r\ndef stringtolist():\r\n\r\n \"\"\"Changes the input strings into two different lists (i.e. 
minterms and dontcares\r\n Precondition:strings are of type m0,m1,m2,...,mn and d0,d1,d2,...dn.\"\"\"\r\n global nvar,minterms, dontcares\r\n\r\n minterms=minterms.replace(\",\",\" \") #replacing comma (,) with spaces\r\n dontcares=dontcares.replace(\",\",\" \") #replacing comma (,) with spaces\r\n minterms=minterms.split() #spliiting minterms string into minterms list\r\n dontcares=dontcares.split() #spliiting dontcares string into dontcares list\r\n minterms=list(map(int,minterms)) #converting data type from string to int\r\n dontcares=list(map(int,dontcares)) #converting data type from string to int\r\n\r\n\r\n\r\ndef dectobin():\r\n \"\"\"Converts the element of minterms and dontcares into binary and appends into a new list binmin\"\"\"\r\n global minterms,dontcares,binmin\r\n binmin=[] #creating an empty list\r\n\r\n minterms.extend(dontcares) #extending the minterms list\r\n for c in minterms:\r\n binm =((bin(c)[2:])) #using bin function to get the binary of the integer\r\n if len(binm)==1:\r\n binm=\"000\"+ binm\r\n if len(binm)==2:\r\n binm=\"00\"+ binm\r\n if len(binm)==3:\r\n binm=\"0\"+ binm #making all the binary number of 4 bits\r\n binmin.append(binm) #appending binary numbers in the binmin list\r\n\r\n\r\ndef prime2reducer():\r\n \"\"\"Creates Prime implicants of size 2 and keeps record of minterms ,the prime implicants are made of\"\"\"\r\n global binmin,bindont,prime2,prime2dec\r\n prime2=[] #keeps the binary representatiion of prime implicants\r\n prime2dec=[] #keeps the minterms in decimal of which the respective prime implicant is made of\r\n\r\n\r\n for i in range (len(binmin)):\r\n y=0 #Flag to catch the terms which are present in no prime implicant\r\n for j in range (len(binmin)):\r\n z = 0 #Flag to catch that two binary numbers differ at only one place\r\n for k in range (0,4):\r\n\r\n\r\n\r\n if binmin[i][k]!=binmin[j][k]: #comparing bits of the of the binary numbers\r\n x=k #index of the different bit\r\n\r\n z=z+1\r\n\r\n if z == 1: #checking if they differ only by 1\r\n y=y+1\r\n\r\n if x==0:\r\n r=\"-\"+binmin[i][1:] #replacing the different bit with \"-\"\r\n elif x==3:\r\n r=binmin[i][:3]+\"-\" #replacing the different bit with \"-\"\r\n else:\r\n r=binmin[i][:x]+\"-\"+binmin[i][x+1:] #replacing the different bit with \"-\"\r\n if r in prime2: #checking if prime implicant already present in list\r\n prime2=prime2\r\n else:\r\n prime2.append(r) #appending the prime implicant if not present\r\n prime2dec.append([minterms[i],minterms[j]]) #appending the list of respective minterms of prime implicants\r\n\r\n if y==0: #if any binary term isn't icluded in a prime implicant\r\n prime2.append(binmin[i]) #appending binary term in the list\r\n prime2dec.append([minterms[i]]) #appending its respective minterm\r\n\r\n\r\n\r\ndef prime4reducer():\r\n \"\"\"Creates Prime implicants of size 4 and keeps record of minterms ,the prime implicants are made of\"\"\"\r\n global prime2,prime4,prime4dec\r\n prime4=[] #keeps the binary representatiion of prime implicants\r\n prime4dec=[] #keeps the minterms in decimal of which the respective prime implicant is made of\r\n\r\n\r\n for i in range(len(prime2)):\r\n y = 0 #Flag to catch the terms which are present in no prime implicant of size 4\r\n for j in range(len(prime2)):\r\n z = 0 #Flag to catch that two binary numbers differ at only one place\r\n for k in range(0, 4):\r\n\r\n if prime2[i][k] != prime2[j][k]: #comparing bits of the of the binary numbers\r\n x = k #index of the different bit\r\n\r\n z = z + 1\r\n\r\n if z == 1: 
#checking if they differ only by 1\r\n y = y + 1\r\n\r\n if x == 0:\r\n r = \"-\" + prime2[i][1:] #replacing the different bit with \"-\"\r\n elif x == 3:\r\n r = prime2[i][:3] + \"-\" #replacing the different bit with \"-\"\r\n else:\r\n r = prime2[i][:x] + \"-\" + prime2[i][x + 1:] #replacing the different bit with \"-\"\r\n if r in prime4: #checking if prime implicant already present in list\r\n prime4 = prime4\r\n else:\r\n prime4.append(r) #appending the prime implicant if not present\r\n prime4dec.append(prime2dec[i]+prime2dec[j]) #appending the list of respective minterms of prime implicants\r\n\r\n if y == 0: #if any binary term or prime implicant of size 2 isn't icluded in a prime implicant of size 4\r\n prime4.append(prime2[i]) #appending prime implicant of size 2 and binary terms in the list\r\n prime4dec.append(prime2dec[i]) #appending its respective minterms\r\n\r\n\r\n\r\n\r\ndef prime8reducer():\r\n \"\"\"Creates Prime implicants of size 8 and keeps record of minterms ,the prime implicants are made of\"\"\"\r\n global prime4,prime8,prime8dec\r\n prime8=[] #keeps the binary representatiion of prime implicants\r\n prime8dec=[] #keeps the minterms in decimal of which the respective prime implicant is made of\r\n\r\n for i in range(len(prime4)):\r\n y = 0 #Flag to catch the terms which are present in no prime implicant of size 4\r\n for j in range(len(prime4)):\r\n z = 0 #Flag to catch that two binary numbers differ at only one place\r\n for k in range(0, 4):\r\n\r\n if prime4[i][k] != prime4[j][k]: #comparing bits of the of the binary numbers\r\n x = k #index of the different bit\r\n\r\n z = z + 1\r\n\r\n if z == 1: #checking if they differ only by 1\r\n y = y + 1\r\n\r\n if x == 0:\r\n r = \"-\" + prime4[i][1:] #replacing the different bit with \"-\"\r\n elif x == 3:\r\n r = prime4[i][:3] + \"-\" #replacing the different bit with \"-\"\r\n else:\r\n r = prime4[i][:x] + \"-\" + prime4[i][x + 1:] #replacing the different bit with \"-\"\r\n if r in prime8: #checking if prime implicant already present in list\r\n prime8 = prime8\r\n else:\r\n prime8.append(r) #appending the prime implicant if not present\r\n prime8dec.append(prime4dec[i] + prime4dec[j]) #appending the list of respective minterms of prime implicants\r\n\r\n if y == 0: #if any binary term or prime implicant of size 2 or prime implicant of size 4 isn't icluded in a prime implicant of size 8\r\n prime8.append(prime4[i]) #appending that term in the list\r\n prime8dec.append(prime4dec[i]) #appending its respective minterms\r\n\r\n\r\n\r\n\r\ndef prime16reducer():\r\n \"\"\"Creates Prime implicants of size 16 and keeps record of minterms ,the prime implicants are made of\"\"\"\r\n global prime8,prime16,prime16dec\r\n prime16=[] #keeps the binary representatiion of prime implicants\r\n prime16dec=[] #keeps the minterms in decimal of which the respective prime implicant is made of\r\n\r\n\r\n for i in range(len(prime8)):\r\n y = 0 #Flag to catch the terms which are present in no prime implicant of size 16\r\n for j in range(len(prime8)):\r\n z = 0 #Flag to catch that two binary numbers differ at only one place\r\n for k in range(0, 4):\r\n\r\n if prime8[i][k] != prime8[j][k]: #comparing bits of the of the binary numbers\r\n x = k #index of the different bit\r\n\r\n z = z + 1\r\n\r\n if z == 1: #checking if they differ only by 1\r\n y = y + 1\r\n\r\n if x == 0:\r\n r = \"-\" + prime8[i][1:] #replacing the different bit with \"-\"\r\n elif x == 3:\r\n r = prime8[i][:3] + \"-\" #replacing the different bit with \"-\"\r\n 
else:\r\n r = prime8[i][:x] + \"-\" + prime8[i][x + 1:] #replacing the differing bit with \"-\"\r\n if r in prime16: #prime implicant already present in the list\r\n pass\r\n else:\r\n prime16.append(r) #appending the prime implicant if not present\r\n prime16dec.append(prime8dec[i]+prime8dec[j]) #appending its respective minterms\r\n\r\n if y == 0: #if any binary term or prime implicant of size 2, 4 or 8 isn't included in a prime implicant of size 16\r\n prime16.append(prime8[i]) #appending that term to the list\r\n prime16dec.append(prime8dec[i]) #appending its respective minterms\r\n\r\ndef dontcareimplemt(string1,string1dec):\r\n \"\"\"Implements the don't-care handling and removes the redundant don't-care terms.\r\n string1 is the list of prime implicants.\r\n string1dec is the list of minterms of the respective prime implicants\"\"\"\r\n\r\n p1 = -1 # index in string1dec\r\n for c in string1dec:\r\n p1 = p1 + 1\r\n p = 0; z = 0 #flags for keeping count of don't-cares and repeated minterms in a prime implicant\r\n for i in range(len(c)):\r\n y = 0; u = 0 #flags for making sure they don't get counted more than once\r\n for k in range(len(string1dec)):\r\n\r\n if c[i] in dontcares: #if the element is found in dontcares\r\n y = y + 1\r\n if y == 1: #if counted once\r\n p = p + 1 #increase p by 1\r\n elif c!=string1dec[k]: #if the current minterm list is not the selected one\r\n if c[i] in string1dec[k]: #if the element is found in the minterm list\r\n u=u+1\r\n if u==1: #if counted once\r\n z=z+1 #increase z by 1\r\n\r\n\r\n if p >= 1 and p+z == len(c):\r\n string1dec[p1] = [\"*\"] #replacing redundant don't-care terms in the minterm list\r\n string1[p1] = [\"*\"] #replacing redundant don't-care terms in the prime implicant list\r\n\r\n\r\n\r\n\r\n\r\ndef removeredundant(string2,string2dec):\r\n \"\"\"Removes the redundant terms.\r\n string2 is the list of prime implicants.\r\n string2dec is the list of minterms of the respective prime implicants\"\"\"\r\n q = -1 # index of string2dec\r\n for c in string2dec:\r\n q = q + 1\r\n flag = [] #flag list to keep count of how many times the respective terms are repeated\r\n for i in range(len(c)):\r\n coun = 0 #counts the no. of times a particular minterm is repeated\r\n for k in range(len(string2dec)):\r\n coun += string2dec[k][:].count(c[i])\r\n flag.append(coun) #appending the count to the list\r\n di = 0 #for counting how many terms are repeated more than once\r\n for c in flag:\r\n if c > 1:\r\n di = di + 1\r\n if di == len(flag): #if all the terms are repeated more than once\r\n string2dec[q] = [\"*\"] #replacing redundant terms in the minterm list\r\n string2[q] = [\"*\"] #replacing redundant terms in the prime implicant list\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef var2outp():\r\n \"\"\"Removes [\"*\"] terms and formats the output for a 2-variable K-map\"\"\"\r\n global output, prime4, prime4dec,prime41dec,prime42dec,prime41,prime42,stringOut\r\n prime41dec=[] #new lists to hold prime implicants and minterms after removing redundant terms\r\n prime42dec=[]\r\n prime41 = []\r\n prime42 = []\r\n dontcareimplemt(prime4,prime4dec) #removing redundant don't-care terms\r\n\r\n\r\n for c in prime4dec: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime41dec.append(c) #append to the new list\r\n for c in prime4: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime41.append(c) #append to the new list\r\n\r\n removeredundant(prime41,prime41dec) #removing redundant terms\r\n\r\n\r\n\r\n\r\n for c in prime41dec: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime42dec.append(c) #append to the new list\r\n for c in prime41: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime42.append(c) #append to the new list\r\n if prime42 == []: #if the resultant prime implicant list is empty\r\n output.append(\"0\") #output is zero\r\n\r\n\r\n else:\r\n for c in prime42:\r\n\r\n if c[2:]!=\"--\": #if the last two bits are not dashes\r\n if c[2]==\"0\": #if 3rd bit is 0\r\n bit=\"w'\"\r\n elif c[2]==\"1\": #if 3rd bit is 1\r\n bit=\"w\"\r\n elif c[2]== \"-\": #if 3rd bit is \"-\"\r\n bit=\"\"\r\n if c[3]==\"0\":\r\n bit=bit + \"x'\"\r\n elif c[3]==\"1\":\r\n bit=bit + \"x\"\r\n elif c[3]==\"-\":\r\n bit=bit+\"\"\r\n\r\n output.append(bit) #bit is the character representation of 1 minterm\r\n\r\n\r\n else:\r\n output.append(\"1\") #output is 1 if the last two places of the minterm are dashes\r\n output.sort() # sorting the terms lexicographically\r\n for c in output:\r\n if stringOut == \"\":\r\n stringOut = c\r\n else:\r\n stringOut = stringOut + \"+\" + c # joining results of the different minterms\r\n\r\n\r\n\r\n\r\n\r\ndef var3outp():\r\n \"\"\"Removes [\"*\"] terms and formats the output for a 3-variable K-map\"\"\"\r\n global output, prime8, prime8dec,prime81dec,prime81,prime82dec,prime82,stringOut\r\n prime81dec = [] #new lists to hold prime implicants and minterms after removing redundant terms\r\n prime81=[]\r\n prime82=[]\r\n prime82dec = []\r\n dontcareimplemt(prime8,prime8dec) #removing redundant don't-care terms\r\n for c in prime8dec: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime81dec.append(c) #append to the new list\r\n for c in prime8: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime81.append(c) #append to the new list\r\n removeredundant(prime81,prime81dec) #removing redundant terms\r\n for c in prime81dec: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime82dec.append(c) #append to the new list\r\n for c in prime81: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime82.append(c) #append to the new list\r\n if prime82 == []: #if the resultant prime implicant list is empty\r\n output.append(\"0\") #output is zero\r\n else:\r\n for c in prime82:\r\n if c[1:] != \"---\": #if the last three bits are not dashes\r\n if c[1] == \"0\": #if 2nd bit is 0\r\n bit = \"w'\"\r\n elif c[1] == \"1\": #if 2nd bit is 1\r\n bit = \"w\"\r\n elif c[1] == \"-\": #if 2nd bit is -\r\n bit = \"\"\r\n if c[2] == \"0\":\r\n bit = bit + \"x'\"\r\n elif c[2] == \"1\":\r\n bit = bit + \"x\"\r\n elif c[2] == \"-\":\r\n bit = bit + \"\"\r\n if c[3] == \"0\":\r\n bit = bit + \"y'\"\r\n elif c[3] == \"1\":\r\n bit = bit + \"y\"\r\n elif c[3] == \"-\":\r\n bit = bit + \"\"\r\n\r\n output.append(bit) #bit is the character representation of 1 minterm\r\n\r\n # Formatting in terms of a, b and c instead of 0, 1 and -.\r\n else:\r\n output.append(\"1\") #output is 1 if the last three places of the minterm are dashes\r\n output.sort() # sorting the terms lexicographically\r\n for c in output:\r\n if stringOut == \"\":\r\n stringOut = c\r\n else:\r\n stringOut = stringOut + \"+\" + c # joining results of the different minterms\r\n\r\n\r\n\r\n\r\n\r\ndef var4outp():\r\n \"\"\"Removes [\"*\"] terms and formats the output for a 4-variable K-map\"\"\"\r\n global output,prime16,prime16dec,prime161dec,prime162dec,prime161,prime162,stringOut\r\n prime161dec = [] #new lists to hold prime implicants and minterms after removing redundant terms\r\n prime161 = []\r\n prime162 = []\r\n prime162dec = []\r\n dontcareimplemt(prime16,prime16dec) #removing redundant don't-care terms\r\n\r\n for c in prime16dec: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime161dec.append(c) #append to the new list\r\n for c in prime16: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime161.append(c) #append to the new list\r\n\r\n removeredundant(prime161,prime161dec) #removing redundant terms\r\n for c in prime161dec: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime162dec.append(c) #append to the new list\r\n for c in prime161: #removing [\"*\"] terms and appending the rest to a new list\r\n if c!=[\"*\"]: #if term not equal to [\"*\"]\r\n prime162.append(c) #append to the new list\r\n if prime162 == []: #if the resultant prime implicant list is empty and the don't-cares list is not empty\r\n output.append(\"0\") #output is zero\r\n else:\r\n for c in prime162:\r\n if c[:] != \"----\": #if all the bits are not dashes\r\n if c[0] == \"0\": #if 1st bit is 0\r\n bit = \"w'\"\r\n elif c[0] == \"1\": #if 1st bit is 1\r\n bit = \"w\"\r\n elif c[0] == \"-\": #if 1st bit is -\r\n bit = \"\"\r\n if c[1] == \"0\":\r\n bit = bit + \"x'\"\r\n elif c[1] == \"1\":\r\n bit = bit + \"x\"\r\n elif c[1] == \"-\":\r\n bit = bit + \"\"\r\n if c[2] == \"0\":\r\n bit = bit + \"y'\"\r\n elif c[2] == \"1\":\r\n bit = bit + \"y\"\r\n elif c[2] == \"-\":\r\n bit = bit + \"\"\r\n if c[3] == \"0\":\r\n bit = bit + \"z'\"\r\n elif c[3] == \"1\":\r\n bit = bit + \"z\"\r\n elif c[3] == \"-\":\r\n bit = bit + \"\"\r\n\r\n output.append(bit) #bit is the character representation of 1 minterm\r\n\r\n # Formatting in 
terms of a,b,c and d instead of 0,1 and -.\r\n\r\n else:\r\n\r\n output.append(\"1\") #output is 1 if all bits of minterm are the dashes.\r\n\r\n output.sort() #sorting the terms lexographically\r\n for c in output:\r\n if stringOut==\"\":\r\n stringOut=c\r\n else:\r\n stringOut=stringOut+ \"+\" + c #joining results of the different minterms\r\n\r\n\r\n\r\ndef minFunc(numVar, stringIn):\r\n \"\"\"Changes input string into minterm and dontcare strings separately\r\n numVar is no. of variables.\r\n stringIn is a string of the format (a0,a1,a2, ...,an) d(d0,d1, ...,dm)\"\"\"\r\n global nvar,minterms,dontcares,output,stringOut\r\n stringOut=\"\"\r\n output=[]\r\n nvar=numVar\r\n e=stringIn.index(\")\") # index of first \")\"\r\n minterms=stringIn[1:e] #slicing string for the minterm\r\n d=stringIn.index(\"(\",e) #index of first \"(\" after index e.\r\n dontcares=stringIn[d+1:-1] #slicing string for the dontcares\r\n stringtolist()\r\n dectobin()\r\n prime2reducer()\r\n prime4reducer()\r\n prime8reducer()\r\n prime16reducer()\r\n if nvar==2:\r\n var2outp()\r\n if nvar==3:\r\n var3outp()\r\n if nvar==4:\r\n var4outp()\r\n\r\n return stringOut\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":22549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"289093811","text":"# Adapted from: https://www.tensorflow.org/tutorials/load_data/csv\nimport os\nfrom dataclasses import dataclass\nfrom typing import List, OrderedDict\n\nimport tensorflow as tf\n\nfrom .base import Reader, ReaderConfig, UNKNOWN_LABEL\nfrom .._logging import get_logger\nfrom ..custom_types import DataType, DataWithInfo\n\n_logger = get_logger(__name__)\n\n\n# CSV file should not contain any whitespace between the delimiter and actual field, in such cases the field should\n# be quoted. There should be no empty lines in the CSV file. Quotes within a quoted field should be escaped with\n# another quote. 
Newline characters should be actual newlines, not \\n characters.\n@dataclass(frozen=True)\nclass CSVFileConfig(ReaderConfig):\n path: str\n header_present: bool\n text_column_number: int\n label_column_number: int\n num_columns: int\n num_records: int\n label_values: List[str]\n shuffle_buffer_size: int\n delimiter: chr = ','\n\n @property\n def data_type(self) -> DataType:\n return DataType.TEXT\n\n\nclass CSVFileReader(Reader):\n @staticmethod\n def read_data(config: CSVFileConfig) -> DataWithInfo:\n # Mapping label -> label index\n mapping = tf.lookup.StaticHashTable(\n initializer=tf.lookup.KeyValueTensorInitializer(keys=tf.constant(config.label_values),\n values=tf.constant(list(range(len(config.label_values))))),\n default_value=UNKNOWN_LABEL\n )\n\n # index = number - 1\n index_text = config.text_column_number - 1\n index_label = config.label_column_number - 1\n\n # Define column names so that label column can be identified\n column_names = [str(i) for i in range(config.num_columns)]\n column_names[index_text] = \"text\"\n column_names[index_label] = \"label\"\n\n # Set number of threads to number of available CPU cores or 1 if number cannot be obtained\n cpu_count = os.cpu_count()\n if not cpu_count:\n cpu_count = 1\n _logger.debug(f\"{cpu_count} thread(s) will be used to load CSV dataset at {config.path}\")\n\n def get_text_and_numeric_label(line: OrderedDict):\n return line[\"text\"][0], mapping.lookup(line[\"label\"][0])\n\n data = tf.data.experimental.make_csv_dataset(\n file_pattern=config.path,\n batch_size=1,\n column_names=column_names,\n column_defaults=[\"string\" for _ in range(config.num_columns)],\n select_columns=[index_text, index_label],\n field_delim=config.delimiter,\n use_quote_delim=True,\n header=config.header_present,\n num_epochs=1,\n shuffle=True,\n shuffle_buffer_size=config.shuffle_buffer_size,\n num_parallel_reads=cpu_count,\n sloppy=True\n )\n data = data.map(get_text_and_numeric_label, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return DataWithInfo(data=data, size=config.num_records, num_labels=len(config.label_values))\n","sub_path":"snoopy/reader/csv_file.py","file_name":"csv_file.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"463739621","text":"import sys\nimport tensorflow as tf\nfrom utils import *\nfrom tensorflow.python.ops import variable_scope\nfrom cells2 import *\nimport pickle\n\n\n\n\ndataset = \"train/MOSI/\"\n\n\n\n\n#2096 lines\n\n\nwith open(dataset+'video_sentence_dataset.pkl', 'rb') as f:\n data_x = pickle.load(f)\n\n\nembedding_size = data_x[0].shape[1]\nprint(\"embedding_size\",embedding_size)\nwith open(dataset+'list.txt', 'r') as f:\n list = f.readlines()\n\nwith open(dataset+'list_filtered.txt', 'r') as f:\n list_filtered = f.readlines()\n\n#delete according to index\nwith open(dataset+'target.txt', 'r') as f:\n data_y_ = f.readlines()\n data_y = []\n for i,y in enumerate(data_y_):\n if(list[i] in list_filtered):\n data_y.append(float(y.strip()))\n\nassert len(data_x) == len(data_y), \"NOK\"\nprint (\"Reading training data\")\n\ndata_x_ = []\ndata_y_ = []\nsrc_max = -1\nfor x,y in zip(data_x,data_y):\n if(x.shape[0] > 70):\n continue\n if y == 0.0:\n continue\n if y < 0.0:\n y=-1\n else:\n y=1\n\n sentence_length = x.shape[0]\n if(sentence_length>src_max):\n src_max = sentence_length\n data_x_.append(x)\n data_y_.append(y)\n\ndata_x = np.array(data_x_)\ndata_y = np.array(data_y_)\nPAD_ID = 
np.random.uniform(-math.sqrt(3), math.sqrt(3), embedding_size)\nassert len(data_x) == len(data_y), \"NOK\"\nprint('src_max', src_max)\n# randomize = np.arange(len(data_x))\n# np.random.shuffle(randomize)\n# data_x = data_x[randomize]\n# data_y = data_y[randomize]\n\n\n\ndata_size = len(data_x)\nprint(\"data_size\",data_size)\n\ntest_size = int(len(data_x)/10)\ntrain_size = data_size-test_size\nprint(\"train_size\", train_size)\nprint(\"test_size\", test_size)\nk_fold = 10\n\n# model settings\ndtype = tf.float32\ninitializer = tf.truncated_normal_initializer(stddev=0.0001)\ncell_fw = GRUCell(1024)\ncell_bw = GRUCell(512)\n\n\n\ndropout_rate_rnn = 0.0\ndropout_rate_fc = 0.0\n\ndropout_rnn = tf.Variable(1 - dropout_rate_rnn, trainable=False, name='dropout_keep_prob')\ndropout_rnn_input = tf.Variable(1.0, trainable=False, name='input_dropout_keep_prob')\n\n\ndropout_output = dropout_rnn\ndropout_state = dropout_rnn\ndropout_input = dropout_rnn_input\n\nencoder_cell_fw = DropoutWrapper_custom(cell_fw, output_keep_prob=dropout_output, state_keep_prob=dropout_state,\n input_keep_prob=dropout_input, variational_recurrent=False)\nencoder_cell_bw = DropoutWrapper_custom(cell_bw, output_keep_prob=dropout_output, state_keep_prob=dropout_state,\n input_keep_prob=dropout_input, variational_recurrent=False)\n\nbatch_size = 100\nmax_step = src_max\n\n\n\n# encoder_inputs =[]\n# for i in range(max_step):\n# encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],\n# name=\"encoder{0}\".format(i)))\n\nencoder_inputs = tf.placeholder(tf.float32, [None, None, embedding_size])\ny = tf.placeholder(tf.float32, [None, 2])\nx_lengths = tf.placeholder(tf.int32, [None])\ndropout_inputs_rnn = tf.placeholder(tf.float32)\ndropout_inputs_fc = tf.placeholder(tf.float32)\n\n\nn_input = 2048\nn_hidden = 1024\nn_hidden2 = 512\nn_hidden3 = 256\nn_hidden4 = 64\nn_output = 2\n\n\n\nw = {\n # 'h1': variable_scope.get_variable(\"h1\", [n_input, n_hidden]),\n 'h2': variable_scope.get_variable(\"h2\", [n_hidden, n_hidden2]),\n 'h3': variable_scope.get_variable(\"h3\", [n_hidden2, n_hidden3]),\n 'h4': variable_scope.get_variable(\"h4\", [n_hidden3, n_hidden4]),\n 'out': variable_scope.get_variable(\"out_h\", [n_hidden4, n_output])\n}\nb = {\n # 'b1': variable_scope.get_variable(\"b1\", [n_hidden]),\n 'b2': variable_scope.get_variable(\"b2\", [n_hidden2]),\n 'b3': variable_scope.get_variable(\"b3\", [n_hidden3]),\n 'b4': variable_scope.get_variable(\"b4\", [n_hidden4]),\n 'out': variable_scope.get_variable(\"out_b\", [n_output])\n}\n\n\ndef RNN(x, x_lengths, weights, biases, dropout_rnn, dropout_fc):\n\n # x = tf.unstack(x, max_step, 1)\n\n encoder_cell_fw.set_dropout(dropout_rnn)\n encoder_cell_bw.set_dropout(dropout_rnn)\n\n #\n # outputs, states = tf.nn.bidirectional_dynamic_rnn(encoder_cell_fw,\n # encoder_cell_bw,\n # x,\n # sequence_length=x_lengths,\n # dtype=dtype)\n #\n # output = (tf.concat(states, 1))\n\n\n outputs, state = tf.nn.dynamic_rnn(encoder_cell_fw,\n x,\n sequence_length=x_lengths,\n dtype=dtype)\n def last_relevant(output, length):\n batch_size = tf.shape(output)[0]\n max_length = tf.shape(output)[1]\n output_size = tf.shape(output)[2]\n index = tf.range(0, batch_size) * max_length + (length - 1)\n flat = tf.reshape(output, [-1, output_size])\n relevant = tf.gather(flat, index)\n return relevant\n\n output = last_relevant(outputs, x_lengths)\n # outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(encoder_cell_fw, encoder_cell_bw, x,\n # dtype=tf.float32)\n # output = outputs[-1]\n\n #\n def 
multilayer_perceptron(x_, weights, biases, dropout):\n # Hidden layer with RELU activation\n for i in range(2,5):\n hidden = tf.add(tf.matmul(x_, weights[\"h\"+str(i)]), biases[\"b\"+str(i)])\n x_ = tf.nn.relu(hidden)\n x_ = tf.nn.dropout(x_, dropout)\n out_layer = tf.matmul(x_, weights['out']) + biases['out']\n return out_layer\n\n\n out = multilayer_perceptron(output,w,b, dropout_fc)\n\n # out = tf.matmul(output, weights['out']) + biases['out']\n\n return out, output, x\n\npred, output, x = RNN(encoder_inputs, x_lengths, w, b, dropout_inputs_rnn, dropout_inputs_fc)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n\n# optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\noptimizer = tf.train.AdamOptimizer(learning_rate=0.0003).minimize(cost)\n\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n\ndevice = '/gpu:{}'.format(0)\ntf_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\ntf_config.gpu_options.allow_growth = True\ntf_config.gpu_options.per_process_gpu_memory_fraction = 0.9\n\nwith tf.device(device):\n with tf.Session(config=tf_config) as sess:\n print('using device: {}'.format(device))\n\n\n for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):\n print(v.name)\n\n test_accuracies = []\n max_test_accuacy=-1.0\n\n for i in range(k_fold):\n # Launch the graph\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(tf.global_variables())\n\n\n # si on est pas au dernier k_fold, on prend commme taille de test test_size\n if i < k_fold - 1:\n start = i * test_size\n end = (i + 1) * test_size\n\n test_x = data_x[start:end]\n test_y = data_y[start:end]\n # get index to remove from data_x for train\n r = np.arange(start, end)\n train_x = np.delete(data_x, r, 0)\n train_y = np.delete(data_y, r, 0)\n #train must now be dived into train and dev (last 10% == test_size)\n dev_x = train_x[len(train_x)-test_size: len(train_x)]\n dev_y = train_y[len(train_y)-test_size: len(train_y)]\n r = np.arange(len(train_x)-test_size, len(train_x))\n train_x = np.delete(train_x, r, 0)\n train_y = np.delete(train_y, r, 0)\n\n\n\n if i == k_fold - 1:\n start = i * test_size\n end = len(data_x)\n test_x = data_x[start:end]\n test_y = data_y[start:end]\n # get index to remove test from data_x for train\n r = np.arange(start, end)\n train_x = np.delete(data_x, r, 0)\n train_y = np.delete(data_y, r, 0)\n # train must now be dived into train and dev (last 10% == test_size)\n dev_x = train_x[len(train_x) - test_size: len(train_x)]\n dev_y = train_y[len(train_y) - test_size: len(train_y)]\n r = np.arange(len(train_x) - test_size, len(train_x))\n train_x = np.delete(train_x, r, 0)\n train_y = np.delete(train_y, r, 0)\n\n print(\"Starting k-fold number\", i+1)\n print(\"K-fold\", i+1, \"train size =\", len(train_x))\n print(\"K-fold\", i+1, \"dev size =\", len(dev_x))\n print(\"K-fold\", i+1, \"test size =\", len(test_x))\n\n\n accuracies = []\n best_loss = sys.maxsize\n stop_counter = 0\n step = 0\n best_acc = 0.0\n while True:\n\n dropout_rnn_ = 1.0-dropout_rate_rnn\n dropout_fc_ = 1.0-dropout_rate_fc\n x_batch, y_batch, x_batch_lengths = get_batch_random(train_x, train_y, batch_size, max_step, PAD_ID=PAD_ID)\n x_batch = x_batch.reshape((batch_size, max_step, embedding_size))\n # print(y_batch)\n y_batch = vector_to_one_hot(y_batch)\n\n # print(x_batch)\n # # print(y_batch)\n # sys.exit(1)\n\n\n input_feed = {}\n # 
for l in xrange(50):\n # input_feed[encoder_inputs[l].name] = x_batch[l]\n input_feed[encoder_inputs] = x_batch\n input_feed[y] = y_batch\n input_feed[x_lengths] = x_batch_lengths\n input_feed[dropout_inputs_rnn] = dropout_rnn_\n input_feed[dropout_inputs_fc] = dropout_fc_\n\n sess.run(optimizer, input_feed)\n pred_ = sess.run(pred, input_feed)\n\n # if step % display_step == 0:\n #loss on train set\n loss = sess.run(cost, input_feed)\n\n # rand_ = random.randint(0,10)\n # if rand_ == 5:\n #eval on dev dataset\n dropout_ = 1.0\n x_batch, y_batch, x_batch_lengths = get_batch_fixed(dev_x, dev_y, max_step, PAD_ID=PAD_ID)\n test_size = len(dev_x)\n x_batch = x_batch.reshape((test_size, max_step, embedding_size))\n y_batch = vector_to_one_hot(y_batch)\n\n input_feed = {}\n input_feed[encoder_inputs] = x_batch\n input_feed[y] = y_batch\n input_feed[x_lengths] = x_batch_lengths\n input_feed[dropout_inputs_rnn] = dropout_\n input_feed[dropout_inputs_fc] = dropout_\n\n acc = sess.run(accuracy, input_feed)\n # prediction = sess.run(pred, input_feed)\n\n #\n accuracies.append(acc)\n if acc > best_acc:\n best_acc = acc\n saver.save(sess, 'models/kk', global_step=0)\n\n if loss < best_loss and (best_loss-loss)> 1e-6:\n best_loss = loss\n stop_counter = 0\n if loss > 0.2:\n stop_counter = 0\n if stop_counter == 100 or loss < 1e-4:\n break\n stop_counter +=1\n\n\n\n print(\"Iter \" + str(step * batch_size) + \" Epoch : \" + str(int((step * batch_size)/len(data_x)))+\", Training minibatch Loss= \" + \\\n \"{:.6f}\".format(loss) + \", Dev Accuracy= \" + \\\n \"{:.5f}\".format(acc))\n\n step += 1\n\n assert np.amax(np.array(accuracies)) == best_acc, \"what ?\"\n print(\"Optimization Finished! Best dev accuracies: {:.5f}\".format(best_acc))\n print(\"Now running this best model to test set\")\n saver.restore(sess, \"models/kk-0\")\n dropout_ = 1.0\n x_batch, y_batch, x_batch_lengths = get_batch_fixed(test_x, test_y, max_step, PAD_ID=PAD_ID)\n test_size_ = len(test_x)\n # x_batch = x_batch.reshape((test_size_, max_step, embedding_size))\n y_batch = vector_to_one_hot(y_batch)\n input_feed = {}\n input_feed[encoder_inputs] = x_batch\n input_feed[y] = y_batch\n input_feed[x_lengths] = x_batch_lengths\n input_feed[dropout_inputs_rnn] = dropout_\n input_feed[dropout_inputs_fc] = dropout_\n acc = sess.run(accuracy, input_feed)\n\n test_accuracies.append(acc)\n print(\"K-fold\", i + 1, \"Test accuracy = {:.5f}\".format(acc))\n if acc + best_acc > max_test_accuacy:\n max_test_accuacy = acc + best_acc\n saver.save(sess, 'models/best-test0')\n\n for i in test_accuracies:\n print(i)\n print(\"Average accuracies : \", np.mean(test_accuracies))\n print('Best test-dev :', max_test_accuacy)\n print(\"Now extracting features for every sentence\")\n saver.restore(sess, \"models/best-test0\")\n dropout_ = 1.0\n x_batch, y_batch, x_batch_lengths = get_batch_fixed(data_x, data_y, max_step, PAD_ID=PAD_ID)\n test_size = len(x_batch)\n # x_batch = x_batch.reshape((test_size, max_step, 1))\n y_batch = vector_to_one_hot_real(y_batch)\n input_feed = {}\n input_feed[encoder_inputs] = x_batch\n input_feed[y] = y_batch\n input_feed[x_lengths] = x_batch_lengths\n input_feed[dropout_inputs_rnn] = dropout_\n input_feed[dropout_inputs_fc] = dropout_\n acc, outs = sess.run([accuracy, output], input_feed)\n print(\"Overall = {:.5f}\".format(acc))\n print(outs.shape)\n np.save('train/MOSI/sentences_repr_video.npy', 
outs)\n","sub_path":"v0.1/MOSI_rnn_video.py","file_name":"MOSI_rnn_video.py","file_ext":"py","file_size_in_byte":14063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"30883834","text":"#!/usr/bin/env python\n\n\"\"\"\nA minimal GAE application that makes an Adaptive API request to PayPal\nand parses the result. Fill in your own 3 Token Credentials and sample\naccount information from your own sandbox account\n\"\"\"\n\nimport random\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import memcache\nfrom django.utils import simplejson as json\n\n\n# Replace these values with your own 3-Token credentials and sample receivers\n# who collect the funds to run this sample code in the developer sandbox\n\nuser_id = \"XXX\"\npassword = \"XXX\"\nsignature = \"XXX\"\nreceiver1, amount1 = \"XXX\", 10.00\nreceiver2, amount2 = \"XXX\", 5.00\nreceiver3, amount3 = \"XXX\", 2.00\n\n\nclass MainHandler(webapp.RequestHandler):\n\n # Helper function to execute requests with appropriate headers\n def _request(self, url, params):\n\n # standard Adaptive Payments headers\n headers = {\n 'X-PAYPAL-SECURITY-USERID' : user_id,\n 'X-PAYPAL-SECURITY-PASSWORD' : password,\n 'X-PAYPAL-SECURITY-SIGNATURE' : signature,\n 'X-PAYPAL-REQUEST-DATA-FORMAT' : 'JSON',\n 'X-PAYPAL-RESPONSE-DATA-FORMAT' : 'JSON',\n 'X-PAYPAL-APPLICATION-ID' : 'APP-80W284485P519543T'\n }\n \n return urlfetch.fetch(\n url, \n payload = json.dumps(params),\n method=urlfetch.POST,\n validate_certificate=True,\n deadline=10, # seconds\n headers=headers\n )\n\n def get(self, mode=\"\"):\n\n # /status - executes PaymentDetails when PayPal redirects back to this app after payment approval\n\n if mode == \"status\":\n\n payKey = memcache.get(self.request.get('sid'))\n\n params = {\n 'requestEnvelope' : {'errorLanguage' : 'en_US', 'detailLevel' : 'ReturnAll'},\n 'payKey' : payKey\n }\n\n result = self._request('https://svcs.sandbox.paypal.com/AdaptivePayments/PaymentDetails', params)\n\n response = json.loads(result.content)\n\n if result.status_code == 200: # OK\n\n # Convert back to indented JSON and display it\n\n pretty_json = json.dumps(response,indent=2)\n self.response.out.write('
<pre>%s</pre>
' % (pretty_json,))\n else:\n self.response.out.write('
<pre>%s</pre>
' % (json.dumps(response,indent=2),))\n\n else: # / (application root) - executed when app loads and initiates a Pay request\n\n # A cheap session implementation that's leveraged in order to look up the payKey\n # from the Pay API and execute PaymentDetails when PayPal redirects back to /status\n\n sid = str(random.random())[5:] + str(random.random())[5:] + str(random.random())[5:]\n\n return_url = self.request.host_url + \"/status\" + \"?sid=\" + sid\n cancel_url = return_url\n\n redirect_url = \"https://www.sandbox.paypal.com/cgi-bin/webscr?cmd=_ap-payment&paykey=\"\n\n params = {\n 'requestEnvelope' : {'errorLanguage' : 'en_US', 'detailLevel' : 'ReturnAll'},\n 'actionType' : 'PAY',\n 'receiverList' : {\n 'receiver' : [\n {'email' : receiver1, 'amount' : amount1, 'primary' : True },\n {'email' : receiver2, 'amount' : amount2, 'primary' : False},\n {'email' : receiver3, 'amount' : amount3, 'primary' : False}\n ],\n },\n 'currencyCode' : 'USD',\n 'memo' : 'Chained payment example.',\n 'cancelUrl' : cancel_url,\n 'returnUrl' : return_url,\n }\n\n result = self._request('https://svcs.sandbox.paypal.com/AdaptivePayments/Pay', params)\n\n response = json.loads(result.content)\n\n if result.status_code == 200: # OK\n\n # Convert back to indented JSON and inject a hyperlink to kick off payment approval\n\n pretty_json = json.dumps(response,indent=2)\n pretty_json = pretty_json.replace(response['payKey'], '<a href=\"%s%s\">%s</a>' % (redirect_url, response['payKey'], response['payKey'],))\n\n memcache.set(sid, response['payKey'], time=60*10) # seconds\n\n self.response.out.write('
<pre>%s</pre>
' % (pretty_json,))\n else:\n self.response.out.write('
<pre>%s</pre>
' % (json.dumps(response,indent=2),))\n\ndef main():\n application = webapp.WSGIApplication([('/', MainHandler),\n ('/(status)', MainHandler)],\n debug=True)\n util.run_wsgi_app(application)\n\nif __name__ == '__main__':\n main()\n","sub_path":"ch04/chained-payment_example/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"608802708","text":"# We declare the variables explicitly\r\nacumulado = int(0)\r\nnumero = str(\"\")\r\n# with True as the while condition the loop runs forever\r\n# until a break statement is executed\r\nwhile True:\r\n numero = input(\"Dame un numero entero:\")\r\n if numero == \"\":\r\n # if numero is empty, report it and leave the loop\r\n print(\"Vacio. Salida del programa.\")\r\n break\r\n else:\r\n # otherwise add the value to the running total with an augmented assignment\r\n acumulado += int(numero)\r\n salida = \"Monto acumulado: {}\"\r\n print(salida.format(acumulado))\r\n","sub_path":"Acumulado.py","file_name":"Acumulado.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"551641615","text":"from json import load\nfrom sys import argv\nfrom cgi import escape\n\nHTML_HEADER = '''\n\n\n\n\n'''\n\nHTML_FOOTER = '''\n
\n\n\n'''\n\nWIDTH = 700\n\ndef generate(fn):\n with open(fn) as fd:\n events = load(fd)\n\n # extract event per frame\n html = []\n tags = {}\n start_mainloop = 0\n for infos in events:\n time, event = infos[:2]\n\n # extract a main event\n if event == 'start-mainloop':\n start_mainloop = time\n index_mainloop = len(html)\n html.append('')\n elif event == 'end-mainloop':\n html[index_mainloop] = '<div class=\"mainloop\">'\n html.append('</div>')\n\n cmd, event = event.split('-', 1)\n\n # analyse commands\n if cmd == 'mark':\n html.append('<div class=\"mark\">{}: {}</div>'.format(\n event, escape(str(infos[2]))))\n elif cmd == 'start':\n index = len(html)\n html.append('')\n tags[event] = (time, index)\n elif cmd == 'end':\n start_time, index = tags[event]\n width = int((time - start_time) * WIDTH / (1 / 60.))\n offset = int((start_time - start_mainloop) * WIDTH / (1 / 60.))\n duration = (time - start_time)\n text = (\n '<div class=\"event\" style=\"width: {1}px; margin-left: {2}px\">'\n '<div class=\"name\">{0}</div>'\n '<div class=\"duration\">{3:.2f}</div>'\n '<div class=\"bar\">'\n '<div class=\"label\">'\n '{0}</div>'\n '</div>'\n '</div>'\n ).format(event, width, offset, duration * 1000)\n\n if duration > 1. / 60.:\n text = '<div class=\"slow\">' + text + '</div>
'\n\n html[index] = text\n\n with open('output.html', 'w') as fd:\n fd.write(HTML_HEADER)\n fd.write(''.join(html))\n fd.write(HTML_FOOTER)\n\n\ngenerate(argv[1])\n","sub_path":"aprofiler/examples/generate-frame-graph.py","file_name":"generate-frame-graph.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"153830961","text":"#!/bin/python\nimport numpy as np\n\n__all__=['stabilityLimit','hw99S','hw99P']\n\n###########################\n# Orbital stability\n############################\ndef stabilityLimit(binary_star_type, mA, mB, ab, eb):\n \"\"\"Routines for caculating dynamical stability\n for Earth-like planets in binary star systems following\n Holman & Wiegert (1999).\n\n Parameters:\n -----------\n mA... mass of primary star [Msun]\n mB... mass of secondary star [Msun]\n ab... semimajor axis of binary orbit [au]\n eb... orbital eccentricity of binary\n\n Calculates:\n -----------\n stability_limit ... maximum (cicumstellar) or minimum (circumbinary)\n stable distance of planet on circular orbit from\n host star(s)\n \"\"\"\n\n tp=binary_star_type\n\n if(ab <= 0):\n raise ValueError('Semimajor axis ab must be >= 0!')\n\n if(eb < 1 and eb >= 0):\n raise ValueError('Eccentricity eb must be < 1!')\n\n if(tp in ['S', 's', 'S-type', 'S-Type', 'circumstellar']):\n stability_limit = hw99S(mA, mB, ab, eb)\n\n elif(tp in ['P', 'p', 'P-type', 'P-Type', 'circumbinary']):\n stability_limit = hw99P(mA, mB, ab, eb)\n else:\n raise ValueError('Binary star type not recognized. \\\n Choose \"S\" or \"P\".')\n\ndef hw99S(mA, mB, ab, eb):\n \"\"\" Circumstellar (S-type) stability limit for binary star systems\n according to Holman & Wiegert (1999)\n\n Parameters:\n -----------\n mA... mass of primary star [Msun]\n mB... mass of secondary star [Msun]\n ab... semimajor axis of binary orbit [au]\n eb... orbital eccentricity of binary\n\n Returns:\n -----------\n ap... maximum stable distance of planet on circular orbit from\n host star\n\n \"\"\"\n mu = mB/(mA+mB)\n eb2 = eb*eb\n ap = ab*(0.464-0.38*mu-0.631*eb+0.586*mu*eb + 0.15*eb2-0.198*mu*eb2)\n return ap\n\ndef hw99P(mA, mB, ab, eb):\n \"\"\" Circumbinary (P-type) stability limit for binary star systems\n according to Holman & Wiegert (1999)\n\n Parameters:\n ----------\n mA... mass of primary star [Msun]\n mB... mass of secondary star [Msun]\n ab... semimajor axis of binary orbit [au]\n eb... orbital eccentricity of binary\n\n Returns:\n -------\n ap... maximum stable distance of planet on circular orbit from\n host star\n\n \"\"\"\n mu = mB/(mA+mB)\n eb2 = eb*eb\n mu2 = mu*mu\n c = [0, 1.6, +5.1, -2.22, 4.12, -4.27, -5.09, 4.61]\n ap = ab*(c[1]+c[2]*eb+c[3]*eb2+c[4]*mu+c[5]*eb*mu +\n c[6]*mu2+c[7]*eb2*mu2)\n return ap","sub_path":"model/dihz/stability.py","file_name":"stability.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"336177162","text":"\"\"\"\nApplies machine learning to genomic data of 214 individuals to predict blood \ntype.\n\nMore technically: this script uses the Harvard PGP dataset to train/test \na linear support vector classifier for bloodtype A+/-. The dataset consists of\nboth genotypical and phenotypical data. The genomes within the dataset are tiled\nand the tiles serve as the features for machine learning. 
The target label\nis the phenotypical blood type, which is self reported by each patient.\n\nInputs (currently non-interactive/hard-coded):\n* Directory for saving outputs\n* hiq-pgp - The high quality subset of the Harvard PGP genomic data. Numpy array\n* hiq-pgp-info - Column names for the hiq-pgp numpy array. Numpy array\n* names-214.npy - Row names for the hiq-pgp numpy array. Numpy array\n* hu-pgp.sqlite3 - Database of phenotypes self-reported by patients. Sqlite\n\nOutputs:\n* Blood type A confusion matrix plot. PNG.\n\n\"\"\"\n\nimport os\nimport matplotlib as mpl\nif os.environ.get('DISPLAY','') == '':\n print('no display found. Using non-interactive Agg backend')\n mpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport sqlite3\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_val_score, LeaveOneOut\nfrom sklearn import preprocessing\nfrom sklearn.metrics import confusion_matrix\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"data_dir_harvard_pgp_hiq_214\", default='/home/keldin/keep/by_id/su92l-4zz18-mp6wrk95q8li17t')\nparser.add_argument(\"data_dir_untap\", default='/home/keldin/keep/by_id/su92l-4zz18-ubrp7z77ogbv4r7')\nargs = parser.parse_args()\nhiq_dir = args.data_dir_harvard_pgp_hiq_214\nuntap_dir = args.data_dir_untap\n\n\n\n# The following section contains numpy load statements for each of the \"inputs\"\n# for the script. For each input, there are several possible directories\n# corresponding to where that resource is located on each of a few different\n# people's machines. Each option can just be un/commented to make it work.\n# \n# Plot output directory\n#plot_output_dir = \"/home/keldin/Images/\"\n#plot_output_dir = \"/home/keldin/keeprw/by_id/su92l-4zz18-hhyjc7arp04d20t/\"\n#plot_output_dir = \"/Users/Keldins/curoverse/Images/\"\n\n\n\n\n\n# All sets with \"magic tile\" in /data-sdd/tiling/hiq.214\n\n# Xtrain file paths, 1hot:\n#hiq_pgp_1hot_path = \"/data-sdd/tiling/hiq.214/hiq-pgp-1hot\"\n#hiq_pgp_1hot_path = \"/home/swz/PGP-work/Lightning_Work/PGPFiles/hiq-pgp-1hot\"\n#hiq_pgp_1hot_path = \"/Users/Keldins/curoverse/hiq/hiq-pgp-1hot\"\n#Xtrain = np.load(hiq_pgp_1hot_path)\n\n# Xtrain file paths, NOT 1hot:\n#hiq_pgp_path = \"/data-sdd/tiling/hiq.214/hiq-pgp\"\nhiq_pgp_path = hiq_dir + \"/hiq-pgp\"\nXtrain = np.load(hiq_pgp_path)\n\n\n\n\n\n# ohinfo file paths:\n# Recall that names-214.npy is an array of essentially huids. 
In particular, \n# it contains the huids of the participants that have genotypical data in\n# hiq-pgp.\n#names_path = \"/data-sdd/tiling/hiq.214/names-214.npy\"\n#names_path = \"/Users/Keldins/curoverse/hiq/names-214.npy\"\nnames_path = hiq_dir + \"/names-214.npy\"\nohinfo = np.load(names_path)\n\n\n\n\n\n\n# ohPaths file paths, 1hot:\n#hiq_pgp_1hot_info_path = \"/home/swz/PGP-work/Lightning_Work/PGPFiles/hiq-pgp-1hot-info\"\n#ohPaths = np.load(hiq_pgp_1hot_info_path)\n# ^ this appears to not be used\n\n\n\n\n\n\n# justVarPaths file paths, NOT 1hot:\n#hiq_pgp_info_path = \"/data-sdd/tiling/hiq.214/hiq-pgp-info\"\n#hiq_pgp_info_path = \"/Users/Keldins/curoverse/hiq/hiq-pgp-info\"\nhiq_pgp_info_path = hiq_dir + \"/hiq-pgp-info\"\njustVarPaths = np.load(hiq_pgp_info_path)\n\n\n\n\n\n\n\n\n# Loading in phenotype data from PGP database\n# Untap path (Harvard PGP phenotype database scrappings)\n# These are local \"snapshots\" of the database.\n#untap_path = \"/data-sdd/data/untap/hu-pgp.sqlite3\"\n#untap_path = \"/home/sarah/l7g-ml/BloodType/Database/untap.db\"\n#untap_path = \"/Users/Keldins/curoverse/hiq/hu-pgp.sqlite3\"\nuntap_path = untap_dir + \"/untap.sqlite3\"\nconn = sqlite3.connect(untap_path)\n\nc = conn.cursor()\nc.execute('SELECT * FROM demographics')\n# c now contains all the phenotype data\n\n\n\n# c contains all the phenotype data from the db, but we need to put it into\n# a pandas dataframe for ease of use.\n# Start by getting all the rows of phenotype data, and the column headers\n\nrows = c.fetchall()\n# First row of every column description is the column name (i.e. column headers)\n# These are the names of phenotype fields\nphenotype_names = [(i[0]) for i in c.description]\nphenotype_data = pd.DataFrame(rows,columns=phenotype_names)\nconn.close()\n# At this point, phenotype_data is a pandas dataframe object containing all the\n# phenotype data from the untap database. Now it is actually useful and useable\n# to the rest of the script.\n\n\n\n\n\n\n# We wish to select a subset of the phenotype data, namely the huid (\"human_id\")\n# as an identifier, and the blood type itself.\n# DataFrame allows indexing by a list, which returns another DataFrame.\n\ndataBloodType = phenotype_data[['human_id','blood_type']]\n\ndataBloodType = dataBloodType.replace('', np.nan, inplace=False)\n\ndataBloodType = dataBloodType.dropna(axis=0, how='any', inplace=False)\n# At this point, we've dropped every row (i.e. every patient) that doesn't have\n# a blood type listed.\n\n\n# Creating dummy variables for A, B and Rh factor\n# If DataFrame row contains blood type A, then mark as True. You get a pandas\n# Series of booleans (as integers: 0,1). Special case: NaN treated as false.\ndataBloodType['A'] = dataBloodType['blood_type'].str.contains('A',na=False).astype(int)\n# Same as A, but for B:\ndataBloodType['B'] = dataBloodType['blood_type'].str.contains('B',na=False).astype(int)\n# Now the same again, but for Rh (+/-) for A and B. + is treated as True.\ndataBloodType['Rh'] = dataBloodType['blood_type'].str.contains('\\+',na=False).astype(int)\n# Other cases:\n# AB is a special case where the patient has both A and B antigens, which means\n# the A line and the B line above would result in \"True\". 
This is fine because\n# we expect the genes responsible to have a simple additive effect.\n# Blood type O is a special case where the patient is marked \"False\" for A or B.\n\n# Getting phenotypes for huIDs that have associated genotypes\n\n# Given a byte string, this anon func will return string up to \"-\" character \n# b'hu040C0A-GS01175-DNA_F05' is what rows of ohinfo look like, so this will\n# give us first part of the string, which is the huid.\nextract_str_huid = lambda byte_string: byte_string[0:byte_string.find(b\"-\")]\n\n# Take huid in byte string to utf and lower case\nhuids = [extract_str_huid(patient_row).decode(\"utf-8\").lower() for patient_row in ohinfo]\n\nhuids_df = pd.DataFrame(huids,columns={'Sample'})\nhuids_df['Number'] = huids_df.index\n# Now we have a DataFrame for the huids, with a Number column that matches the\n# DataFrame index. The dataframe index will change later after other operations, so we\n# need to store it now as Number. Later we can use the Number column to index Xtrain.\n\n# Take huid in bloodtype DataFrame to lower case\ndataBloodType.human_id = dataBloodType.human_id.str.lower()\n\n# We wish to create a unified DataFrame for the data in dataBloodType and huids_df\n# So we use the merge() method.\n# By using option how='inner', we take the intersection of the data in both\n# This accomplishes the important task of making sure we're only looking at\n# huids (patients) for which we have both phenotypical data (their blood type)\n# and genotypical data (their tiled genome).\n\nunified_df = huids_df.merge(dataBloodType,left_on = 'Sample', right_on='human_id', how='inner')\ndel dataBloodType\n# As a bit of an aside, we plot the totals for the blood types\nunified_df['blood_type'].value_counts().plot(kind='bar')\nunified_df['blood_type'].value_counts()\ndel huids_df\n\n\n# Get genotypes (tiled genome) for participants that had a blood type listed\n# in phenotypical data. We can use the Number column, as only entries with blood type\n# data remain.\n\nidx = unified_df['Number'].values\nXtrain = Xtrain[idx,:] \n\n\n\n# Some columns (which are tile positions) will be the same throughout \n# (i.e. all participants have the same genes in that region). Because they \n# do not vary, they contribute nothing to the machine learning. 
Here we\n# remove them.\n\nmin_indicator = np.amin(Xtrain, axis=0)\nmax_indicator = np.amax(Xtrain, axis=0)\n\n# If the min value in the column equals the max value, then there must be no\n# variation in the column => everyone has the same tile variant\nsameTile = min_indicator == max_indicator\nskipTile = ~sameTile\n\nprint(\"skipTile shape: \" + str(skipTile.shape))\nprint(\"Xtrain shape: \" + str(Xtrain.shape))\nXtrain = Xtrain[:,skipTile]\nprint(\"Xtrain shape after skipTile: \" + str(Xtrain.shape))\nprint(\"justVarPaths shape: \" + str(justVarPaths.shape))\njustVarPathsNew = justVarPaths[skipTile]\n\n\n# Scaling the Training Data\n\nXtrain = preprocessing.scale(Xtrain.astype('double'))\n\n\ny = unified_df.A.values\n\ndel unified_df\n\n# Train the SVM\nCval = 0.01 # SVM penalty parameter\nclassifier = svm.LinearSVC(penalty='l1', dual=False, C=Cval)\nsvc = classifier.fit(Xtrain, y)\n\n# Examine model coefficients\nmaxCoef = svc.coef_.max()\nnumnz = np.nonzero(svc.coef_)[1].shape\nidxNZ = np.nonzero(svc.coef_)\n\nprint(\"Maximum Coefficient (%4.3f):\" % maxCoef)\nprint(\"Number of Nonzero Coefficients (%d)\" % numnz)\n\n# Perform cross validation\n# Calculate Accuracy using 10-fold\n\nn = 10\nscores = cross_val_score(classifier, Xtrain, y, cv=n)\nprint(\"Accuracy 10-fold: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\n# Calculate Accuracy using LOO\n\nloo = LeaveOneOut()\nscoresLOO = cross_val_score(classifier, Xtrain, y, cv=loo)\n\nprint(\"Accuracy LOO: %0.2f (+/- %0.2f)\" % (scoresLOO.mean(), scoresLOO.std() * 2))\n\n\n# Calculate predictions and plot confusion matrix\ny_pred = svc.predict(Xtrain)\ncnf_matrix = confusion_matrix(y, y_pred)\n\nprint(np.matrix(cnf_matrix))\n\n\nplt.imshow(cnf_matrix,interpolation='nearest', cmap=plt.cm.Blues)\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\n\nclasses = ['A-antigen negative','A-antigen positive']\n\nplt.grid(False)\nplt.colorbar()\ntick_marks = np.arange(len(classes))\nplt.xticks(tick_marks, classes, rotation=45)\nplt.yticks(tick_marks, classes)\n\nfor i in range(cnf_matrix.shape[0]):\n for j in range(cnf_matrix.shape[1]):\n plt.text(j, i, cnf_matrix[i, j],\n horizontalalignment=\"center\",\n color=\"orangered\")\n\nplt.gcf().subplots_adjust(left=0.25, bottom=0.35)\n\n# Save plot output\n#plt.savefig(plot_output_dir + \"A_Confusion1.png\", format='png',dpi=300)\nplt.savefig('A_Confusion1.png', format='png',dpi=300)\n\n#coefPaths = justVarPathsNew[idxNZ[1]]\n\n\n#tile_path = np.trunc(coefPaths/(16**5))\n#tile_step = np.trunc((coefPaths - tile_path*16**5)/2)\n#tile_phase = np.trunc((coefPaths- tile_path*16**5 - 2*tile_step))\n\n\n#vtile_path = vhex(tile_path.astype('int'))\n#vitle_step = vhex(tile_step.astype('int'))\n\n\n","sub_path":"BloodType/old_hiq/Blood_Type_Test-A_annotated-KNS.py","file_name":"Blood_Type_Test-A_annotated-KNS.py","file_ext":"py","file_size_in_byte":10794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"162681765","text":"import numpy as np\nimport cv2\n\nmus = cv2.VideoCapture(0) # initialize the webcam; the index \"0\" selects the notebook's built-in camera\nprint(mus.isOpened())\n\n\nwhile True: # loop imshow so the webcam captures and displays frames in real time\n ret, frame = mus.read() # grab a frame in color (BGR) format\n bright = cv2.addWeighted(frame,1.5, np.zeros(frame.shape, frame.dtype), 0, 25) # increase the brightness of the frame\n cv2.imshow('brightness',bright) # display the brightness-adjusted image\n if cv2.waitKey(1) & 0xFF == ord('c'): # stop the program when the \"c\" key is pressed\n break\n\nmus.release()\ncv2.destroyAllWindows()\n","sub_path":"mus_brightness.py","file_name":"mus_brightness.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"382086637","text":"\"\"\"\nTest for the new fixtures.\n\"\"\"\n\nimport io\n\n\ndef test_image_fixtures(\n high_quality_image: io.BytesIO,\n image_file_failed_state: io.BytesIO,\n png_too_large: io.BytesIO,\n image_file_success_state_low_rating: io.BytesIO,\n corrupted_image_file: io.BytesIO,\n image_files_failed_state: io.BytesIO,\n bad_image_file: io.BytesIO,\n different_high_quality_image: io.BytesIO,\n) -> None:\n \"\"\"\n The image functions can be used as fixtures.\n \"\"\"\n fixture_bytes_list = [\n high_quality_image.getvalue(),\n image_file_failed_state.getvalue(),\n png_too_large.getvalue(),\n image_file_success_state_low_rating.getvalue(),\n corrupted_image_file.getvalue(),\n image_files_failed_state.getvalue(),\n bad_image_file.getvalue(),\n different_high_quality_image.getvalue(),\n ]\n assert len(set(fixture_bytes_list)) == len(fixture_bytes_list)\n","sub_path":"tests/test_fixtures.py","file_name":"test_fixtures.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"471474234","text":"import sys\nsys.path.append('/scratch/ppcode/sowfa/src')\nsys.path.append('/scratch/ppcode')\nimport imp\nimport numpy as np\nimport pickle\nfrom numpy import fft\nimport scipy.signal\nimport sliceDataClass as sdc\nimport funcs\nimport matplotlib.pyplot as plt\n\n\n# the directory where the wake data are located\nprjDir = '/scratch/sowfadata/JOBS'\nprjName = 'deepwind'\njobName = 'gs20'\nppDir = '/scratch/sowfadata/pp/' + prjName + '/' + jobName\n\nsliceList = ['Nz0', 'Nz1', 'Nz2', 'Nz3', 'Nz4', 'Nz5', 'Nz6', 'Nz7']\nsliceNum = len(sliceList)\n\n# coordinate transformation\nO = (0,0,0)\nalpha = 30.0 # rotate the coordinate system to alpha\n\nvar = 'U'\nvarD = 1 # u:0, v:1, w:2\nvarName = r'$\\mathrm{S_v^y}$'\nvarUnit = r'$\\mathrm{m^3/s^2}$'\nvarName_save = 'Sv_y'\n\ny_delta = 20\nk = 2*np.pi/y_delta\n\nreadDir = ppDir + '/data/'\nreadName = sliceList[0]\nfr = open(readDir + readName, 'rb')\ndata_org = pickle.load(fr)\nfr.close()\nslc = sdc.Slice(data_org, 2)\ntSeq = slc.data['time']\ntInd = -1\n\nsegNum = 64\n\nplotDataList = []\nHList = []\n\nfor slice in sliceList:\n readDir = ppDir + '/data/'\n readName = slice\n fr = open(readDir + readName, 'rb')\n data_org = pickle.load(fr)\n fr.close()\n\n slc = sdc.Slice(data_org, 2)\n H = slc.N_location # height of this plane\n HList.append(H)\n\n tmp = slc.data[var][tInd][:]\n tmp = funcs.trs(tmp,O,alpha)\n\n slcData = slc.meshITP_Nz((0,2000,100), (0,2000,100), tmp[:,varD], method_='linear')\n
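 # each x-row of the interpolated slice is handled below: detrend with a
 # low-order polynomial fit, remove the mean, apply a bell taper, then compute
 # the spectrum with scipy.signal.csd; the frequency axis is scaled by 2*pi so
 # the result is an angular-wavenumber PSD, which is finally averaged over rows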
ySeq = slcData[1]\n yNum = slcData[1].size\n xNum = slcData[0].size\n\n PSD_list = []\n for xInd in range(xNum):\n # detrend by deg_ order plynomial fit\n deg_ = 1\n polyFunc = np.poly1d(np.polyfit(ySeq, slcData[2][xInd], deg=deg_))\n tmp = slcData[2][xInd] - polyFunc(ySeq)\n tmp = tmp - tmp.mean()\n # bell tapering\n tmp = funcs.window_weight(tmp)\n # FFT\n # k_seq, tmp = funcs.PSD_k(tmp, 1/x_delta)\n k_seq, tmp = scipy.signal.csd(tmp, tmp, k/2/np.pi, nperseg=segNum, noverlap=None)\n k_seq *= 2*np.pi\n tmp *= 1/2/np.pi\n PSD_list.append(tmp)\n # horizontal average\n PSD_seq = np.average(np.array(PSD_list), axis=0)\n plotDataList.append((k_seq, PSD_seq))\n\nzNum = len(HList)\n\n# plot\nfig, ax = plt.subplots(figsize=(6,6))\ncolors = plt.cm.jet(np.linspace(0,1,zNum))\n\nfor zInd in range(zNum):\n k_ = plotDataList[zInd][0]\n PSD_ = plotDataList[zInd][1]\n plt.loglog(k_, PSD_, label='h = ' + str(int(HList[zInd])) + 'm', linewidth=1.0, color=colors[zInd])\n# # -5/3 law\n# plt.loglog(k_[1:], 100*np.power(k_[1:], -5/3), label='-5/3 law', linewidth=2.0, color='k')\nplt.xlabel('k (1/m)')\nplt.ylabel(varName + ' (' + varUnit + ')')\nxaxis_min = 1e-3\nxaxis_max = k_seq.max()\n# yaxis_min = 1e-8\n# yaxis_max = 1e2\n# plt.ylim(yaxis_min, yaxis_max)\nplt.xlim(xaxis_min, xaxis_max)\nplt.legend(bbox_to_anchor=(1.05,0.5), loc=6, borderaxespad=0) # (1.05,0.5) is the relative position of legend to the origin, loc is the reference point of the legend\nplt.grid()\nplt.title('')\nfig.tight_layout() # adjust the layout\nsaveName = varName_save + '_PSD_' + str(tSeq[tInd]) + '.png'\nplt.savefig(ppDir + '/' + saveName, bbox_inches='tight')\nplt.show()\n","sub_path":"sowfa/PSD_v_ky_Nz.py","file_name":"PSD_v_ky_Nz.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"440653231","text":"\"\"\"\n系统参数\n\"\"\"\n\n\ndef get_environ(key, default_value=None):\n \"\"\"获取环境变量\"\"\"\n import os\n return os.environ.get(key, default_value)\n\n\ndef get_sys_args(options=[], hint_text=None):\n import sys\n return get_args(argv=sys.argv[1:], options=options, hint_text=hint_text)\n\n\ndef get_sys_arg(short_option=None, long_option=None, default_value=None, hint_text=None):\n import sys\n key = 'SYS_ARG_KEY'\n options = [(key, short_option, long_option, default_value)]\n return get_args(argv=sys.argv[1:], options=options, hint_text=hint_text).get(key, default_value)\n\n\ndef get_args(argv, options=[], hint_text=None):\n import getopt\n import sys\n short_options = 'h' + ''.join(op[1] if op[1] else '' for op in options)\n long_options = ['help'] + [op[2] for op in options if op[2]]\n try:\n opts, other_args = getopt.getopt(argv, short_options, long_options)\n except getopt.GetoptError:\n if hint_text:\n print(f\"usage: {hint_text}\")\n sys.exit(2)\n\n args = {}\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n if hint_text:\n print(f\"usage: {hint_text}\")\n sys.exit()\n else:\n opt_key = opt.replace('-', '')\n for key, short_opt, long_opt, _ in options:\n if opt_key == short_opt.replace(':', '') or opt_key == long_opt.replace('=', ''):\n args[key] = arg\n\n for key, _, _, default_value in options:\n if key not in args:\n args[key] = default_value\n\n return args\n\n\ndef get_param(\n key,\n config_file='config.json',\n default_value=None,\n type='str',\n log_enabled=True):\n import os\n import json\n from zlog2 import log\n\n def log_if_enabled(value):\n if log_enabled:\n log(f\"{key}={value}\")\n\n def translate_value(val):\n 
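 # coerce through float() first so numeric strings convert cleanly,
 # e.g. bool(float(\"0\")) -> False and int(float(\"2.0\")) -> 2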
try:\n if type == 'bool':\n return bool(float(val))\n elif type == 'int':\n return int(float(val))\n elif type == 'float':\n return float(val)\n elif type == 'str':\n return str(val)\n except Exception as ex:\n print(f'get_param exception: {ex}')\n\n return val\n\n try:\n value = os.environ.get(key)\n if value:\n log_if_enabled(value)\n return translate_value(value)\n\n conf = json.loads(open(config_file, encoding='utf-8').read())\n value = conf.get(key)\n if value:\n log_if_enabled(value)\n return translate_value(value)\n except Exception as ex:\n print(f'get_param exception: {ex}')\n\n log_if_enabled(default_value)\n return default_value\n\n\ndef get_all_params(config_file='config.json', log_enabled=False):\n from zlog2 import log\n import json\n\n if log_enabled:\n log(f\"=================环境变量读取开始=================\")\n conf = json.loads(open(config_file, encoding='utf-8').read())\n for k, v in conf.items():\n conf[k] = get_param2(k, log_enabled=log_enabled)\n if log_enabled:\n log(f\"=================环境变量读取结束=================\")\n return conf\n\n\ndef get_param2(\n key,\n config_file='config.json',\n default_value=None,\n log_enabled=True):\n import os\n import json\n from zlog2 import log\n\n def log_if_enabled(value):\n if log_enabled:\n log(f\"{key}={value}\")\n\n def translate_value(val):\n try:\n if type(val) == bool:\n return bool(float(val))\n elif type(val) == int:\n return int(float(val))\n elif type(val) == float:\n return float(val)\n elif type(val) == str:\n return str(val)\n except Exception as ex:\n print(f'get_param exception: {ex}')\n\n return val\n\n def translate_value_with_type(val, type_val):\n try:\n if type(type_val) == bool:\n return bool(float(val))\n elif type(type_val) == int:\n return int(float(val))\n elif type(type_val) == float:\n return float(val)\n elif type(type_val) == str:\n return str(val)\n except Exception as ex:\n print(f'get_param exception: {ex}')\n\n return val\n\n try:\n conf = json.loads(open(config_file, encoding='utf-8').read())\n config_value = conf.get(key)\n\n value = os.environ.get(key)\n if value:\n log_if_enabled(value)\n return translate_value_with_type(value, config_value)\n\n if config_value:\n log_if_enabled(config_value)\n return translate_value(config_value)\n except Exception as ex:\n print(f'get_param exception: {ex}')\n\n log_if_enabled(default_value)\n return default_value\n\n\ndef main(argv):\n inputfile = ''\n outputfile = ''\n INPUT_FILE_KEY = 'input_file'\n OUTPUT_FILE_KEY = 'output_file'\n options = [\n (INPUT_FILE_KEY, 'i:', 'ifile=', '.'),\n (OUTPUT_FILE_KEY, 'o:', 'ofile=', '.')\n ]\n hint_text = '_.py -i -o '\n args = get_args(argv, options=options, hint_text=hint_text)\n\n print(f'参数: {args}')\n print('输入的文件为:', args.get(INPUT_FILE_KEY))\n print('输出的文件为:', args.get(OUTPUT_FILE_KEY))\n\n\nif __name__ == \"__main__\":\n import sys\n main(sys.argv[1:])\n","sub_path":"zsysargs.py","file_name":"zsysargs.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"564544864","text":"import time\n\nstart = time.time()\n\n\ndef primes(n):\n # starts with 2\n sieve = [False, False] + [True] * (n - 2)\n # noinspection PyShadowingNames\n for i in range(2, n):\n if i * i <= n:\n for f in range(i * i, n, i):\n sieve[f] = False\n return sieve\n\n\np = primes(1000000)\nlongest = 0\nAA = 0\nBB = 0\n\nfor a in range(-999, 1000):\n for b in range(-1000, 1001):\n length = 0\n i = 0\n while True:\n if p[i * i + a * i + b]:\n length += 1\n i += 1\n 
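 # i*i + a*i + b is no longer prime: the streak for this (a, b) is over,
 # so compare it with the longest streak found so far and move on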
else:\n if length > longest:\n longest = length\n AA = a\n BB = b\n break\n\nprint(\"A:\", AA, \"\\nB:\", BB, \"\\nA*B:\", str(AA * BB))\n\n# this took: 4.904348611831665\n\nprint(\"this took:\", time.time() - start)\n","sub_path":"Solved 1-50/27. Quadratic Primes.py","file_name":"27. Quadratic Primes.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"160897354","text":"import json\nfrom json import JSONDecodeError\nimport logging\n\nimport time\nfrom traceback import print_exception\n\nfrom pkgin.models import Redpkg\n# Get an instance of a logger\nimport re\nimport datetime\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nimport hashlib\n# Create your views here.\nfrom django.views.decorators.csrf import csrf_exempt\n\nlogger = logging.getLogger(__name__)\n\n\n@csrf_exempt\ndef log(request):\n try:\n if request.method == 'POST':\n redpkg_data = json.loads(request.body.decode(\"utf-8\"))\n logger.debug(redpkg_data)\n redpkg_info = build_redpkg_info(redpkg_data)\n objects = []\n for p in redpkg_info:\n redpkg = Redpkg(**p)\n objects.append(redpkg)\n\n try:\n Redpkg.objects.bulk_create(objects)\n except:\n logger.error(\"红包重复: %s\" %(redpkg_data))\n\n ret = {\"code\": 0, \"message\": \"OK\"}\n json_str = JsonResponse(ret, safe=False, json_dumps_params={\"ensure_ascii\": False})\n json_str.charset = \"utf-8\"\n return json_str\n else:\n raise Exception(\"NO GET METHOD\")\n except Exception as e:\n\n logger.error(e)\n print_exception(e)\n return JsonResponse({\"code\": 1, \"message\": \"ERROR, 不支持GET或者 %s\" % e}, safe=False,\n json_dumps_params={\"ensure_ascii\": False})\n\n\ndef build_redpkg_info(pkg_json):\n pkg_info_list = []\n\n send_obj = pkg_json['send_object']\n if send_obj['recv_num']!=send_obj['total_num']:\n return [] #返回个空的,红包不完整\n\n redpkg_id = send_obj['send_listid']\n\n temp = send_obj['invalid_time']\n redpkg_tm = datetime.datetime.strptime(temp, \"%Y-%m-%d %H:%M:%S\") - datetime.timedelta(hours=24)\n redpkg_tm = redpkg_tm.strftime(\"%Y-%m-%d %H:%M:%S\")\n sender_qq_num = send_obj['send_uin']\n sender_qq_name = send_obj['send_name']\n redpkg_total = send_obj['total_amount']\n money_parts = send_obj['total_num'] # 红包个数\n money_guess = send_obj['wishing']\n\n group_info = pkg_json['group_info']\n qq_group_num = group_info['group_uin']\n qq_group_name = group_info['group_name']\n\n # qq_group = pkg_json['group_id']\n # qq_group_name = pkg_json['group_name']\n # money_parts = int(re.findall(r'\\d+', pkg_json['money_parts'])[0])\n # redpkg_total = float(re.findall(r'\\d+.\\d+', pkg_json['money_total'])[0])\n #\n # redpkg_qq_nick_name = pkg_json['money_name']\n # money_guess = int(pkg_json['money_guess']) # money_guess\n #\n # # 紅包的ID\n # redpkg_id = ''\n\n temp_redpkg_part = pkg_json['recv_array']\n for p in temp_redpkg_part:\n grab_qq_num = p['recv_uin']\n grab_qq_nick_name = p['recv_name']\n grab_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(p['create_ts'])))\n\n grab_amount = int(p['amount'])\n grab_last_digit = int(p['amount'][-1])\n tmp = {\n \"redpkg_id\": redpkg_id,\n 'redpkg_tm': redpkg_tm,\n \"qq_group_num\": qq_group_num,\n \"qq_group_name\": qq_group_name,\n\n 'sender_qq_num': sender_qq_num,\n 'sender_qq_name': sender_qq_name,\n \"redpkg_total\": int(redpkg_total),\n \"money_parts\": int(money_parts),\n \"money_guess\": money_guess,\n\n 'grab_qq_num': grab_qq_num,\n \"grab_qq_nick_name\": grab_qq_nick_name,\n \"grab_time\": 
grab_time,\n \"grab_amount\": int(grab_amount),\n \"grab_last_digit\": int(grab_last_digit),\n }\n pkg_info_list.append(tmp)\n\n return pkg_info_list\n\n\ndef today():\n str = time.strftime(\"%Y-%m-%d\", time.localtime())\n return \"%s \" % str\n\n\ndef year():\n str = time.strftime(\"%Y-\", time.localtime())\n return str\n","sub_path":"pkgin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"568753430","text":"# Copied from http://docs.h5py.org/en/latest/mpi.html\n# Run via: mpiexec -n 4 python partest5.py\n\nimport h5py\nimport os\nimport shutil\n\nfolder = r'/media/sf_temp/lastest/run2_lastest longer/testdir'\n\nh5path1 = os.path.join(folder,r'fields2D.hdf5')\nh5path2 = os.path.join(folder,r'fields2D_comp.hdf5')\n\n# Assume all keys are datasets, and copy them into the other hdf5\nwith h5py.File(h5path1, 'r') as f1:\n with h5py.File(h5path2, 'w') as f2:\n for k in f1.keys():\n f2.create_dataset(k, data=f1[k][...], compression='gzip', compression_opts=4)\n\nshutil.move(h5path2, h5path1)\n","sub_path":"Tests/partest5b.py","file_name":"partest5b.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"109217480","text":"\"\"\"Константы логического движка\"\"\"\nTOK_AND, TOK_OR, TOK_CON, TOK_EQL, TOK_NEG, TOK_BRO, TOK_BRC, TOK_ZERO, TOK_ONE, TOK_A, TOK_B, TOK_C, TOK_D = range(13)\n\nALIASES = {\n TOK_AND: '&*^',\n TOK_OR: '+v',\n TOK_CON: '>',\n TOK_EQL: '=~',\n TOK_NEG: '!',\n TOK_BRO: '(',\n TOK_BRC: ')',\n TOK_ZERO: '0',\n TOK_ONE: '1',\n TOK_A: 'Aa',\n TOK_B: 'Bb',\n TOK_C: 'Cc',\n TOK_D: 'Dd'\n}\n\nVARIABLES = [TOK_A, TOK_B, TOK_C, TOK_D]\nCONSTANTS = [TOK_ZERO, TOK_ONE]\nBINARY_OPERATIONS = [TOK_EQL, TOK_CON, TOK_OR, TOK_AND]\nUNARY_OPERATIONS = [TOK_NEG]\n\nPRIORITIES = TOK_BRO, TOK_BRC, TOK_EQL, TOK_CON, TOK_OR, TOK_AND, TOK_NEG\n\n","sub_path":"core/_engine_backup/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"148336833","text":"from itertools import product as pr\nfrom functools import reduce\nfrom collections import Counter\n\nmake_const = lambda i,j,n: {(0,i,j), (1,i,n), (2,j,n), (3,i//3,j//3,n)}\napply_const = lambda consts, const: [c for c in consts if c.isdisjoint(const)]\nfind_const = lambda key, consts: [c for c in consts if key in c]\n\ndef solve(sol, cs):\n if len(cs) == 0: return sol if len(sol)==81 else None\n\n cnt = Counter()\n for c in cs: # Find best split\n for k in c: cnt[k] += 1\n\n for c in find_const(cnt.most_common()[-1][0], cs):\n s = solve(sol+[c], apply_const(cs, c))\n if s: return s\n\nwith open('p96.txt') as fin: sudoku = [line.strip() for line in fin]\nconsts = [make_const(i,j,n) for i,j,n in pr(range(9), range(9), range(1,10))]\ntotal = 0\n\nfor start in range(1, len(sudoku), 10):\n initial = []\n for j,i in pr(range(9), range(9)):\n n = int(sudoku[start+j][i])\n if n: initial.append(make_const(i,j,n))\n\n s = {}\n for c in solve(initial, reduce(apply_const, initial, consts.copy())):\n parts = sorted(c)\n s[(parts[0][1], parts[0][2])] = parts[1][2]\n total += s[(0,0)]*100 + s[(1,0)]*10 + s[(2,0)]\n\nprint(total)\n","sub_path":"p96.py","file_name":"p96.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
+{"seq_id":"495262462","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom utils import gaussian\nimport emcee as em \n\nm = 0.3\nc = 5\nmu = 0\nsigma = 0.4\nsamples = 1000\n\nxs = np.random.uniform(0,10,samples)\nnoise = np.random.normal(mu,sigma,xs.shape)\nys = m*xs + c + noise\n\nplt.plot(xs,ys,'+')\nplt.savefig(\"fig1.png\")\n\ndef lnprior(model):\n\tm, c = model\n\tif m > -10 and m < 10 and c > -10 and c < 10:\n\t\treturn 0\n\treturn -np.inf\n\ndef loglike(model,xs,ys):\n\tm_sample,c_sample = model\t\n\tprediction = m_sample*xs + c_sample\n\tdiff2 = (ys - prediction)**2\n\tssq = np.sum(diff2,axis=0)\n\treturn -0.5*ssq\n\n\ndef logprob(model,xs,ys):\n\tlp = lnprior(model)\n\tif not np.isfinite(lp):\n\t\treturn -np.inf \n\treturn lp + loglike(model,xs,ys)\n\nndim, nwalkers,nsamples = 2, 50,10000\npos = [np.asarray([0.0,0.0]) + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]\nsampler = em.EnsembleSampler(nwalkers, ndim, loglike, args=(xs,ys))\nsampler.run_mcmc(pos,nsamples)\nchain = sampler.chain\n\nfig, axarr = plt.subplots(ndim)\n\nfor i,ax in enumerate(axarr):\n\tfor j in range(0,nwalkers):\n\t\tvalues = [ chain[j][k][i] for k in range(0,nsamples)]\n\t\tax.plot(values)\n\nplt.savefig(\"fig2.png\")\nsamples = sampler.chain[:,50:,:].reshape((-1,ndim))\nimport corner\nfig = corner.corner(samples, labels=[\"a\",\"b\",\"c\",\"d\",\"e\",\"f\"])\nplt.savefig(\"fig3.png\")\n","sub_path":"archive/bayesian/toy_emcee_linear.py","file_name":"toy_emcee_linear.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"415401596","text":"\"\"\"\n\"\"\"\nimport pprint\n\ntry:\n from stage_check import gql_helper\nexcept ImportError:\n import gql_helper\n\ntry:\n from stage_check import Output\nexcept ImportError:\n import Output\n\ntry:\n from stage_check import AbstractTest\nexcept ImportError:\n import AbstractTest\n\ntry:\n from stage_check import EntryTest\nexcept ImportError:\n import EntryTest\n\n\ndef create_instance(test_id, config, args):\n \"\"\"\n Invoked by TestExecutor class to create a test instance\n \n @test_id - test index number\n @config - test parameters from configuration \n @args - command line args\n \"\"\"\n return TestDeviceState(test_id, config, args)\n\n\nclass TestDeviceState(AbstractTest.GraphQL):\n \"\"\"\n Test device interface state\n \"\"\"\n def __init__(self, test_id, config, args):\n super().__init__(test_id, config, args)\n\n def requires_grapqhl(self):\n \"\"\"\n Override\n \"\"\"\n return True\n\n def get_params(self):\n # apply defaults\n default_params = {\n \"status_tests\" : [],\n \"default_status\" : Output.Status.OK,\n \"exclude_tests\" : [],\n \"device-interfaces\" : []\n }\n \n params = self.apply_default_params(default_params)\n return params\n\n def run(self, local_info, router_context, gql_token, fp):\n \"\"\"\n This test uses the gql engine to get device state state\n \"\"\"\n # Process test parameters\n test_info = self.test_info(local_info, router_context)\n self.output.test_start(test_info)\n params = self.get_params()\n\n # TODO either keep and implement as a parameter or toss\n include_list = params[\"device-interfaces\"]\n\n # GraophQL Preperation\n qr = gql_helper.NodeGQL(\"allRouters\", ['name'], [ router_context.get_router() ], debug=self.debug)\n qn = gql_helper.NodeGQL(\"nodes\", ['name'])\n qd = gql_helper.NodeGQL(\"deviceInterfaces\", [ 'name', 'state { operationalStatus }' ],\n include_list)\n qr.add_node(qn)\n 
qn.add_node(qd)\n\n json_reply={}\n if not self.send_query(qr, gql_token, json_reply):\n return self.output.test_end(fp)\n\n flatter_json = qr.flatten_json(json_reply, 'router/nodes/deviceInterfaces', '/')\n\n router_context.set_allRouters_node_type(flatter_json)\n\n if self.debug:\n print('........ flattened list ..........')\n pprint.pprint(flatter_json)\n\n # Run test / process results\n fail_count = 0\n warn_count = 0\n engine = EntryTest.Parser()\n for entry in flatter_json:\n try:\n if self.exclude_flat_entry(entry, params[\"exclude_tests\"]):\n continue\n except KeyError:\n pass\n \n test_status = engine.eval_entry_by_tests(entry, params[\"entry_tests\"])\n if test_status == Output.Status.FAIL:\n self.output.proc_test_match(test_status, entry)\n fail_count += 1\n\n if test_status == Output.Status.WARN:\n self.output.proc_test_match(test_status, entry)\n warn_count += 1\n\n if fail_count > 0:\n self.output.proc_test_fail_result(fail_count)\n elif warn_count > 0:\n self.output.proc_test_warn_result(warn_count)\n\n return self.output.test_end(fp)\n\n","sub_path":"stage_check/stage_check/TestDeviceState.py","file_name":"TestDeviceState.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"307542004","text":"# get product data\n# input : web data & id.txt\n# output : products.json\n\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport re\nimport json\nimport random\nimport time\n\nURL = 'https://www.digikala.com/product/dkp-{}'\n\n\ndef getdata():\n\n print('Scanning for product data.')\n\n # product keys\n keys = []\n with open('id.txt', 'r') as f:\n for line in f:\n keys.append(line.strip())\n\n # shuffle keys to draw uniform samples\n random.shuffle(keys)\n\n # keys with bad html format - to be deleted after scanning\n faulty_keys = []\n\n # to be filled with product data\n productdata = {}\n\n counter = 1\n for id in keys:\n # sleep for 3 sec not to overload the server\n time.sleep(3)\n\n try:\n\n # add url\n productdata[id] = {'url': URL.format(id)}\n\n # connect to url\n try:\n r = requests.get(productdata[id]['url'])\n except requests.exceptions.ConnectionError:\n time.sleep(3)\n print('failed {}/{}'.format(counter, len(keys)))\n counter += 1\n continue\n\n soup = bs(r.text, 'html.parser')\n specs = list(soup.find('div', attrs={'id':'tabs'}).div.children)[1].article\n\n # add product name\n productdata[id]['name'] = specs.find('h2', attrs={'class': 'c-params__headline'}).span.text.strip()\n\n # add product price\n productdata[id]['price'] = soup.find('div', attrs={'class':'c-product__seller-price-pure js-price-value'}).text.strip()\n\n # add product specs\n for section in specs.find_all('section')[:6]:\n # find section name\n if section.h3.find('a'):\n sectionName = section.h3.span.a.text.strip()\n else:\n sectionName = section.h3.text.strip()\n \n productdata[id][sectionName] = {}\n\n for li in section.ul.children:\n #key\n keytag = li.find('div', attrs={'class': 'c-params__list-key'})\n if keytag.find('a'):\n key = keytag.span.a.text.strip()\n else:\n key = keytag.span.text.strip()\n #value\n valtag = li.find('div', attrs={'class': 'c-params__list-value'})\n if valtag.find('a'):\n val = valtag.span.span.a.text.strip()\n else:\n val = valtag.span.text.strip()\n\n productdata[id][sectionName][key] = val\n\n except AttributeError:\n # handle bad html formats\n faulty_keys.append(id)\n continue\n\n print('{}/{}'.format(counter, len(keys)))\n counter += 1\n\n # remove products with 
bad html format\n for key in faulty_keys:\n if key in productdata:\n del productdata[key]\n\n\n # write product data to json\n with open('products.json', 'w', encoding='utf8') as f:\n json.dump(productdata, f, ensure_ascii=False)\n\n\n print('Scanned {} products. stored into products.json .\\n'.format(len(productdata)))\n\n\n\nif __name__=='__main__':\n getdata()","sub_path":"getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"648055698","text":"import unittest\nfrom cqo.simulation import ThermalState, CoherentState\nfrom cqo.units import hbar\nimport numpy as np\nfrom itertools import product\n\nclass TestThermalState(unittest.TestCase):\n\n eps = 1e-6\n\n omega = 1e3\n\n m = 1e-24\n\n def test_low_T(self):\n \"\"\"Test that the low temperature behaviour is the ground state\n \"\"\"\n\n beta = 1e12 / hbar / self.omega\n\n rho = ThermalState(beta, self.omega, self.m)\n\n ground_state = CoherentState(0, self.m*self.omega)\n\n x_array = np.linspace(-2*rho.width, 2*rho.width)\n\n for x, x_ in product(x_array, x_array):\n\n diff = ground_state.sample(x, x_) - rho.sample(x, x_)\n\n self.assertTrue(abs(diff) < self.eps)\n\n def beta_range(self):\n\n return 10.0**np.arange(-2,3) / hbar / self.omega\n\n def test_width(self):\n \"\"\"Width property should give half-width-half-maximum\n \"\"\"\n\n for beta in self.beta_range():\n rho = ThermalState(beta, self.omega, self.m)\n\n max = rho.sample(0,0)\n\n half_max = rho.sample(rho.width, rho.width)\n\n self.assertTrue(abs(half_max / max - 0.5) < 1e-3)\n\n def test_norm(self):\n \"\"\"Test that the state is normalised\n \"\"\"\n\n res = 1024\n\n for beta in self.beta_range():\n\n rho = ThermalState(beta, self.omega, self.m)\n\n limit = 8*rho.width\n\n x_array = np.linspace(-limit, limit, res)\n\n width = x_array[1] - x_array[0]\n\n pdf = [rho.sample(x,x) for x in x_array]\n\n norm = width*sum(pdf)\n\n if abs(norm - 1) >= self.eps:\n print(\"beta: {}, norm: {}\".format(beta, norm))\n\n self.assertTrue(abs(norm - 1) < self.eps)\n\n def test_gaussianity(self):\n \"\"\"Test that pdf is gaussian across range of temperatures\n \"\"\"\n\n for beta in self.beta_range():\n\n rho = ThermalState(beta, self.omega, self.m)\n\n x_array = np.linspace(rho.width/10, rho.width, num=9)\n\n rho_x = np.array([rho.sample(x, x) for x in x_array])\n\n rho_x /= rho.sample(0,0)\n\n a = np.log(rho_x) / x_array**2\n\n self.assertTrue(np.allclose(a / a[0], np.ones(a.shape), self.eps))\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"cqo/test/test_thermal_state.py","file_name":"test_thermal_state.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"633598887","text":"# *********************************************\n# |docname| - Tests of the Runestone Components\n# *********************************************\n# These tests check both client-side and server-side aspects of the Runestone Components.\n#\n# Imports\n# =======\n# These are listed in the order prescribed by `PEP 8\n# `_.\n#\n# Standard library\n# ----------------\nimport datetime\nimport json\n\n# Third-party imports\n# -------------------\nfrom polling2 import poll\nimport pytest\nfrom runestone.clickableArea.test import test_clickableArea\nfrom runestone.fitb.test import test_fitb\nfrom runestone.mchoice.test import test_assess\nfrom runestone.parsons.test import 
test_parsons\nfrom runestone.poll.test import test_poll\nfrom runestone.shortanswer.test import test_shortanswer\nfrom runestone.spreadsheet.test import test_spreadsheet\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\n# Local imports\n# -------------\n# None.\n\n\n# Utilities\n# =========\n# Poll the database waiting for the client to perform an update via Ajax.\ndef get_answer(db, expr, minimum_len):\n return poll(\n lambda: db(expr).select(),\n check_success=lambda s: len(s) >= minimum_len,\n step=0.1,\n timeout=10,\n )\n\n\n# Check the fields common to the tables of most Runestone components.\ndef check_common_fields_raw(selenium_utils_user, db, query, index, div_id):\n row = get_answer(db, query, index + 1)[index]\n assert row.timestamp - datetime.datetime.now() < datetime.timedelta(seconds=5)\n assert row.div_id == div_id\n assert row.sid == selenium_utils_user.user.username\n assert row.course_name == selenium_utils_user.user.course.course_name\n return row\n\n\n# Return the answer, correct, and percent fields after checking common fields.\ndef check_common_fields(selenium_utils_user, db, query, index, div_id):\n row = check_common_fields_raw(selenium_utils_user, db, query, index, div_id)\n return row.answer, row.correct, row.percent\n\n\n# Tricky fixures\n# --------------\n# The URL to fetch in order to do testing varies by the type of test:\n#\n# #. When performing client-side testing in Runestone Components, the URL is usually \"/index.html\". A fixture defined in client testing code handles this; see the ``selenium_utils_1`` fixture in ``test_clickableArea.py`` in the Runestone Component, for example. The client-side tests then use this fixture.\n# #. When performing plain server-side testing, the URL is \"/path/to/book/index.html\"; see ``selenium_utils_user.get_book_url``. The fixture below handles this. Then, inside a plain server-side test, the test invokes the client test directly, meaning that it passes its already-run fixture (which fetched the plain server-side testing page) to the client test, bypassing the client fixture.\n# #. When performing selectquestion server-side testing, the URL is \"/path/to/book/selectquestion.html\". The next fixture handles this. It likewise calls the plain server-side text with its already-run fixture, which has fetched the selectquestion server-side testing page.\n#\n# Both client-side and server-side tests must be structured carefully for this to work:\n# - Client-side tests must invoke ``selenium_utils.wait_until_ready(div_id)``.\n# - Client-side tests must **not** invoke ``selenium_utils.get`` in the body of the test, since this prevents server-side tests. 
Instead, invoke this in a fixture passed to the test, allow server-side tests to override this by passing a different fixture.\n# - The ``div_id`` of client-side tests must match the div_id of server-side tests, meaning the two ``.rst`` files containing tests must use the same ``div_id``.\n#\n# A fixture for plain server-side testing.\n@pytest.fixture\ndef selenium_utils_user_1(selenium_utils_user):\n selenium_utils_user.get_book_url(\"index.html\")\n return selenium_utils_user\n\n\n# A fixture for selectquestion server-side testing.\n@pytest.fixture\ndef selenium_utils_user_2(selenium_utils_user):\n selenium_utils_user.get_book_url(\"selectquestion.html\")\n return selenium_utils_user\n\n\n# Tests\n# =====\n#\n# ClickableArea\n# -------------\ndef test_clickable_area_1(selenium_utils_user_1, runestone_db):\n db = runestone_db\n div_id = \"test_clickablearea_1\"\n\n def ca_check_common_fields(index):\n return check_common_fields(\n selenium_utils_user_1,\n db,\n db.clickablearea_answers.div_id == div_id,\n index,\n div_id,\n )\n\n test_clickableArea.test_ca1(selenium_utils_user_1)\n assert ca_check_common_fields(0) == (\"\", False, None)\n\n test_clickableArea.test_ca2(selenium_utils_user_1)\n assert ca_check_common_fields(1) == (\"0;2\", True, 1)\n\n # TODO: There are a lot more clickable area tests that could be easily ported!\n\n\n# Fitb\n# ----\n# Test server-side logic in FITB questions.\ndef test_fitb_1(selenium_utils_user_1, runestone_db):\n db = runestone_db\n\n def fitb_check_common_fields(index, div_id):\n answer, correct, percent = check_common_fields(\n selenium_utils_user_1,\n db,\n db.fitb_answers.div_id == div_id,\n index,\n div_id,\n )\n return json.loads(answer), correct, percent\n\n test_fitb.test_fitb1(selenium_utils_user_1)\n assert fitb_check_common_fields(0, \"test_fitb_string\") == ([\"\", \"\"], False, 0)\n\n test_fitb.test_fitb2(selenium_utils_user_1)\n assert fitb_check_common_fields(1, \"test_fitb_string\") == ([\"red\", \"\"], False, 0.5)\n\n test_fitb.test_fitb3(selenium_utils_user_1)\n assert fitb_check_common_fields(2, \"test_fitb_string\") == ([\"red\", \"away\"], True, 1)\n\n test_fitb.test_fitb4(selenium_utils_user_1)\n assert fitb_check_common_fields(3, \"test_fitb_string\") == ([\"red\", \"away\"], True, 1)\n\n test_fitb.test_fitboneblank_too_low(selenium_utils_user_1)\n assert fitb_check_common_fields(0, \"test_fitb_number\") == ([\" 6\"], False, 0)\n\n test_fitb.test_fitboneblank_wildcard(selenium_utils_user_1)\n assert fitb_check_common_fields(1, \"test_fitb_number\") == ([\"I give up\"], False, 0)\n\n test_fitb.test_fitbfillrange(selenium_utils_user_1)\n assert fitb_check_common_fields(2, \"test_fitb_number\") == ([\" 6.28 \"], True, 1)\n\n test_fitb.test_fitbregex(selenium_utils_user_1)\n assert fitb_check_common_fields(0, \"test_fitb_regex_1\") == (\n [\" maire \", \"LITTLE\", \"2\"],\n True,\n 1,\n )\n\n test_fitb.test_regexescapes1(selenium_utils_user_1)\n assert fitb_check_common_fields(0, \"test_fitb_regex_2\") == (\n [r\"C:\\windows\\system\"],\n True,\n 1,\n )\n\n test_fitb.test_regexescapes2(selenium_utils_user_1)\n assert fitb_check_common_fields(0, \"test_fitb_regex_3\") == ([\"[]\"], True, 1)\n\n\n# Lp\n# --\ndef test_lp_1(selenium_utils_user):\n su = selenium_utils_user\n href = \"lp_demo.py.html\"\n su.get_book_url(href)\n id = \"test_lp_1\"\n su.wait_until_ready(id)\n\n snippets = su.driver.find_elements_by_class_name(\"code_snippet\")\n assert len(snippets) == 1\n check_button = su.driver.find_element_by_id(id)\n result_id = 
\"lp-result\"\n result_area = su.driver.find_element_by_id(result_id)\n\n # Set snippets.\n code = \"def one(): return 1\"\n su.driver.execute_script(f'LPList[\"{id}\"].textAreas[0].setValue(\"{code}\");')\n assert not result_area.text\n\n # Click the test button.\n check_button.click()\n su.wait.until(\n EC.text_to_be_present_in_element_value((By.ID, \"lp-result\"), \"Building...\")\n )\n\n # Wait until the build finishes. To find this, I used the Chrome inspector; right-click on the element, then select \"Copy > Copy full XPath\".\n su.wait.until(\n EC.text_to_be_present_in_element(\n (By.XPATH, \"/html/body/div[4]/div[1]/div[3]/div\"), \"Correct. Grade: 100%\"\n )\n )\n\n # Refresh the page. See if saved snippets are restored.\n su.get_book_url(href)\n su.wait_until_ready(id)\n assert (\n su.driver.execute_script(f'return LPList[\"{id}\"].textAreas[0].getValue();')\n == code\n )\n\n\n# Mchoice\n# -------\ndef test_mchoice_1(selenium_utils_user_1, runestone_db):\n su = selenium_utils_user_1\n db = runestone_db\n div_id = \"test_mchoice_1\"\n\n def mc_check_common_fields(index):\n return check_common_fields(\n su, db, db.mchoice_answers.div_id == div_id, index, div_id\n )\n\n test_assess.test_ma1(selenium_utils_user_1)\n assert mc_check_common_fields(0) == (\"\", False, None)\n\n test_assess.test_ma2(selenium_utils_user_1)\n assert mc_check_common_fields(1) == (\"0,2\", True, 1)\n\n # TODO: There are a lot more multiple choice tests that could be easily ported!\n\n\n# Parsons's problems\n# =================\ndef test_parsons_1(selenium_utils_user_1, runestone_db):\n su = selenium_utils_user_1\n db = runestone_db\n\n def pp_check_common_fields(index, div_id):\n row = check_common_fields_raw(\n su, db, db.parsons_answers.div_id == div_id, index, div_id\n )\n return row.answer, row.correct, row.percent, row.source\n\n test_parsons.test_general(selenium_utils_user_1)\n assert pp_check_common_fields(0, \"test_parsons_1\") == (\n \"-\",\n False,\n None,\n \"0_0-1_2_0-3_4_0-6_0-5_0\",\n )\n assert pp_check_common_fields(1, \"test_parsons_1\") == (\n \"0_0-1_2_1-3_4_1-5_1\",\n True,\n 1.0,\n \"6_0\",\n )\n\n # TODO: There are several more Parsons's problems tests that could be easily ported.\n\n\n# Poll\n# ----\ndef test_poll_1(selenium_utils_user_1, runestone_db):\n id = \"test_poll_1\"\n test_poll.test_poll(selenium_utils_user_1)\n db = runestone_db\n assert (\n get_answer(db, (db.useinfo.div_id == id) & (db.useinfo.event == \"poll\"), 1)[\n 0\n ].act\n == \"4\"\n )\n\n\n# Short answer\n# ------------\ndef test_short_answer_1(selenium_utils_user_1, runestone_db):\n id = \"test_short_answer_1\"\n\n # The first test doesn't click the submit button.\n db = runestone_db\n expr = db.shortanswer_answers.div_id == id\n test_shortanswer.test_sa1(selenium_utils_user_1)\n s = get_answer(db, expr, 0)\n\n # The second test clicks submit with no text.\n test_shortanswer.test_sa2(selenium_utils_user_1)\n s = get_answer(db, expr, 1)\n assert s[0].answer == \"\"\n\n # The third test types text then submits it.\n test_shortanswer.test_sa3(selenium_utils_user_1)\n s = get_answer(db, expr, 2)\n assert s[1].answer == \"My answer\"\n\n # The fourth test is just a duplicate of the third test.\n test_shortanswer.test_sa4(selenium_utils_user_1)\n s = get_answer(db, expr, 3)\n assert s[2].answer == \"My answer\"\n\n\n# Selectquestion\n# --------------\n# Check rendering of selectquestion, which requires server-side support.\ndef test_selectquestion_1(selenium_utils_user_2, runestone_db):\n 
test_poll_1(selenium_utils_user_2, runestone_db)\n\n\n@pytest.mark.skip(reason=\"The spreadsheet component doesn't support selectquestion.\")\ndef test_selectquestion_2(selenium_utils_user_2):\n    test_spreadsheet_1(selenium_utils_user_2)\n\n\ndef test_selectquestion_3(selenium_utils_user_2, runestone_db):\n    test_clickable_area_1(selenium_utils_user_2, runestone_db)\n\n\ndef test_selectquestion_4(selenium_utils_user_2, runestone_db):\n    test_fitb_1(selenium_utils_user_2, runestone_db)\n\n\ndef test_selectquestion_5(selenium_utils_user_2, runestone_db):\n    test_mchoice_1(selenium_utils_user_2, runestone_db)\n\n\ndef test_selectquestion_6(selenium_utils_user_2, runestone_db):\n    test_parsons_1(selenium_utils_user_2, runestone_db)\n\n\ndef test_selectquestion_20(selenium_utils_user_2, runestone_db):\n    test_short_answer_1(selenium_utils_user_2, runestone_db)\n\n\n# Spreadsheet\n# -----------\ndef test_spreadsheet_1(selenium_utils_user_1):\n    test_spreadsheet.test_ss_autograde(selenium_utils_user_1)\n","sub_path":"tests/test_runestone_components.py","file_name":"test_runestone_components.py","file_ext":"py","file_size_in_byte":11830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"452674809","text":"import scipy.stats as stats\nfrom utils.sp_normalization import *\nfrom utils.context_management import suppress_stdout\n\n\ndef preprocessing(dataset_dir, duration, threshold, input_size, n_downsampling):\n    \"\"\"\n    Performs all the preprocessing for the data\n\n    :param dataset_dir: pathlib.Path object pointing to the source, which is a EEGlab format containing a single run\n    from a single subject of shape (64, n_time_stamps)\n    :param duration: duration of each epoch\n    :param threshold: multiples of mean absolute deviation (MAD) within each epoch for thresholding outliers\n    :param input_size: size of the input to the CNN\n\n    :return: normalized_raw: mne Raw object with whitened data\n    :return: ecg_stats: list containing the mean and std for the ECG channel, shape (1, 2)\n    :return: eeg_stats: list containing the mean and std for the EEG channels, shape (63, 2)\n    :return: epoched_data: mne Epoch object containing the epoched data\n    :return: good_idx: list containing the epochs that passed the epoch rejection, used later in prediction step\n    \"\"\"\n    # Loading the raw input in EEGLAB format and downsampling it\n    rs_raw = mne.io.read_raw_eeglab(dataset_dir, preload=True)\n    fs_orig = rs_raw.info['sfreq']\n    fs = fs_orig / n_downsampling\n    rs_raw.resample(fs)\n\n    # Performs normalization by whitening each channel\n    normalized_raw, ecg_stats, eeg_stats = normalize_raw_data(rs_raw)\n\n    # Perform epoch rejection by threshold * MAD\n    unpadded_epoch_dataset, good_idx = dataset_epoch(dataset=normalized_raw, duration=duration, epoch_rejection=True,\n                                                     threshold=threshold, raw_dataset=rs_raw)\n\n    normalized_raw_data = normalized_raw.get_data()\n    n_padding = int((input_size - 1) / 2)\n    padded_normalized_raw_data = np.pad(normalized_raw_data, ((0, 0), (n_padding, n_padding)), mode='constant')\n\n    padded_epoch_data = np.zeros(shape=(unpadded_epoch_dataset.get_data().shape[0], unpadded_epoch_dataset.get_data().shape[1],\n                                        unpadded_epoch_dataset.get_data().shape[2] + 2 * n_padding))\n    # fill every retained epoch (the buffer is allocated for len(good_idx) epochs)\n    for i in range(len(good_idx)):\n        idx_start = int(duration * fs * good_idx[i])\n        idx_end = int(duration * fs * (good_idx[i] + 1) + 2 * n_padding)\n\n        padded_epoch_data[i, :, :] = padded_normalized_raw_data[:, idx_start: idx_end]\n\n    epoched_dataset = mne.EpochsArray(padded_epoch_data, 
unpadded_epoch_dataset.info)\n return normalized_raw, ecg_stats, eeg_stats, epoched_dataset, good_idx\n\n\ndef epoch_events(dataset, fs, duration):\n total_time_stamps = dataset.get_data().shape[1]\n constructed_events = np.zeros(shape=(int(np.floor(total_time_stamps/fs)/duration), 3), dtype=int)\n\n for i in range(0, int(np.floor(total_time_stamps/fs))-duration, duration):\n ix = i/duration\n constructed_events[int(ix)] = np.array([i * fs, 0, 1])\n\n tmax = duration - 1/fs\n\n return constructed_events, tmax\n\n\ndef mad_rejection(dataset, threshold, fs, duration):\n with suppress_stdout():\n srate = dataset.info['sfreq']\n if srate != fs:\n dataset.resample(fs)\n\n info = dataset.info\n ecg_ch = info['ch_names'].index('ECG')\n target_ch = np.delete(np.arange(0, len(info['ch_names']), 1), ecg_ch)\n\n constructed_events, tmax = epoch_events(dataset, fs, duration)\n epoched_dataset = mne.Epochs(dataset, constructed_events, tmin=0, tmax=tmax, baseline=None)\n data_abs = np.absolute(epoched_dataset.get_data())\n\n vec_mabs_eeg = np.mean(data_abs[:, target_ch, :], axis=(1, 2))\n vec_eeg_norm = (vec_mabs_eeg - np.median(vec_mabs_eeg)) / stats.median_absolute_deviation(vec_mabs_eeg)\n vec_bad_epochs_ix = np.arange(0, len(vec_eeg_norm), 1)[vec_eeg_norm > threshold]\n\n return vec_bad_epochs_ix\n\n\ndef dataset_epoch(dataset, duration, epoch_rejection, threshold=None, raw_dataset=None, good_idx=None):\n # Constructing events of duration 10s\n info = dataset.info\n fs = info['sfreq']\n\n constructed_events, tmax = epoch_events(dataset, fs, duration)\n old_epoched_dataset = mne.Epochs(dataset, constructed_events, tmin=0, tmax=tmax, baseline=None)\n\n if epoch_rejection:\n # Epoch rejection based on median absolute deviation of mean of absolute values for individual epochs\n reject_idx = mad_rejection(raw_dataset, threshold, fs, duration)\n good_idx = np.delete(np.arange(0, old_epoched_dataset.get_data().shape[0], 1), reject_idx)\n good_data = old_epoched_dataset.get_data()[good_idx, :, :]\n epoched_dataset = mne.EpochsArray(good_data, old_epoched_dataset.info)\n\n return epoched_dataset, good_idx\n\n else:\n epoched_data = old_epoched_dataset.get_data()[good_idx, :, :]\n\n return epoched_data\n","sub_path":"utils/sp_preprocessing.py","file_name":"sp_preprocessing.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"279322585","text":"# coding=utf-8\n# Monthly Topic Analysis\n# Use tf-idf on monthly messages to highlight key topics of conversation during a particular month\n# Thomas Coe\n# 2018\n\n# Library imports\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom utilities import create_stopword_list\n\n# Function Definitions\ndef tfidf_run(corpus,stopwords='stopwords.txt'):\n # Function to run the tf-idf algorithm on given corpus\n # Load custom stopwords list\n stopword_list = create_stopword_list(stopwords)\n\n # Initialise tfidf vectorizer object\n vect = TfidfVectorizer(stop_words=stopword_list,\n ngram_range=(1, 3), # ngram size\n min_df=5, # minimum number of occurences\n max_df=0.5, # maximum proportion of messages it occurs in\n max_features=10000) # take the top X ngrams that meet the criteria\n\n # fit the vectorizer\n counts = vect.fit_transform(corpus)\n\n # words used\n feature_names = vect.get_feature_names()\n\n # First way the whole matrix so lots of zeroes etc\n counts_df = pd.DataFrame(counts.todense(),columns=feature_names)\n\n 
return pd.DataFrame(counts_df.columns[np.argsort(-counts_df.values,axis=1)[:,:5]], index=corpus.index)\n\n\ndef corpus_gen(messages,user=None):\n # Function to compile a monthly corpus for usage in analysis\n # Filter on user (if default then no action\n if user is not None:\n messages = messages[(messages['SENDER'] == user)]\n\n # Filter on links & images\n messages_filt = messages[messages.MESSAGE.str.contains(\"omitted|https\") == False]\n\n # Create monthyear variable and group by\n messages_filt['MONTHYEAR'] = messages_filt['YEAR']*100 + messages_filt['MONTH']\n corpus = messages_filt.groupby('MONTHYEAR')['MESSAGE'].apply(lambda x: \"%s\" % ''.join(x))\n\n return corpus\n\n# Read data\nchat = pd.read_csv('Chat/parsed_chat.csv')\nstopword_list = create_stopword_list('stopwords.txt')\n\n# Create monthly corpus\nt_corpus = corpus_gen(chat,'Tom C')\ns_corpus = corpus_gen(chat,'Salka Sigurdardottir ')\ncorpus = corpus_gen(chat)\n\n# Top 5 monthly words\ntom_monthly = tfidf_run(t_corpus)\ntom_monthly.to_clipboard()\nsalka_monthly = tfidf_run(s_corpus)\nsalka_monthly.to_clipboard()\nmonthly = tfidf_run(corpus)\nmonthly.to_clipboard()\n\n\n\n","sub_path":"Code/monthly_topic.py","file_name":"monthly_topic.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"73768052","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport datetime\n\nfrom stockspider import items\nfrom scrapy.loader import ItemLoader\n\n\nclass StockHistoryTransactionSpider(scrapy.Spider):\n name = 'stockhistorytransaction'\n allowed_domains = ['www.aigaogao.com']\n start_urls = ['']\n\n def start_requests(self):\n cls = items.StockItem.Meta.model\n for item in cls.select().where(cls.last_update != datetime.datetime.now().strftime('%Y-%m-%d')):\n yield scrapy.Request(f'http://www.aigaogao.com/tools/history.html?s={item.code}', meta={'id': item.id})\n\n def parse(self, response):\n for item_selector in response.css('#ctl16_contentdiv tr:not(:first-child)'):\n item_loader = ItemLoader(items.StockHistoryTransactionItem(), selector=item_selector)\n item_loader.add_value('stock_id', response.meta.get('id'))\n item_loader.add_css('date', 'td a::text')\n item_loader.add_css('start_price', 'td:nth-child(2)::text')\n item_loader.add_css('max_price', 'td:nth-child(3)::text')\n item_loader.add_css('min_price', 'td:nth-child(4)::text')\n item_loader.add_css('end_price', 'td:nth-child(5)::text')\n item_loader.add_css('turnover', 'td:nth-child(6)::text')\n item_loader.add_css('turnover_price', 'td:nth-child(7)::text')\n item_loader.add_css('gain_price', 'td:nth-child(8)::text')\n item_loader.add_css('gain', 'td:nth-child(9) span::text')\n print(f'now load {response.meta.get(\"id\")}, date {item_selector.css(\"td a::text\").extract_first()}')\n stock_item_info = item_loader.load_item()\n yield stock_item_info\n","sub_path":"stockspider/stockspider/spiders/stockhistroytransation.py","file_name":"stockhistroytransation.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"638913635","text":"from pygame \\\n import sprite, image, transform\nfrom components.shared.buttontypes \\\n import ButtonTypes\nfrom constants.navigation import *\nfrom constants.screen import *\n\n\nclass RedirectScreen(sprite.Sprite):\n\n def __init__(self, nav, success=False):\n super(RedirectScreen, self).__init__()\n self.image = image.load(SUCCESS) if success else image.load(FAIL)\n 
self.image = transform.scale(self.image, (400, 200))\n        self.rect = self.image.get_rect()\n        self.rect.x = 0\n        self.rect.y = 0\n\n        self.nav = nav\n\n        self.buttons = {\"LEFT\": None, \"RIGHT\": None}\n\n        self._setup_buttons(success)\n\n    def _setup_buttons(self, success):\n        if success:\n            self.buttons[\"LEFT\"] = ButtonTypes.cont(lambda: self.nav.next_level())\n        else:\n            self.buttons[\"LEFT\"] = ButtonTypes.retry(lambda: self.nav.retry())\n        self.buttons[\"RIGHT\"] = ButtonTypes.levelselect(lambda: self.nav.level_select())\n\n    def draw(self, screen):\n        screen.blit(self.image, (SCREEN_WIDTH/2 - self.rect.width/2, SCREEN_HEIGHT/2 - self.rect.height/2))\n        for k in self.buttons:\n            self.buttons[k].draw(screen)\n\n    def handle_event(self, e):\n        for k in self.buttons:\n            self.buttons[k].handle_event(e)\n\n    def reset(self):\n        for k in self.buttons:\n            self.buttons[k].reset()\n\n    def set_volume(self, v):\n        for k in self.buttons:\n            self.buttons[k].click_sound.set_volume(v)\n","sub_path":"components/navigation/redirectscreen.py","file_name":"redirectscreen.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"157806623","text":"import discord\nimport asyncio\nimport urllib.request, json\nimport re\n\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\n\nbot = commands.Bot(command_prefix='!', description='None')\n\nregex = re.compile(\"(hi|what's up|yo|hey|hello) felix\", re.IGNORECASE)\ngetgif = re.compile(\"felix gif \", re.IGNORECASE)\n\n\nkey = \"Your key here\"\nbotauth2 = \"auth2 for discord bot\"\n\n@bot.command(pass_context=True)\nasync def message_me():\n\tprint(\"test\")\n\n# an example python version of the live stream\n# https://www.youtube.com/watch?v=tqT3O0S38gY&t=617s\n@bot.event\nasync def on_message(message):\n\tif regex.match(message.content):\n\t\tawait bot.send_message(message.channel, \"hello\")\n\telif getgif.match(message.content):\n\t\tgif = message.content.split(\" \")[2]\n\n\t\tawait bot.send_message(message.channel,\n\t\t\t\"Let me get that for you!\")\n\n\t\ttry:\n\t\t\tdata = json.loads(urllib.request.urlopen(\\\n\t\t\t\"https://api.giphy.com/v1/gifs/search\"\\\n\t\t\t+ \"?api_key=\" + key\\\n\t\t\t+ \"&q=\" + gif\\\n\t\t\t+ \"&limit=1\"\\\n\t\t\t+ \"&offset=0\"\\\n\t\t\t+ \"&rating=R\"\\\n\t\t\t+ \"&lang=en\").read())\n\n\t\t\tawait bot.send_message(message.channel, data[\"data\"][0][\"embed_url\"])\n\t\texcept Exception as e:\n\t\t\tawait bot.send_message(message.channel,\n\t\t\t\t\"I'm sorry I couldn't find that\")\n\n\n\n\n# pass the token variable itself, not the literal string \"botauth2\"\nbot.run(botauth2)\n\n# keys in commit history have been regenerated.\n","sub_path":"EMFelixGifBot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"441862882","text":"#!/usr/bin/python3\nfrom troposphere import (\n    Base64, \n    Tags, \n    Join, \n    GetAtt,\n    Parameter,\n    Output,\n    Ref,\n    Template,\n    Sub\n    )\nfrom troposphere.cloudtrail import Trail\nfrom pathlib import Path\nimport types\nimport sys, getopt\nimport os\nimport re\n\n## BEGIN Input Parameter Definition ##\n## These are \"global\" parameters that will be assigned to all instances\nparameters = {\n    \"LogBucket\" : Parameter(\n        \"LogBucket\",\n        Description = \"The S3 bucket to write logs to\",\n        Type = \"String\",\n        Default = \"cloudtrail-elasticsearch-s3bucket-yvjvmo9n5z5r\",\n    ),\n    \"SnsTopic\" : Parameter(\n        \"SnsTopic\",\n        Description = \"The arn of the SNS 
topic to notify for log delivery\",\n Type = \"String\",\n Default = \"arn:aws:sns:us-east-1:514107046317:cloudtrail-elasticsearch-SNSTopic-1M25KZBQ31OBB\",\n ),\n}\ndef gen_cloudtrail():\n trail = Trail(\n \"CloudTrail\",\n TrailName = \"CloudTrail\",\n IsLogging = True,\n IsMultiRegionTrail = True,\n IncludeGlobalServiceEvents = True,\n S3BucketName = Ref(parameters['LogBucket']),\n SnsTopicName = Ref(parameters['SnsTopic'])\n )\n return trail\n\n# Function to write template to specified file\ndef write_to_file( template ):\n \n # Define the directory to write to as located one level up from the current directory, in a folder named templates\n dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..','templates'))\n \n # Create the directory if it does not exist\n if not os.path.exists(dir):\n os.makedirs(dir)\n \n # Define filename for template equal to name of current script \n filename = re.sub('\\.py$','', sys.argv[0])\n file = os.path.join(dir,filename)\n \n # Write the template to file\n target = open(file + '.json', 'w')\n target.truncate()\n target.write(template)\n target.close()\n\n######################## MAIN BEGINS HERE ###############################\ndef main(argv):\n \n # Set up a blank template\n t = Template()\n \n # Add description\n t.add_description(\"[Platform] CloudTrail\")\n\n # Add all defined input parameters to template\n for p in parameters.values():\n t.add_parameter(p)\n \n t.add_resource(gen_cloudtrail())\n \n # Convert template to json\n template=(t.to_json())\n \n # Print template to console (for debugging) and write to file\n print(template)\n write_to_file(template)\n\nif __name__ == \"__main__\":\n main(sys.argv[0:])\n","sub_path":"troposphere/cloudtrail.py","file_name":"cloudtrail.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"358457954","text":"import os\nimport shutil\nimport logging\nfrom unittest import mock\n\nimport pytest\nimport toml\n\nfrom meguca.plugins.src.dispatch_updater import dispatch_renderer\n\n\nclass TestCustomVars():\n @pytest.fixture(scope='class')\n def setup_custom_vars_files(self):\n custom_vars_1 = {'foo1': {'bar1': 'john1'}}\n with open('test1.toml', 'w') as f:\n toml.dump(custom_vars_1, f)\n\n custom_vars_2 = {'foo2': {'bar2': 'john2'}}\n with open('test2.toml', 'w') as f:\n toml.dump(custom_vars_2, f)\n\n yield 0\n\n os.remove('test1.toml')\n os.remove('test2.toml')\n\n def test_load_custom_vars_with_one_file(self, setup_custom_vars_files):\n ins = dispatch_renderer.CustomVars('test1.toml')\n\n assert ins._custom_vars == {'foo1': {'bar1': 'john1'}}\n\n def test_load_custom_vars_with_many_files(self, setup_custom_vars_files):\n ins = dispatch_renderer.CustomVars(['test1.toml', 'test2.toml'])\n\n assert ins._custom_vars == {'foo1': {'bar1': 'john1'},\n 'foo2': {'bar2': 'john2'}}\n\n def test_load_custom_vars_with_non_existent_file(self):\n with pytest.raises(FileNotFoundError):\n dispatch_renderer.CustomVars(['asas.toml', 'asss.toml'])\n\n def test_load_custom_vars_with_no_file(self):\n \"\"\"Load custom vars if no file is provided.\n Nothing should happen.\n \"\"\"\n\n dispatch_renderer.CustomVars([])\n\n def test_get_custom_vars(self):\n ins = dispatch_renderer.CustomVars('')\n ins._custom_vars = {'foo': {'bar': 'john'}}\n\n assert ins.custom_vars == {'foo': {'bar': 'john'}}\n\n\nclass TestTemplateRenderer():\n def test_load_filters(self):\n ins = dispatch_renderer.TemplateRenderer('tests', 
'tests/resources/filters.py', '')\n\n assert ins.env.filters['filter1']\n\n def test_load_filters_with_no_filters(self):\n ins = dispatch_renderer.TemplateRenderer('tests', '', '')\n assert 'filter1' not in ins.env.filters\n\n @pytest.mark.usefixtures('text_files')\n def test_validate_templates_no_error(self, text_files, caplog):\n caplog.set_level(logging.ERROR)\n\n text_files({'tests/template_1.txt': '{{ a }}',\n 'tests/template_2.txt': '{{ b }}'})\n ins = dispatch_renderer.TemplateRenderer('tests', '', 'txt')\n\n ins.validate_templates()\n\n for record in caplog.records:\n assert record.levelname != 'ERROR'\n\n @pytest.mark.usefixtures('text_files')\n def test_validate_templates_with_syntax_error(self, text_files, caplog):\n \"\"\"Validate templates with syntax errors. Should log error.\n \"\"\"\n\n caplog.set_level(logging.ERROR)\n\n text_files({'tests/template_1.txt': '{{ a }',\n 'tests/template_2.txt': '{{ b }'})\n ins = dispatch_renderer.TemplateRenderer('tests', '', 'txt')\n\n ins.validate_templates()\n\n for record in caplog.records:\n assert record.levelname == 'ERROR'\n\n @pytest.mark.usefixtures('text_files')\n def test_render_with_filters(self, text_files):\n text_files({'tests/template.txt': '{% for i in j %}{{ i|filter1(2) }} {{ i|filter2(3) }} {% endfor %}'})\n ins = dispatch_renderer.TemplateRenderer('tests', 'tests/resources/filters.py', 'txt')\n\n assert ins.render('template',\n context={'j': [1, 2]}) == '1 2 1and3 2 2 2and3 '\n\n\nclass TestDispatchRenderer():\n @pytest.mark.usefixtures('text_files', 'toml_files')\n @pytest.fixture\n def setup_template(self, text_files, toml_files):\n text_files({'tests/test1.txt': ('{% for i in data_products.j %}[tag1]{{ i|filter2(1) }}[/tag1]{% endfor %}'\n '[dar]{{ john.dave }}{{ current_dispatch.name }}[/dar][bar]'\n '{{ ext_config.meguca.key1 }}[/bar]')})\n\n toml_files({'tests/custom_vars.toml': {'john': {'dave': 'marry'},\n 'key1': 'val1'}})\n\n def test_render(self, setup_template):\n data = {'j': [1, 2, 3]}\n dispatch_info = {'test1': {'id': 1234567, 'title': 'ABC'},\n 'test2': {'id': 7890123, 'title': 'DEF'}}\n plg_config = {'conf1': 'val1'}\n config = {'meguca': {'key1': 'val1'}}\n renderer_config = {'template': {'template_dir_path': 'tests',\n 'filters_path': 'tests/resources/filters.py',\n 'template_file_ext': 'txt'},\n 'bbcode': {'simple_formatter_path': 'tests/resources/bb_simple_formatters.toml',\n 'complex_formatter_path': 'tests/resources/bb_complex_formatters.py',\n 'complex_formatter_config_path': 'tests/resources/bb_complex_formatter_config.toml'},\n 'custom_vars_path': 'tests/custom_vars.toml'}\n ins = dispatch_renderer.Renderer(renderer_config)\n\n ins.update_ctx(data, plg_config, config, dispatch_info)\n\n expected = ('[tagr1]1and1[/tagr1][tagr1]2and1[/tagr1][tagr1]3and1[/tagr1]'\n '[abc]marrytest1[/abc][xyz=testval]val1[/xyz]')\n assert ins.render('test1') == expected\n\n","sub_path":"tests/test_plugins/test_dispatch_updater/test_dispatch_renderer.py","file_name":"test_dispatch_renderer.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"173491458","text":"import numpy as np\nimport DTLearner as dt\nimport RTLearner as rt\nimport LinRegLearner as lr\n\nclass BagLearner(object):\n\n\tdef __init__(self, learner = {}, kwargs = {}, bags = 20, boost = False, verbose = False):\n\t\tself.learners = []\n\t\tself.bags = bags\n\t\tself.boost = boost\n\t\tself.verbose = verbose\n\t\tself.kwargs = kwargs\t\t\t\n\t\tfor i in 
range(0, bags):\n\t\t\tself.learners.append(learner(**kwargs))\n\t\tpass # move along, these aren't the drones you're looking for \n\t\n\tdef author(self):\n\t\treturn 'swang632' # replace tb34 with your Georgia Tech username\n\t\n\tdef addEvidence(self, Xtrain, Ytrain):\n\t\tfor i in range(0, self.bags):\n\t\t\tself.learners[i].addEvidence(Xtrain, Ytrain)\n\t\t\n\tdef query(self, Xtest):\n\t\tself.outputs = []\n\t\tfor i in range(0, self.bags):\n\t\t\tself.outputs.append(self.learners[i].query(Xtest))\n\t\treturn np.mean(self.outputs, axis = 0)\n\t\n\t\nif __name__==\"__main__\":\n\tprint (\"This is a Bag Learner\\n\")\n","sub_path":"assess_learner/BagLearner.py","file_name":"BagLearner.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"339533446","text":"from .element import Element\nfrom simulation.network import Content, Frame\nfrom simulation.environment import Environment\nfrom enum import IntEnum\n\n\nclass State(IntEnum):\n HEATING = 1\n WAITING = 2\n\n\nclass Heater(Element):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.state = State.WAITING\n\n def on_receive(self, frame: Frame) -> None:\n if frame.content is Content.START_HEAT and self.state is State.WAITING:\n self.state = State.HEATING\n Environment.heaters.append(self)\n self.node.slot.background_color = (1, .5, .5, .5)\n\n elif frame.content is Content.STOP_HEAT and self.state is State.HEATING:\n self.state = State.WAITING\n Environment.heaters.remove(self)\n self.node.slot.background_color = (.5, 1, .5, .5)\n\n def on_remove(self):\n super().on_remove()\n if self in Environment.heaters:\n Environment.heaters.remove(self)\n","sub_path":"simulation/nodes/elements/heater.py","file_name":"heater.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"485882546","text":"'''\ncontours_write_image.py\n\nExperiment with OpenCV for FITS files.\n\nhttp://docs.opencv.org/trunk/doc/py_tutorials/py_tutorials.html\n\nAuthor: S.T. Castle\nCreated: 20150226\n'''\n\nimport numpy as np\nimport cv2\nimport os.path\n\ndef contours(name, orig_img, blockSize, c):\n '''\n name: original filename of the image\n orig_img: The image to be processed.\n blockSize: size of the pixel neighborhood for adaptive thresholding.\n c: Correction to apply before thresholding.\n '''\n img = np.ndarray.copy(orig_img) # Copy of original for smoothing.\n #cnt_imgcopy = np.ndarray.copy(img) # for contours without thresholding.\n am_imgcopy = np.ndarray.copy(img) # for adaptive mean thresholding.\n ag_imgcopy = np.ndarray.copy(img) # for adaptive Gaussian thresholding.\n ot_imgcopy = np.ndarray.copy(img) # for Otsu's thresholding.\n ca_imgcopy = np.ndarray.copy(img) # for canny edge detection.\n\n cnt_width = 25 # Thickness of the drawn contours. 
No algorithmic influence.\n\n    # Split filename and extension.\n    spl = os.path.splitext(name)\n    name_pref = spl[0]  # Name prefix.\n\n    # Apply a smoothing filter.\n    img = cv2.bilateralFilter(img,9,75,75)\n    name_pref = name_pref + '_smooth=bilat'\n    \n    # Just contours, no thresholding.\n    #cnt_name = name_pref + '_contours' + spl[1]\n    #contours, hierarchy = cv2.findContours(cnt_imgcopy,cv2.RETR_TREE,\\\n    #        cv2.CHAIN_APPROX_SIMPLE)\n    #cv2.drawContours(cnt_imgcopy, contours, -1, (0,255,0), cnt_width)\n    #cv2.imwrite(cnt_name, cnt_imgcopy)\n\n    # Adaptive mean thresholding and contours.\n    am_name = name_pref + '_adapt-mean-thr_contours' + spl[1]\n    am_thr = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n            cv2.THRESH_BINARY,blockSize,c)\n    am_contours, am_hierarchy = cv2.findContours(am_thr,cv2.RETR_TREE,\\\n            cv2.CHAIN_APPROX_SIMPLE)\n    cv2.drawContours(am_imgcopy, am_contours, -1, (0,255,0), cnt_width)\n    cv2.imwrite(am_name, am_imgcopy)\n\n    # Adaptive Gaussian thresholding and contours.\n    ag_name = name_pref + '_adapt-gauss-thr_contours' + spl[1]\n    ag_thr = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n            cv2.THRESH_BINARY,blockSize,c)\n    ag_contours, ag_hierarchy = cv2.findContours(ag_thr,cv2.RETR_TREE,\\\n            cv2.CHAIN_APPROX_SIMPLE)\n    cv2.drawContours(ag_imgcopy, ag_contours, -1, (0,255,0), cnt_width)\n    cv2.imwrite(ag_name, ag_imgcopy)\n    \n    # Otsu's thresholding and contours.\n    ot_name = name_pref + '_otsu-thr_contours' + spl[1]\n    ret, ot_thr = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n    ot_contours, ot_hierarchy = cv2.findContours(ot_thr,cv2.RETR_TREE,\\\n            cv2.CHAIN_APPROX_SIMPLE)\n    cv2.drawContours(ot_imgcopy, ot_contours, -1, (0,255,0), cnt_width)\n    cv2.imwrite(ot_name, ot_imgcopy)\n\n    # Canny edge detection and contours.\n    ca_name = name_pref + '_canny_contours' + spl[1]\n    canny = cv2.Canny(img,100,200)\n    ca_contours, ca_hierarchy = cv2.findContours(canny,cv2.RETR_TREE,\\\n            cv2.CHAIN_APPROX_SIMPLE)\n    cv2.drawContours(ca_imgcopy, ca_contours, -1, (0,255,0), cnt_width)\n    cv2.imwrite(ca_name, ca_imgcopy)\n","sub_path":"fits-algs/contours-write-image-fits.py","file_name":"contours-write-image-fits.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"628776576","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 26 10:51:24 2018\r\n\r\n@author: Patrick\r\n\"\"\"\r\n\r\n#Number 1\r\nimport pandas as pd\r\nscoring=pd.read_table('UWvsMSU_1-22-13.txt',header=0,delim_whitespace=True, names=('time','team','score'))\r\nUWscore=0\r\nUWpoints=[]\r\nUWtime=[]\r\nMSUscore=0\r\nMSUpoints=[]\r\nMSUtime=[]\r\nfor i in range(0,len(scoring.time)):\r\n    if scoring.team[i]=='UW':\r\n        UWscore+=scoring.score[i]\r\n        UWpoints.append(UWscore)\r\n        time=scoring.time[i]\r\n        UWtime.append(time)\r\n    elif scoring.team[i]=='MSU':\r\n        MSUscore+=scoring.score[i]\r\n        MSUpoints.append(MSUscore)\r\n        time=scoring.time[i]\r\n        MSUtime.append(time)\r\n        \r\nimport matplotlib.pyplot as plt\r\nplt.plot(UWtime,UWpoints,'r-')\r\nplt.plot(MSUtime,MSUpoints,'g-')\r\nplt.show()\r\n\r\n#Number 2\r\nimport random\r\nnum=random.randint(1,100)\r\nprint (\"I'm thinking of a number 1-100...\")\r\nguess=None\r\nwhile guess != num:\r\n    guess=input()\r\n    guess = int(guess)\r\n    if guess>num:\r\n        print ('lower') \r\n    elif guess).\r\n# @author: - Timotius Wigianto \r\n#          - Pambudi Satria \r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU Affero General Public 
License as\r\n# published by the Free Software Foundation, either version 3 of the\r\n# License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU Affero General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Affero General Public License\r\n# along with this program. If not, see .\r\n#\r\n##############################################################################\r\n\r\n# 1 : imports of python lib\r\n\r\n\r\n# 2 : imports of openerp\r\nfrom openerp import models, fields, api, _\r\nfrom openerp.exceptions import AccessError, Warning\r\n\r\n# 3 : imports from odoo modules\r\nimport openerp.addons.decimal_precision as dp\r\n\r\n\r\nclass HRSalaryProposal(models.Model):\r\n # Private attributes\r\n _name = \"hr.salary.proposal\"\r\n \r\n # Default methods\r\n \r\n\r\n # Fields declaration\r\n name = fields.Char(string='Number', readonly=True)\r\n tanggal = fields.Date(default=lambda self: fields.Date.context_today(self), readonly=True,\r\n states={'draft': [('readonly', False)], 'submit': [('readonly', False)]})\r\n requestor = fields.Many2one('res.users', string=\"Requestor\", default=lambda self: self.env.user, readonly=True)\r\n nama_cabang = fields.Many2one('hr.department', string=\"Nama Cabang\", readonly=True)\r\n nama_karyawan = fields.Many2one(\"hr.employee\", string=\"Nama Karyawan\", required=True, readonly=True,\r\n states={'draft': [('readonly', False)], 'submit': [('readonly', False)]})\r\n jabatan = fields.Many2one('hr.job', string=\"Jabatan\", readonly=True)\r\n contract_id = fields.Many2one('hr.contract', string='Contract')\r\n wage = fields.Float(string='Wage', digits=dp.get_precision('Payroll'))\r\n gaji_semula = fields.Float(digits=dp.get_precision('Payroll'), string=\"Gaji Awal\", compute=\"compute_gaji\", readonly=True)\r\n kenaikan_gaji = fields.Float(digits=dp.get_precision('Payroll'), string=\"Kenaikan Gaji\", required=True, readonly=True,\r\n states={'draft': [('readonly', False)], 'submit': [('readonly', False)]})\r\n gaji_usulan = fields.Float(digits=dp.get_precision('Payroll'), string=\"Gaji Usulan\", compute=\"compute_gaji\", readonly=True)\r\n alasan = fields.Text(readonly=True, states={'draft': [('readonly', False)], 'submit': [('readonly', False)]})\r\n state = fields.Selection([\r\n ('draft','Open'),\r\n ('submit','Submit'),\r\n ('reject','Reject'),\r\n ('approved','Approved'),\r\n ], string='Status', default='draft', track_visibility='onchange', copy=False,)\r\n \r\n # compute and search fields, in the same order that fields declaration\r\n @api.one\r\n @api.depends('wage','kenaikan_gaji')\r\n def compute_gaji(self):\r\n# if((self.gaji_semula!=0) and (self.kenaikan_gaji!=0)):\r\n# self.gaji_usulan = self.gaji_semula+self.kenaikan_gaji\r\n self.gaji_semula = self.wage\r\n self.gaji_usulan = self.gaji_semula + self.kenaikan_gaji\r\n \r\n # Constraints and onchanges\r\n @api.one\r\n @api.constrains('contract_id')\r\n def _check_contract(self):\r\n if not self.contract_id:\r\n raise Warning(_('Karyawan tidak mempunyai kontrak kerja! 
Silakan buat kontrak kerja.'))\r\n \r\n @api.onchange('nama_karyawan','tanggal')\r\n def onchange_karyawan(self):\r\n if self.nama_karyawan:\r\n self.nama_cabang = self.nama_karyawan.department_id and self.nama_karyawan.department_id.id or False\r\n self.jabatan = self.nama_karyawan.job_id and self.nama_karyawan.job_id.id or False\r\n contract_ids = self.env['hr.payslip'].get_contract(self.nama_karyawan, self.tanggal, self.tanggal)\r\n if not contract_ids:\r\n self.contract_id = False\r\n self.wage = 0.0\r\n return\r\n contract = self.env['hr.contract'].browse(contract_ids[0])\r\n self.contract_id = contract and contract.id or False\r\n self.wage = contract and contract.wage or 0.0\r\n \r\n # CRUD methods\r\n @api.model\r\n def create(self, vals):\r\n vals['name'] = self.env['ir.sequence'].get(\"salary.proposal\")\r\n employee = self.env['hr.employee'].search([('id','=',vals['nama_karyawan'])])\r\n vals['nama_cabang'] = employee.department_id and employee.department_id.id or False\r\n vals['jabatan'] = employee.job_id and employee.job_id.id or False\r\n return super(HRSalaryProposal, self).create(vals)\r\n \r\n @api.multi\r\n def write(self, vals):\r\n if vals.get('nama_karyawan'):\r\n employee = self.env['hr.employee'].search([('id','=',vals.get('nama_karyawan'))])\r\n vals['nama_cabang'] = employee.department_id and employee.department_id.id or False\r\n vals['jabatan'] = employee.job_id and employee.job_id.id or False\r\n return super(HRSalaryProposal, self).write(vals)\r\n\r\n # Action methods\r\n @api.multi\r\n def action_submit(self):\r\n self.state = 'submit'\r\n\r\n @api.multi\r\n def action_approve(self):\r\n self.contract_id.wage = self.gaji_usulan\r\n self.state = 'approved'\r\n \r\n @api.multi\r\n def action_reject(self):\r\n self.state = 'reject'\r\n \r\n # Business methods\r\n","sub_path":"hr_salary_add/models/hr_salary_proposal.py","file_name":"hr_salary_proposal.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"447720920","text":"#!/usr/bin/env python3\n# converter.py - Fahrenheit and Celsius converter\nimport sys\n\narg = sys.argv[1]\ntemp = float(arg[:-1])\n\nif arg.endswith(\"c\"):\n ftemp = (9.0 / 5.0) * temp + 32\n print(\"%gf\" %ftemp)\n\nif arg.endswith(\"f\"):\n ctemp = (5.0 / 9.0) * (temp - 32)\n print(\"%gc\" %ctemp)\n\n##########################################\n\n# $ converter.py 23c\n# 73.4f\n#\n# $ converter.py 73.4f\n# 23c\n","sub_path":"learning/training/pythonsolns/py3/solns/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"647325694","text":"# python3\nfrom sys import stdin\nimport itertools\n\nEPS = 1e-9\nTOL = 1e-3\n\n#DEBUG = False\nDEBUG = True\n#LOG = True\nLOG = False\nif DEBUG: test = open(\"tests\\\\189\", \"r\")\n\ndef find_subsets(s, n):\n return list(itertools.combinations(s, n))\n\nclass Equation:\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\nclass Position:\n def __init__(self, column, row):\n self.column = column\n self.row = row\n\ndef SelectPivotElement(a, used_rows, used_columns):\n pivot_element = Position(0, 0)\n while used_rows[pivot_element.row]:\n pivot_element.row += 1\n while used_columns[pivot_element.column]:\n pivot_element.column += 1\n #correct if list index out of range (whole row of zeroes!)\n while abs(a[pivot_element.row][pivot_element.column]) < EPS:\n pivot_element.row += 1\n if 
(pivot_element.row >= len(a)):\n return None\n return pivot_element\n\ndef SwapLines(a, b, used_rows, pivot_element):\n #optimize\n a[pivot_element.column], a[pivot_element.row] = a[pivot_element.row], a[pivot_element.column]\n b[pivot_element.column], b[pivot_element.row] = b[pivot_element.row], b[pivot_element.column]\n used_rows[pivot_element.column], used_rows[pivot_element.row] = used_rows[pivot_element.row], used_rows[pivot_element.column]\n pivot_element.row = pivot_element.column;\n\ndef ProcessPivotElement(a, b, pivot_element):\n # Write your code here\n if LOG: print(\"before:...a:\", a, \"b\", b, \"r:\",pivot_element.row, \"c:\", pivot_element.column)\n \n #scale\n scale = a[pivot_element.row][pivot_element.column]\n if LOG: print(\".... scale\", scale)\n for x in range(pivot_element.column, len(a[pivot_element.row])):\n a[pivot_element.row][x] /= scale\n b[pivot_element.row] /= scale\n #substract\n for y in range(pivot_element.column + 1, len(a)):\n factor = a[y][pivot_element.column] / a[pivot_element.row][pivot_element.column]\n if LOG: print(\"factor\", factor)\n for x in range(pivot_element.column, len(a[pivot_element.row])):\n a[y][x] -= factor * a[pivot_element.row][x]\n b[y] -= factor * b[pivot_element.row]\n \n if LOG: print(\"after...a:\", a, \"b\", b, \"r:\",pivot_element.row, \"c:\", pivot_element.column)\n if LOG: print(\"-----------------------------------------\\n\")\n return\n\ndef MarkPivotElementUsed(pivot_element, used_rows, used_columns):\n used_rows[pivot_element.row] = True\n used_columns[pivot_element.column] = True\n\ndef SolveEquation(equation):\n a = equation.a\n b = equation.b\n size = len(a)\n\n used_columns = [False] * size\n used_rows = [False] * size\n for step in range(size):\n pivot_element = SelectPivotElement(a, used_rows, used_columns)\n if pivot_element is None:\n if LOG: print(\"NO SOLUTION!\")\n return None\n if LOG: print(\"pivot_element\", \"r:\",pivot_element.row, \"c:\", pivot_element.column)\n SwapLines(a, b, used_rows, pivot_element)\n ProcessPivotElement(a, b, pivot_element)\n MarkPivotElementUsed(pivot_element, used_rows, used_columns)\n \n if LOG: print(\"########### process triangular form ###########\")\n for row in range(size-1, 0, -1):\n for c in range(row-1, -1, -1):\n factor = a[c][row]/a[row][row]\n a[c][row] -= factor * a[row][row]\n b[c] -= factor *b[row]\n\n return b\n\ndef EquationFromSubset(s):\n a = []\n b = []\n for t in s:\n a.append(A[t].copy())\n b.append(B[t])\n return Equation(a,b)\n\ndef calcEQ(e, sol):\n sum = 0;\n for i in range (len(e)):\n sum += e[i]*sol[i]\n return sum\n\ndef setZero(solution):\n for s in range(len(solution)):\n if solution[s] < 0: solution[s] = 0\n return solution\n\ndef checkSolution(solution):\n if LOG: print(\"Checking solution\", solution)\n satisfy = True\n for e in range(len(A)):\n check = calcEQ(A[e], solution)\n solves = check <= B[e] + TOL\n #solves = check <= B[e]\n #if LOG: print(e, \"EQ:\", A[e], \"B[e]:\", B[e] + TOL, \"=>\",check,\"diff:\", check - B[e], solves, \"solution\", solution)\n if LOG: print(e, \"B[e] + TOL:\", B[e] + TOL, \"=>\",check, solves, \"solution:\", solution)\n if solves == False:\n if LOG: print(\"NOT satisfactory!\")\n satisfy = False\n break\n \n if satisfy == True:\n return solution\n else:\n return None\n \ndef solve_diet_problem(n, m, A, b, c): \n # Finding subsets\n global subsets\n subsets = find_subsets([i for i in range(len(A))], m)\n global Solutions\n Solutions = []\n \n for s in subsets:\n if LOG: print(\"\\n-----------------------\")\n if 
LOG: print(\"subset\", s)\n global eq\n eq = EquationFromSubset(s)\n solution = SolveEquation(eq)\n if LOG: print(\"SOLUTION: \", solution)\n if solution is not None:\n if LOG: print(\"checking solutions ...\")\n setZero(solution)\n solution = checkSolution(solution)\n if solution is not None:\n if LOG: print(\"adding solution\", solution)\n Solutions.append(solution)\n \n if LOG: print(\"\\n\\n\") \n if len(Solutions) == 0:\n return [-1, [0] * m]\n else:\n sol, pl = maximize(Solutions)\n if LOG: print(\"sol, pl\",sol, pl)\n if sum(sol) >= 1e9-TOL:\n return [1, [0] * m]\n else:\n return [0, sol]\n \ndef maximize(Solutions):\n i = -1\n pl = -float(\"Inf\")\n for sol in range(len(Solutions)):\n val = calcEQ(C, Solutions[sol])\n if LOG: print(Solutions[sol], \"val\", val)\n if val > pl: \n i = sol\n pl = val\n return [Solutions[i], pl]\n\nif DEBUG:\n n, m = list(map(int, test.readline().split()))\n A = []\n for i in range(n):\n A += [list(map(int, test.readline().split()))]\n B = list(map(int, test.readline().split()))\n C = list(map(int, test.readline().split()))\nelse:\n n, m = list(map(int, stdin.readline().split()))\n A = []\n for i in range(n):\n A += [list(map(int, stdin.readline().split()))]\n B = list(map(int, stdin.readline().split()))\n C = list(map(int, stdin.readline().split()))\n \n#add >0\nfor i in range(m):\n temp = [0] * m\n temp[i] = -1\n A += [temp]\n B.append(0)\n\n#add <1e9\n# =============================================================================\n# for i in range(m):\n# temp = [0] * m\n# temp[i] = 1\n# A += [temp]\n# B.append(1e9)\n# =============================================================================\n \ntemp = [1] * m\nA += [temp]\nB.append(1e9)\n\nanst, ansx = solve_diet_problem(n, m, A, B, C)\n\nif anst == -1:\n print(\"No solution\")\nif anst == 0: \n print(\"Bounded solution\")\n print(' '.join(list(map(lambda x : '%.18f' % x, ansx))))\nif anst == 1:\n print(\"Infinity\")\n \n","sub_path":"Linear programming/diet/diet V2.py","file_name":"diet V2.py","file_ext":"py","file_size_in_byte":6887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"189638884","text":"class Person(object):\n def __init__(self):\n self._person = None\n\n @property\n def person(self):\n print(\"get person: %s\" % self._person)\n return self._person\n\n @person.setter\n def person(self, value):\n print(\"set person: %s\" % value)\n self._person = value\n\n @person.deleter\n def person(self):\n print(\"del person: %s\" % self._person)\n del self._person\n\n\nif __name__ == \"__main__\":\n from conf import load_json_conf, PERSON1\n person = load_json_conf(PERSON1)\n print(person)\n ps = Person()\n ps.person = person\n print(\"Person\", ps.person)\n import pdb;pdb.set_trace()","sub_path":"python/Lang/Class/init_from_conf.py","file_name":"init_from_conf.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"55702160","text":"from django import template\n\nregister = template.Library()\n\n@register.simple_tag(takes_context=True)\ndef get_section_flag(context, section):\n return context['MENU_ENTRIES'][section]\n\n@register.simple_tag(takes_context=True)\ndef get_sub_section_flag(context, section, sub_section):\n key = \"{}/{}\".format(section, sub_section.lower())\n return context[\"MENU_ENTRIES\"][key]\n\n@register.simple_tag(takes_context=True)\ndef get_category_flag(context, section, sub_section, category):\n if sub_section == None:\n key 
= \"{}/{}\".format(section, category)\n else:\n key = \"{}/{}/{}\".format(section, sub_section.lower(), category)\n return context[\"MENU_ENTRIES\"][key]\n","sub_path":"sti_prof_dashboard/dashboard/templatetags/dashboards_menus_extras.py","file_name":"dashboards_menus_extras.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"574395498","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, _\nfrom odoo.tools.float_utils import float_compare, float_is_zero, float_round\nfrom odoo.exceptions import UserError\n\n\nclass StockPicking(models.Model):\n _inherit = 'stock.picking'\n\n lustre_shipping_weight = fields.Float(string='Lustre Shipping Weight')\n dimension = fields.Char(string='Dimension (LxWxH)')\n sale_order_id = fields.Many2one('sale.order', string='SO Id', compute='get_so_id')\n payment_term = fields.Selection([('Cash', 'Cash'), ('Bank', 'Bank'), ('Credit Card', 'Credit Card')], string='Payment Terms')\n state = fields.Selection([\n ('draft', 'Completed'),\n ('waiting', 'Waiting Another Operation'),\n ('confirmed', 'Waiting for Shipping'),\n ('assigned', 'Assigned'),\n ('done', 'Shipped'),\n ('email_sent', 'Email Sent'),\n ('cancel', 'Cancelled'),\n ], string='Status', compute='_compute_state',\n copy=False, index=True, readonly=True, store=True, track_visibility='onchange',\n help=\" * Draft: not confirmed yet and will not be scheduled until confirmed.\\n\"\n \" * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows).\\n\"\n \" * Waiting: if it is not ready to be sent because the required products could not be reserved.\\n\"\n \" * Ready: products are reserved and ready to be sent. 
If the shipping policy is 'As soon as possible' this happens as soon as anything is reserved.\\n\"\n \" * Done: has been processed, can't be modified or cancelled anymore.\\n\"\n \" * Cancelled: has been cancelled, can't be confirmed anymore.\")\n stock_picking_type_code = fields.Selection(related='picking_type_id.code')\n sale_order_number = fields.Char(related='sale_order_id.name')\n high_value = fields.Char(string='High Value', compute='_get_high_value')\n planned_ship_date = fields.Datetime(string='Planned Shipped Date', related='sale_order_id.expected_date')\n cc_status = fields.Selection([('unpaid', 'Unpaid'), ('unprocessed', 'Unprocessed')], string='CC Status')\n cc_alert = fields.Char(string='CC Unpaid', compute='_get_cc_alert')\n manufacturing_status = fields.Char(string='Production Station #', compute='get_manufacturing_status')\n clip_board_number = fields.Char(related='sale_order_id.clip_board_number')\n actual_ship_date = fields.Datetime(related='scheduled_date', string='Actual Ship Date')\n in_shipping = fields.Selection([('shipped', 'Shipped'), ('arrived', 'Arrived')], string='In Shipping')\n\n # this function get manufacturing status for each so\n @api.multi\n def get_manufacturing_status(self):\n for order in self:\n if order.sale_order_id:\n order.manufacturing_status = self.env['mrp.production'].search([('origin', '=', order.sale_order_id.name)], limit=1).state\n\n # this function indicate cc status\n @api.depends('cc_status')\n def _get_cc_alert(self):\n for line in self:\n if line.cc_status in ['unpaid', 'unprocessed']:\n line.cc_alert = 'ALERT'\n else:\n line.cc_alert = ''\n\n # this function get high value\n @api.depends('high_value')\n def _get_high_value(self):\n for line in self:\n if line.sale_order_id:\n if line.sale_order_id.amount_total > 2500:\n line.high_value = 'YES'\n else:\n line.high_value = ''\n\n # get associated SO to DO\n @api.depends('origin')\n def get_so_id(self):\n for line in self:\n if line.origin:\n so_id = self.env['sale.order'].search([('name', '=', line.origin)])\n if so_id:\n line.sale_order_id = so_id.id\n\n # send shipping confirmation email to customer\n @api.multi\n def action_send_confirmation_email(self):\n self.ensure_one()\n delivery_template_id = self.env.ref('stock_inherit.lustre_email_template_order_acknowledgement').id\n compose_form_id = self.env.ref('mail.email_compose_message_wizard_form').id\n ctx = dict(\n default_composition_mode='comment',\n default_res_id=self.id,\n default_model='stock.picking',\n default_use_template=bool(delivery_template_id),\n default_template_id=delivery_template_id,\n custom_layout='mail.mail_notification_light'\n )\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }\n\n # set non standard courier number to associated SO\n @api.onchange('carrier_tracking_ref')\n def mapped_tracking_number(self):\n if self.origin:\n so_id = self.env['sale.order'].search([('name', '=', self.origin)])\n tracking_numbers = ()\n tracking_numbers = list(tracking_numbers)\n if so_id:\n if self.carrier_tracking_ref:\n if so_id.tracking:\n tracking_numbers.append(so_id.tracking)\n tracking_numbers.append(self.carrier_tracking_ref)\n tracking_numbers = (tuple(tracking_numbers))\n numbers = ', '.join(tracking_numbers)\n so_id.write({'tracking': numbers})\n else:\n so_id.write({'tracking': self.carrier_tracking_ref})\n\n # overwrite button_validate function\n @api.multi\n def 
button_validate(self):\n self.ensure_one()\n if not self.move_lines and not self.move_line_ids:\n raise UserError(_('Please add some items to move.'))\n\n # If no lots when needed, raise error\n picking_type = self.picking_type_id\n precision_digits = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n no_quantities_done = all(\n float_is_zero(move_line.qty_done, precision_digits=precision_digits) for move_line in self.move_line_ids)\n no_reserved_quantities = all(\n float_is_zero(move_line.product_qty, precision_rounding=move_line.product_uom_id.rounding) for move_line in\n self.move_line_ids)\n if no_reserved_quantities and no_quantities_done:\n raise UserError(_(\n 'You cannot validate a transfer if no quantites are reserved nor done. To force the transfer, switch in edit more and encode the done quantities.'))\n\n if picking_type.use_create_lots or picking_type.use_existing_lots:\n lines_to_check = self.move_line_ids\n if not no_quantities_done:\n lines_to_check = lines_to_check.filtered(\n lambda line: float_compare(line.qty_done, 0,\n precision_rounding=line.product_uom_id.rounding)\n )\n\n for line in lines_to_check:\n product = line.product_id\n if product and product.tracking != 'none':\n if not line.lot_name and not line.lot_id:\n raise UserError(\n _('You need to supply a Lot/Serial number for product %s.') % product.display_name)\n\n if no_quantities_done:\n view = self.env.ref('stock.view_immediate_transfer')\n wiz = self.env['stock.immediate.transfer'].create({'pick_ids': [(4, self.id)]})\n return {\n 'name': _('Immediate Transfer?'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'stock.immediate.transfer',\n 'views': [(view.id, 'form')],\n 'view_id': view.id,\n 'target': 'new',\n 'res_id': wiz.id,\n 'context': self.env.context,\n }\n\n if self._get_overprocessed_stock_moves() and not self._context.get('skip_overprocessed_check'):\n view = self.env.ref('stock.view_overprocessed_transfer')\n wiz = self.env['stock.overprocessed.transfer'].create({'picking_id': self.id})\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'stock.overprocessed.transfer',\n 'views': [(view.id, 'form')],\n 'view_id': view.id,\n 'target': 'new',\n 'res_id': wiz.id,\n 'context': self.env.context,\n }\n\n # send email to stock logistic manager\n partners = self.env['res.users'].search([])\n for partner in partners:\n if partner.user_has_groups('stock.group_stock_manager'):\n template_id = self.env.ref('stock_inherit.email_template_notification_ls')\n self.env['mail.template'].with_context(so=self.origin).browse(template_id.id).send_mail(partner.id, True)\n\n # get all minimum over level exceeded items\n table_data = ''\n if self.stock_picking_type_code == 'outgoing':\n for row in self.move_ids_without_package:\n if row.product_id.reordering_min_qty != 0:\n if row.product_id.qty_available - row.quantity_done <= row.product_id.reordering_min_qty:\n table_data += str(row.product_id.name) + ' [' + str(row.product_id.product_tmpl_id.default_code) + ']' + '\\n'\n\n # send email to purchaser\n partner = self.env['res.users'].search([])\n for purchaser in partner:\n if purchaser.user_has_groups('purchase.group_purchase_user') or purchaser.user_has_groups('purchase.group_purchase_manager'):\n template_id = self.env.ref('stock_inherit.email_template_manage_inventory')\n self.env['mail.template'].with_context(table_data=table_data).browse(template_id.id).send_mail(purchaser.id, True)\n\n 
# send back order\n if self._check_backorder():\n return self.action_generate_backorder_wizard()\n self.action_done()\n\n # this function send order acknowledgement\n @api.multi\n def send_order_acknowledgement(self):\n '''\n This function opens a window to compose an email, with the edi sale template message loaded by default\n '''\n self.ensure_one()\n ir_model_data = self.env['ir.model.data']\n try:\n template_id = ir_model_data.get_object_reference('stock_inherit', 'lustre_email_template_order_acknowledgement')[1]\n except ValueError:\n template_id = False\n try:\n compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n\n ctx = {\n 'default_model': 'stock.picking',\n 'default_res_id': self.ids[0],\n 'default_use_template': bool(template_id),\n 'default_template_id': template_id,\n 'default_composition_mode': 'comment',\n 'mark_so_as_sent': True,\n 'custom_layout': \"mail.mail_notification_paynow\",\n 'force_email': True,\n }\n\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')],\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }\n\n '''\n This function opens a window to compose an email, with the edi sale template message loaded by default\n '''\n @api.multi\n def action_received(self):\n partners = self.env['res.users'].search([])\n for partner in partners:\n if partner.user_has_groups('account.group_account_user') or partner.user_has_groups('account.group_account_invoice'):\n template_id = self.env.ref('stock_inherit.email_template_notification_ar')\n self.env['mail.template'].with_context(so=self.origin).browse(template_id.id).send_mail(partner.id, True)\n\n self.ensure_one()\n ir_model_data = self.env['ir.model.data']\n try:\n template_id = ir_model_data.get_object_reference('stock_inherit', 'lustre_email_template_order_acknowledgement')[1]\n except ValueError:\n template_id = False\n try:\n compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n\n ctx = dict(\n default_composition_mode='comment',\n default_res_id=self.id,\n default_model='stock.picking',\n default_use_template=bool(template_id),\n default_template_id=template_id,\n custom_layout='mail.mail_notification_light',\n is_received=True,\n )\n\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')],\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }\n\n\nclass MailComposer(models.TransientModel):\n _inherit = 'mail.compose.message'\n\n # overwrite send mail action\n @api.multi\n def action_send_mail(self):\n res = super(MailComposer, self).action_send_mail()\n if self._context.get('is_received'):\n form_obj = self.env['stock.picking'].search([('id', '=', self.res_id)])\n if form_obj:\n form_obj.write({\n 'state': 'email_sent',\n })\n return res\n","sub_path":"stock_inherit/models/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":13974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"263671571","text":"import inspect\nimport sheraf\nfrom sheraf.models.indexation import BaseIndexedModel\nfrom sheraf.attributes import Attribute\nfrom sheraf.attributes.collections import ListAttribute, 
SetAttribute\n\n\nclass ModelLoader(object):\n \"\"\"\n Loads models from the base in a cache.\n\n Inherited by most model types\n (``Model[Attribute|List|Set|(Large)Dict]``)\n \"\"\"\n\n cache = {}\n\n def __init__(self, model=None, **kwargs):\n super().__init__(**kwargs)\n self._model = model\n frame = inspect.stack()[3]\n module = inspect.getmodule(frame[0])\n self.module_path = module.__name__ if module else None\n\n def load_model(self, modelpath):\n # Internal.\n # :param modelpath: namespace of the model to load (eg 'x.y.z')\n # :return: a model class given its path\n if isinstance(modelpath, bytes):\n modelpath = modelpath.decode(\"utf-8\")\n\n path = modelpath.split(\".\")\n module_path, klass = \".\".join(path[:-1]), path[-1]\n module_path = module_path or self.module_path\n\n module = __import__(module_path, globals(), locals(), [klass], 0)\n\n # pypy:\n if module is None: # pragma: no cover\n raise ImportError(modelpath)\n\n try:\n return getattr(module, klass)\n except AttributeError as exc:\n raise ImportError(exc)\n\n def read(self, parent):\n self.check_model(parent)\n return super().read(parent)\n\n def write(self, parent, value):\n self.check_model(parent)\n return super().write(parent, value)\n\n @property\n def model(self):\n self.check_model()\n return self._model\n\n def check_model(self, parent=None):\n if isinstance(self._model, (list, tuple)):\n self._model = type(self._model)(\n self._check_model(m, parent) for m in self._model\n )\n else:\n self._model = self._check_model(self._model, parent)\n\n def _check_model(self, model, parent):\n if isinstance(model, (str, bytes)):\n try:\n return ModelLoader.cache[model]\n except KeyError:\n ModelLoader.cache[model] = self.load_model(model)\n return ModelLoader.cache[model]\n\n elif parent and not isinstance(model, type):\n return type(\n \"{}.{}\".format(parent.__class__.__name__, self.key(parent)),\n (model.__class__,),\n model.attributes,\n )\n\n else:\n return model\n\n\nclass AttributeLoader(ModelLoader):\n def __init__(self, attribute=None, **kwargs):\n self._attribute = attribute\n super().__init__(**kwargs)\n\n def check_attribute(self, attribute_name, parent):\n if attribute_name not in self._model.attributes:\n raise sheraf.SherafException(\n f\"'{attribute_name}' is not an attribute of {self._model.__name__}\"\n )\n\n attribute = self._model.attributes[attribute_name]\n if isinstance(attribute, (ListAttribute, SetAttribute)):\n if not isinstance(attribute.attribute, ModelAttribute):\n raise sheraf.SherafException(\n f\"'{self._model.__name__}.{attribute}' attribute should hold a 'ModelAttribute' to be referenced by a 'ReverseModelAttribute'\"\n )\n\n elif not isinstance(attribute, ModelAttribute):\n raise sheraf.SherafException(\n f\"'{self._model.__name__}.{attribute}' should be a 'ModelAttribute' or a collection of 'ModelAttribute' to be referenced by a 'ReversedModelAttribute'\"\n )\n\n if attribute_name not in self.model.indexes:\n raise sheraf.SherafException(\n f\"'{self._model.__name__}.{attribute}' should have an index to be referenced by a 'ReversedModelAttribute'\"\n )\n\n\nclass ModelAttribute(ModelLoader, Attribute):\n \"\"\"This attribute references another :class:`~sheraf.models.Model`.\n\n :param model: The model type to store.\n :type model: :class:`~sheraf.models.Model` or list of :class:`~sheraf.models.Model`\n\n >>> class Horse(sheraf.Model):\n ... table = \"horse\"\n ... name = sheraf.SimpleAttribute()\n ...\n >>> class Cowboy(sheraf.Model):\n ... table = \"cowboy\"\n ... 
name = sheraf.SimpleAttribute()\n ... mount = sheraf.ModelAttribute(Horse)\n ...\n >>> with sheraf.connection(commit=True):\n ... jolly = Horse.create(name=\"Jolly Jumper\")\n ... george = Cowboy.create(name=\"George Abitbol\", mount=jolly)\n ...\n ... george.mount.name\n 'Jolly Jumper'\n\n The referenced model can be dynamically created if its structure is passed through as a dict:\n\n >>> with sheraf.connection(commit=True):\n ... peter = Cowboy.create(name=\"Peter\", mount={\"name\": \"Polly Pumper\"})\n ... assert isinstance(peter.mount, Horse)\n ... peter.mount.name\n 'Polly Pumper'\n\n When the referenced model is deleted, the value of the attribute becomes ``None``.\n\n >>> with sheraf.connection(commit=True):\n ... george = Cowboy.read(george.id)\n ... jolly.delete()\n ... assert george.mount is None\n\n Several model classes can be used, but this will be more memory consuming in the database.\n\n >>> class Pony(sheraf.Model):\n ... table = \"pony\"\n ... name = sheraf.SimpleAttribute()\n ...\n >>> class Cowboy(sheraf.Model):\n ... table = \"cowboy\"\n ... name = sheraf.SimpleAttribute()\n ... mount = sheraf.ModelAttribute((Horse, Pony))\n ...\n >>> with sheraf.connection(commit=True):\n ... superpony = Pony.create(name=\"Superpony\")\n ... peter = Cowboy.create(name=\"Peter\", mount=superpony)\n\n When several models are set, the first one is considered to be the default model.\n The default model is used when there is a doubt on the read data, or in the case\n of model creation with a dict.\n \"\"\"\n\n def __init__(self, model=None, **kwargs):\n if not model:\n raise sheraf.exceptions.SherafException(\n \"ModelAttribute requires model parameter.\"\n )\n kwargs[\"read_memoization\"] = False\n kwargs[\"write_memoization\"] = False\n super().__init__(default=None, model=model, **kwargs)\n\n def index_keys(self, model):\n \"\"\"\n By default :class:`~sheraf.attributes.models.ModelAttribute` are indexed on\n their identifier.\n \"\"\"\n if model is None:\n return {None}\n\n return {\n (model.table, model.identifier)\n if isinstance(self.model, (tuple, list))\n else model.identifier\n }\n\n def deserialize(self, value):\n if isinstance(value, tuple):\n table, id_ = value\n model = BaseIndexedModel.from_table(table)\n if model is None:\n self.check_model()\n model = BaseIndexedModel.from_table(table)\n\n else:\n id_ = value\n model = (\n self.model[0] if isinstance(self.model, (list, tuple)) else self.model\n )\n if model is None:\n self.check_model()\n model = (\n self.model[0]\n if isinstance(self.model, (list, tuple))\n else self.model\n )\n\n try:\n return model.read(id_)\n except (KeyError, sheraf.exceptions.ModelObjectNotFoundException):\n return None\n\n def serialize(self, value):\n if value is None:\n return None\n\n elif isinstance(value, sheraf.IndexedModel):\n return (\n (value.table, value.identifier)\n if isinstance(self.model, (tuple, list))\n else value.identifier\n )\n\n elif self.model and isinstance(value, dict):\n return self.model.create(**value).identifier\n\n else:\n try:\n return self.model.read(value).identifier\n except sheraf.SherafException:\n return None\n\n def update(\n self,\n old_value,\n new_value,\n addition=True,\n edition=True,\n deletion=False,\n replacement=False,\n ):\n if replacement or old_value is None or not isinstance(new_value, dict):\n return self.serialize(new_value)\n\n return old_value.edit(new_value, addition, edition, deletion, replacement)\n\n\nclass ReverseModelAttribute(AttributeLoader, Attribute):\n \"\"\"\n Inverse reference to a 
:class:`~sheraf.attributes.models.ModelAttribute`.\n\n :param model: The :class:`~sheraf.models.Model` to refer to.\n :param attribute: The :class:`~sheraf.attributes.Attribute` in the model to refer to.\n This model must be a :class:`~sheraf.attributes.models.ModelAttribute` or a\n collection of :class:`~sheraf.attributes.models.ModelAttribute`.\n\n The referenced attribute must be indexed.\n\n >>> class Cowboy(sheraf.Model): # doctest: +SKIP\n ... table = \"reverse_cowboys\"\n ... name = sheraf.StringAttribute()\n ... horse = sheraf.ModelAttribute(\"Horse\").index()\n ...\n >>> class Horse(sheraf.Model): # doctest: +SKIP\n ... table = \"reverse_horses\"\n ... name = sheraf.StringAttribute()\n ... cowboy = sheraf.ReverseModelAttribute(\"Cowboy\", \"horse\")\n ...\n >>> with sheraf.connection(): # doctest: +SKIP\n ... george = Cowboy.create(name=\"George\")\n ... horse = Horse.create(name=\"Jolly\", cowboy=george)\n ... george.horse.name\n \"Jolly\"\n\n Collection attributes are also supported:\n\n >>> class Cowboy(sheraf.Model): # doctest: +SKIP\n ... table = \"reverse_multicowboys\"\n ... name = sheraf.StringAttribute()\n ... horses = sheraf.LargeListAttribute(ModelAttribute(\"Horse\").index())\n ...\n >>> class Horse(sheraf.Model): # doctest: +SKIP\n ... table = \"reverse_multihorses\"\n ... name = sheraf.StringAttribute()\n ... cowboy = sheraf.ReverseModelAttribute(\"Cowboy\", \"horses\")\n ...\n >>> with sheraf.connection(): # doctest: +SKIP\n ... george = Cowboy.create(name=\"George\")\n ... jolly = Horse.create(name=\"Jolly\", cowboy=george)\n ... polly = Horse.create(name=\"Polly\", cowboy=george)\n ... george.horses[0].name\n ... george.horses[1].name\n \"Jolly\"\n \"Polly\"\n \"\"\"\n\n def __init__(self, model, attribute, **kwargs):\n kwargs[\"read_memoization\"] = False\n kwargs[\"write_memoization\"] = False\n super().__init__(default=None, model=model, attribute=attribute, **kwargs)\n\n def read(self, parent):\n self.check_model(parent)\n self.check_attribute(self._attribute, parent)\n search_args = {self._attribute: parent}\n\n if not self.model.indexes[self._attribute].details.unique:\n return self.model.search(**search_args)\n\n try:\n return self.model.search(**search_args).get()\n except sheraf.QuerySetUnpackException:\n return None\n\n def write(self, parent, value):\n self.check_model(parent)\n self.check_attribute(self._attribute, parent)\n\n self.delete(parent)\n\n if value is None:\n return None\n\n if not isinstance(value, (list, set)):\n value = [value]\n\n # if values are ids, then we should load corresponding models\n checked_values = []\n for v in value:\n if isinstance(v, self.model):\n checked_values.append(v)\n else:\n try:\n checked_values.append(self.model.read(v))\n except sheraf.ModelObjectNotFoundException:\n pass\n\n for referent in checked_values:\n attribute = self.model.attributes[self._attribute]\n\n if isinstance(attribute, ModelAttribute):\n setattr(referent, self._attribute, parent)\n\n if isinstance(attribute, sheraf.ListAttribute):\n setattr(\n referent,\n self._attribute,\n getattr(referent, self._attribute) + [parent],\n )\n\n if isinstance(attribute, sheraf.SetAttribute):\n setattr(\n referent,\n self._attribute,\n getattr(referent, self._attribute) | {parent},\n )\n\n return checked_values\n\n def delete(self, parent):\n referents = self.read(parent)\n\n if referents is None:\n return\n\n if isinstance(referents, self.model):\n referents = [referents]\n\n for referent in list(referents):\n attribute = 
self.model.attributes[self._attribute]\n\n if isinstance(attribute, ModelAttribute):\n delattr(referent, self._attribute)\n\n if isinstance(\n attribute,\n sheraf.ListAttribute,\n ):\n new_values = [\n e for e in getattr(referent, self._attribute) if e != parent\n ]\n setattr(\n referent,\n self._attribute,\n new_values,\n )\n\n if isinstance(\n attribute,\n sheraf.SetAttribute,\n ):\n new_values = {\n e for e in getattr(referent, self._attribute) if e != parent\n }\n setattr(\n referent,\n self._attribute,\n new_values,\n )\n\n\nclass InlineModelAttribute(ModelLoader, Attribute):\n \"\"\":class:`~sheraf.attributes.models.ModelAttribute` behaves like a basic\n model (i.e. have no indexation capability). The child attribute mapping is stored\n in the parent mapping.\n\n :param model: The model type to store.\n :type model: :class:`~sheraf.models.inline.InlineModel`\n\n >>> class Horse(sheraf.InlineModel):\n ... name = sheraf.StringAttribute()\n ...\n >>> class Cowboy(sheraf.Model):\n ... table = \"cowboy_inliner\"\n ... name = sheraf.StringAttribute()\n ... horse = sheraf.InlineModelAttribute(Horse)\n ...\n >>> with sheraf.connection(commit=True):\n ... jolly = Horse.create(name=\"Jolly Jumper\")\n ... george = Cowboy.create(name=\"George\", horse=jolly)\n ... george.horse.name\n 'Jolly Jumper'\n \"\"\"\n\n default_mapping = sheraf.types.SmallDict\n\n def __init__(self, model=None, **kwargs):\n kwargs.setdefault(\"default\", self.default_mapping)\n super().__init__(model=model, **kwargs)\n\n def deserialize(self, value):\n if value is None:\n return None\n\n return self.model._decorate(value)\n\n def serialize(self, value):\n if value is None:\n return None\n\n elif isinstance(value, sheraf.InlineModel):\n return value.mapping\n\n elif isinstance(value, dict):\n return self.model.create(**value).mapping\n\n else:\n return self._default_value(value)\n\n def update(\n self,\n old_value,\n new_value,\n addition=True,\n edition=True,\n deletion=False,\n replacement=False,\n ):\n if replacement or old_value is None:\n return self.serialize(new_value)\n\n return old_value.edit(new_value, addition, edition, deletion, replacement)\n\n\nclass IndexedModelAttribute(ModelLoader, Attribute):\n \"\"\"\n :class:`~sheraf.attributes.models.ModelAttribute` behaves like a classic model,\n including the indexation capabilities. The child attribute mapping and all the\n index mappings are is stored in the parent mapping.\n\n :param model: The model type to store.\n :type model: :class:`~sheraf.models.AttributeModel`\n\n .. note:: The :class:`~sheraf.models.AttributeModel` must have a *primary* index.\n\n >>> class Horse(sheraf.AttributeModel):\n ... name = sheraf.StringAttribute().index(primary=True)\n ... size = sheraf.IntegerAttribute().index()\n ...\n >>> class Cowboy(sheraf.Model):\n ... table = \"cowboy_indexer\"\n ... name = sheraf.StringAttribute()\n ... horses = sheraf.IndexedModelAttribute(Horse)\n ...\n >>> with sheraf.connection(commit=True):\n ... george = Cowboy.create(name=\"George Abitbol\")\n ... jolly = george.horses.create(name=\"Jolly Jumper\", size=32)\n ...\n ... assert jolly == george.horses.read(\"Jolly Jumper\")\n ... 
assert jolly in george.horses.search(size=32)\n \"\"\"\n\n index_table_default = sheraf.types.SmallDict\n\n def read(self, parent):\n for index in self.model.indexes.values():\n key = self.key(parent)\n if key not in parent.mapping:\n parent.mapping[key] = self.index_table_default()\n index.persistent = parent.mapping[key]\n\n return self.model\n\n def write(self, parent, value):\n model = self.read(parent)\n for values_dict in value:\n model.create(**values_dict)\n return model\n","sub_path":"sheraf/attributes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":17135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"483131834","text":"def crypt(word, rot):\n\tnewword = \"\"\n\tfor i in range (len(word)):\n\t\tx = ord(word[i]) + rot\n\t\tnewword = newword + chr(x)\n\treturn newword\n\na = input(\"Digite a palavra desejada: \")\nb = int(input(\"Digite o numero de rotacao: \"))\n\nprint (crypt(a, b))","sub_path":"crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"257100655","text":"import math\nimport primes\nimport random\ndef invmod(a, p, maxiter=1000000):\n \"\"\"The multiplicitive inverse of a in the integers modulo p:\n a * b == 1 mod p\n Returns b.\n (http://code.activestate.com/recipes/576737-inverse-modulo-p/)\"\"\"\n if a == 0:\n raise ValueError('0 has no inverse mod %d' % p)\n r = a\n d = 1\n for i in xrange(min(p, maxiter)):\n d = ((p // r + 1) * d) % p\n r = (d * a) % p\n if r == 1:\n break\n else:\n raise ValueError('%d has no inverse mod %d' % (a, p))\n return d\n\ndef modpow(base, exponent, modulus):\n \"\"\"Modular exponent:\n c = b ^ e mod m\n Returns c.\n (http://www.programmish.com/?p=34)\"\"\"\n result = 1\n while exponent > 0:\n if exponent & 1 == 1:\n result = (result * base) % modulus\n exponent = exponent >> 1\n base = (base * base) % modulus\n return result\n\nclass PrivateKey(object):\n\n def __init__(self, p, q,s,w,r_m):\n self.l = (p-1) * (q-1)\n self.s=s\n self.w=w\n self.r_m=r_m\n\n def __repr__(self):\n return '' % (self.l, self.s, self.w, self.r_m)\n\nclass PublicKey(object):\n\n @classmethod\n def from_n(cls, n):\n return cls(n)\n\n def __init__(self, n,p,q):\n self.n = n\n self.m=invmod((p-1) * (q-1), n)\n self.n_sq = n * n\n self.g = n + 1\n\n\n def __repr__(self):\n return '' % (self.n, self.m, self.n,self.g)\n\ndef generate_keypair(bits,l):\n p = primes.generate_prime(bits / 2)\n q = primes.generate_prime(bits / 2)\n n = p * q\n s=math.floor(math.log(n,2))\n w=random.randint(l+1,s-2)\n r_m=random.randint(2,math.pow(2,w-l)-1)\n return PrivateKey(p, q, s,w,r_m), PublicKey(n,p,q)\n\n\ndef encrypt(pub,priv, plain):\n while True:\n r = primes.generate_prime(long(round(math.log(pub.n, 2))))\n if r > 0 and r < pub.n:\n break\n x = pow(r, priv.l*pub.n, pub.n_sq)\n cipher = (pow(pub.g, priv.l*plain, pub.n_sq) * x) % pub.n_sq\n return cipher\n\ndef e_add(pub, a, b):\n \"\"\"Add one encrypted integer to another\"\"\"\n return a * b % pub.n_sq\n\ndef e_add_const(pub, a, n):\n \"\"\"Add constant n to an encrypted integer\"\"\"\n return a * modpow(pub.g, n, pub.n_sq) % pub.n_sq\n\ndef e_mul_const(pub, a, n):\n \"\"\"Multiplies an ancrypted integer by a constant\"\"\"\n return modpow(a, n, pub.n_sq)\n\ndef decrypt(pub, cipher):\n # x = pow(cipher, priv.l, pub.n_sq) - 1\n x=cipher % pub.n_sq-1\n plain = ((x // pub.n) * pub.m) % pub.n\n return 
plain\n\n","sub_path":"paillier/paillier.py","file_name":"paillier.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"611652726","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    # def preorderTraversal(self, root: TreeNode) -> List[int]:\n    def preorderTraversal(self, root):\n        \"\"\"\n        1. Plain iterative traversal: omitted\n        2. Color-marking method, implemented below with an explicit stack:\n           a flag of 1 marks an unvisited node (push its right child, left\n           child, then the node itself with flag 0), a flag of 0 marks a\n           node whose value is ready to be emitted\n        :param root:\n        :return:\n        \"\"\"\n        result = []\n        stack = [(1, root)]\n        while len(stack) > 0:\n            unVisit, node = stack.pop()\n            if node is None:\n                continue\n            if unVisit:\n                stack.append((1, node.right))\n                stack.append((1, node.left))\n                stack.append((0, node))\n            else:\n                result.append(node.val)\n        return result\n\n","sub_path":"Week_02/preorder_traversal.py","file_name":"preorder_traversal.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"430414272","text":"from Multisoftmax import MultiSoftmax\nfrom MLPCost import MLPCost\nfrom KeypointSGD import KeypointSGD\nfrom pylearn2.train import Train\nfrom pylearn2.train_extensions.best_params import MonitorBasedSaveBest\nfrom pylearn2.training_algorithms.sgd import MomentumAdjustor\nfrom pylearn2.termination_criteria import MonitorBased\nfrom pylearn2.models.maxout import Maxout\nfrom pylearn2.training_algorithms.sgd import LinearDecayOverEpoch\nfrom pylearn2.space import Conv2DSpace\nfrom pylearn2.models.mlp import ConvRectifiedLinear, MLP\nfrom KeypointADADELTA import KeypointADADELTA\n\n#params = __import__('experiment_' + sys.argv[1] + '_params')\n\nfrom comboDS import ComboDatasetPyTable\n\n# The number of features in the Y vector\nnumberOfKeyPoints = 98*2\n\n\ndef main():\n\n    #creating layers\n    #2 convolutional rectified layers, border mode valid\n    batch_size = 48\n    lr = 1.0 #0.1/4\n    finMomentum = 0.9\n    maxout_units = 2000\n    num_pcs = 4\n    lay1_reg = lay2_reg = maxout_reg = None\n    #save_path = './models/no_maxout/titan_lr_0.1_btch_64_momFinal_0.9_maxout_2000_4.joblib'\n    #best_path = '/models/no_maxout/titan_bart10_gpu2_best.joblib'\n    #save_path = './models/'+params.host+'_'+params.device+'_'+sys.argv[1]+'.joblib'\n    #best_path = './models/'+params.host+'_'+params.device+'_'+sys.argv[1]+'best.joblib'\n    save_path = '/Tmp/zumerjer/bart10_meancost_adadelta_ema.joblib'\n    best_path = '/Tmp/zumerjer/bart10_meancost_adadelta_ema_best.joblib'\n\n    #numBatches = 400000/batch_size\n\n    '''\n    print 'Applying preprocessing'\n    ddmTrain = EmotiwKeypoints(start=0, stop =40000)\n    ddmValid = EmotiwKeypoints(start=40000, stop = 44000)\n    ddmTest = EmotiwKeypoints(start=44000)\n\n    stndrdz = preprocessing.Standardize()\n    stndrdz.applyLazily(ddmTrain, can_fit=True, name = 'train')\n    stndrdz.applyLazily(ddmValid, can_fit=False, name = 'val')\n    stndrdz.applyLazily(ddmTest, can_fit=False, name = 'test')\n\n    GCN = preprocessing.GlobalContrastNormalization(batch_size = 1000)\n    GCN.apply(ddmTrain, can_fit =True, name = 'train')\n    GCN.apply(ddmValid, can_fit =False, name = 'val')\n    GCN.apply(ddmTest, can_fit = False, name = 'test')\n    return\n    '''\n\n    ddmTrain = ComboDatasetPyTable('/Tmp/zumerjer/all_', which_set='train')\n    ddmValid = ComboDatasetPyTable('/Tmp/zumerjer/all_', which_set='valid')\n    ddmSmallTrain = ComboDatasetPyTable('/Tmp/zumerjer/all_', which_set='small_train')\n\n    layer1 = ConvRectifiedLinear(layer_name = 'convRect1',\n                     output_channels = 64,\n                     
irange = .05,\n kernel_shape = [5, 5],\n pool_shape = [4, 4],\n pool_stride = [2, 2],\n W_lr_scale = 0.1,\n max_kernel_norm = lay1_reg)\n layer2 = ConvRectifiedLinear(layer_name = 'convRect2',\n output_channels = 128,\n irange = .05,\n kernel_shape = [5, 5],\n pool_shape = [3, 3],\n pool_stride = [2, 2],\n W_lr_scale = 0.1,\n max_kernel_norm = lay2_reg)\n\n # Rectified linear units\n #layer3 = RectifiedLinear(dim = 3000,\n # sparse_init = 15,\n # layer_name = 'RectLin3')\n\n #Maxout layer\n maxout = Maxout(layer_name= 'maxout',\n irange= .005,\n num_units= maxout_units,\n num_pieces= num_pcs,\n W_lr_scale = 0.1,\n max_col_norm= maxout_reg)\n\n #multisoftmax\n n_groups = 196\n n_classes = 96\n layer_name = 'multisoftmax'\n layerMS = MultiSoftmax(n_groups=n_groups,irange = 0.05, n_classes=n_classes, layer_name= layer_name)\n\n #setting up MLP\n MLPerc = MLP(batch_size = batch_size,\n input_space = Conv2DSpace(shape = [96, 96],\n num_channels = 3, axes=('b', 0, 1, 'c')),\n layers = [ layer1, layer2, maxout, layerMS])\n\n #mlp_cost\n missing_target_value = -1\n mlp_cost = MLPCost(cost_type='default',\n missing_target_value=missing_target_value )\n #mlp_cost.setup_dropout(input_include_probs= { 'convRect1' : 0.8 }, input_scales= { 'convRect1': 1. })\n\n #dropout_cost = Dropout(input_include_probs= { 'convRect1' : .8 },\n # input_scales= { 'convRect1': 1. })\n\n #algorithm\n monitoring_dataset = {'validation':ddmValid, 'mini-train':ddmSmallTrain}\n\n term_crit = MonitorBased(prop_decrease = 1e-7, N = 100, channel_name = 'validation_objective')\n\n kp_ada = KeypointADADELTA(decay_factor = 0.95, \n #init_momentum = 0.5, \n monitoring_dataset = monitoring_dataset, batch_size = batch_size,\n termination_criterion = term_crit,\n cost = mlp_cost)\n\n #train extension\n #train_ext = ExponentialDecayOverEpoch(decay_factor = 0.998, min_lr_scale = 0.001)\n #train_ext = LinearDecayOverEpoch(start= 1,saturate= 250,decay_factor= .01)\n #train_ext = ADADELTA(0.95)\n\n #train object\n train = Train(dataset = ddmTrain,\n save_path= save_path,\n save_freq=10,\n model = MLPerc,\n algorithm= kp_ada,\n extensions = [#train_ext, \n MonitorBasedSaveBest(channel_name='validation_objective',\n save_path= best_path)#,\n\n# MomentumAdjustor(start = 1,#\n # saturate = 25,\n # final_momentum = finMomentum)\n ] )\n train.main_loop()\n train.save()\n\nif __name__=='__main__':\n main()\n","sub_path":"emotiw/zumerjer/keypoints_model_ada_nodrop.py","file_name":"keypoints_model_ada_nodrop.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"20549412","text":"# -*- coding: utf-8 -*-\n\"\"\"\n题目:\n 1. 和为定值的两个数\n 2. 
n numbers that sum to a given value\n"""\nimport collections\n\n\ndef enum_two_number_01(lst):\n    lst_len = len(lst)\n    for i in range(0, lst_len - 1):\n        for j in range(i + 1, lst_len):\n            if lst[i] + lst[j] == total:\n                print(lst[i], lst[j])\n\n\ndef enum_two_number_02(lst):\n    lst = sorted(lst)\n    i, j = 0, len(lst) - 1\n    while i < j:\n        s = lst[i] + lst[j]\n        if s == total:\n            print(lst[i], lst[j])\n            i, j = i + 1, j - 1\n        elif s > total:\n            j -= 1\n        elif s < total:\n            i += 1\n\n\ndef enum_two_number_03(lst):\n    d = collections.defaultdict(None).fromkeys(lst)\n    for e in lst:\n        if total - e in d:\n            print(e, total - e)\n\n\ndef enum_number_01(lst, p, i, has):\n    if i > len(lst) - 1:\n        return\n    if lst[i] + has == total:\n        p[i] = True\n        print(*[lst[i] for i in range(len(lst)) if p[i]])\n        p[i] = False\n    p[i] = True\n    enum_number_01(lst, p, i + 1, has + lst[i])\n    p[i] = False\n    enum_number_01(lst, p, i + 1, has)\n\n\n# The case where lst contains negative numbers is not handled; if lst has negatives, the pruning (branch-and-bound) conditions below must change.\ndef enum_number_02(lst, p, i, has, residue):\n    if i > len(lst) - 1:\n        return\n    if lst[i] + has == total:\n        p[i] = True\n        print(*[lst[i] for i in range(len(lst)) if p[i]])\n        p[i] = False\n\n    if has + lst[i] <= total <= has + residue:\n        p[i] = True\n        enum_number_02(lst, p, i + 1, has + lst[i], residue - lst[i])\n        # Note: restore the value of p[i].\n        p[i] = False\n    if has + residue - lst[i] >= total:\n        p[i] = False\n        enum_number_02(lst, p, i + 1, has, residue - lst[i])\n\n\nif __name__ == '__main__':\n    nums1 = [5, 6, 1, 3, 9, 2, 4]\n    nums2 = [-1, -2, 5, 6, 1, 3, 9, 12]\n\n    total = 10\n\n    # print(\"enum_two_number_01\")\n    # enum_two_number_01(nums1)\n    # enum_two_number_01(nums2)\n    #\n    # print(\"enum_two_number_02\")\n    # enum_two_number_02(nums1)\n    # enum_two_number_02(nums2)\n    #\n    # print(\"enum_two_number_03\")\n    # enum_two_number_03(nums1)\n    # enum_two_number_03(nums2)\n\n    print(\"enum_number_01\")\n    enum_number_01(nums1, [0] * len(nums1), 0, 0)\n    # enum_number_01(nums2, [0] * len(nums2), 0, 0)\n\n    print(\"enum_number_02\")\n    enum_number_02(nums1, [0] * len(nums1), 0, 0, sum(nums1))\n","sub_path":"014.py","file_name":"014.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"36905812","text":"import math\nimport os.path as op\nimport operator\n\nfrom array import array\nimport ROOT\nfrom ROOT import TFile, TH1D, TCanvas, gStyle, gROOT, TF1, TColor, TMinuit, gPad, TPad\nfrom ROOT import TGaxis, TBox, THStack, TLegend, TGraphErrors, TVectorD, TGraph\nfrom ROOT.Math import GaussIntegrator, WrappedTF1, IParamFunction\nfrom ROOT.Math import IParametricFunctionOneDim\n\ngROOT.SetBatch(True)\n\nclass MyMassSpectrum(IParametricFunctionOneDim):\n    def __init__(self):\n        self.pars = None\n    \n    def Clone(self):\n        return MyMassSpectrum()\n    \n    def DoEval(self, x):\n        scale = x/14\n        arg1 = self.pars[0]*math.pow(1-scale, self.pars[1])\n        arg2 = self.pars[2]+self.pars[3]*math.log(scale)\n        arg3 = math.pow(scale, arg2)\n        return arg1*arg3\n    \n    def DoEvalPar(self, x, p):\n        scale = x/14\n        arg1 = self.pars[0]*math.pow(1-scale, self.pars[1])\n        arg2 = self.pars[2]+self.pars[3]*math.log(scale)\n        arg3 = math.pow(scale, arg2)\n        return arg1*arg3\n    \n    def Parameters(self):\n        return self.pars\n    \n    def SetParameters(self, p):\n        self.pars = p\n    \n    def NPar(self):\n        return 4\n\n\n\nclass Fits:\n    def __init__(self):\n        gROOT.LoadMacro(\"FitFunction.cpp+g\")\n        \n        self.p_n = [0,]*100\n        self.e_n = [0,]*100\n        self.stored_parameters = [0,]*100\n        \n        self.num_bins = 0\n        self.xmins = []\n        self.xmaxes = []\n        \n        self.data = []\n        self.errors = 
[]\n self.data_fits = []\n \n self.col1 = 1\n self.col2 = TColor.GetColor(27, 158, 119)\n self.col3 = TColor.GetColor(217, 95, 2)\n self.col4 = TColor.GetColor(117, 112, 179)\n \n def run_mass_fit(self):\n self.gMinuit = TMinuit(30)\n self.gMinuit.SetFCN(self.Fitfcn_max_likelihood)\n \n arglist = array(\"d\", [0,]*10)\n ierflg = ROOT.Long(0)\n arglist[0] = ROOT.Double(1)\n \n self.gMinuit.mnexcm(\"SET ERR\", arglist, 1, ierflg)\n self.gMinuit.mnparm(0, \"p1\", 5e-6, 1e-7, 0, 0, ierflg)\n self.gMinuit.mnparm(1, \"p2\", 10, 10, 0, 0, ierflg)\n self.gMinuit.mnparm(2, \"p3\", -5.3, 1, 0, 0, ierflg)\n self.gMinuit.mnparm(3, \"p4\", -4e-2, 1e-2, 0, 0, ierflg)\n \n arglist[0] = ROOT.Double(0)\n arglist[1] = ROOT.Double(0)\n \n self.gMinuit.FixParameter(2)\n self.gMinuit.FixParameter(3)\n \n self.gMinuit.mnexcm(\"simplex\", arglist, 2, ierflg)\n self.gMinuit.mnexcm(\"MIGRAD\", arglist, 2, ierflg)\n \n self.gMinuit.Release(2)\n self.gMinuit.mnexcm(\"simplex\", arglist, 2, ierflg)\n self.gMinuit.mnexcm(\"MIGRAD\", arglist, 2, ierflg)\n \n self.gMinuit.Release(3)\n self.gMinuit.mnexcm(\"simplex\", arglist, 2, ierflg)\n self.gMinuit.mnexcm(\"MIGRAD\", arglist, 2, ierflg)\n\n def Fitfcn(self, npar, gin, fcnVal, par, iflag):\n chisq = 0\n mf = ROOT.MyMassSpectrum()\n mf.SetParameters(par)\n ig = GaussIntegrator()\n ig.SetFunction(mf)\n ig.SetRelTolerance(0.00001)\n \n for i in range(0, self.num_bins):\n val = ig.Integral(self.xmins[i], self.xmaxes[i])/(self.xmaxes[i]-self.xmins[i])\n chiValue = 0\n if self.errors[i]:\n chiValue = (self.data[i]-val)/math.sqrt(val)\n chiValue *= chiValue\n chisq += chiValue\n self.data_fits[i] = val\n \n fcnVal[0] = chisq\n\n def Fitfcn_max_likelihood(self, npar, gin, fcnVal, par, iflag):\n likelihood = 0\n mf = ROOT.MyMassSpectrum()\n mf.SetParameters(par)\n \n ig = GaussIntegrator()\n ig.SetFunction(mf)\n ig.SetRelTolerance(0.00001)\n \n model_total = 0\n\n events_total = 0\n \n for i in range(0, self.num_bins):\n model_val = ig.Integral(self.xmins[i], self.xmaxes[i]) / (self.xmaxes[i]-self.xmins[i])\n self.data_fits[i] = model_val\n if 3.5 < self.xmins[i] < 4.5:\n continue\n if model_val <= 0 or (self.data[i] == 0 and self.xmins[i] < 2):\n continue\n\n likelihood += self.data[i] * math.log(model_val)\n\n model_total += model_val\n events_total += self.data[i]\n\n \n fcnVal[0] = 2*(model_total-likelihood)\n\n\ndef fit_mass():\n gROOT.LoadMacro(\"IABStyle.cpp+g\")\n ROOT.IABstyles.global_style()\n TGaxis.SetMaxDigits(3)\n\n root_file = TFile.Open(\"mjj_data15_13TeV_00276262_physics_Main_total_final.root\")\n hist = root_file.Get(\"mjj_data15_13TeV_00276262_physics_Main_total_final\")\n \n root_file_blackmax = TFile.Open(\"dataLikeHistograms.BlackMax4000.root\")\n nominal = root_file_blackmax.GetDirectory(\"Nominal\")\n hist_blackmax = nominal.Get(\"mjj_Scaled_BlackMax4000_1fb\")\n #hist_blackmax = root_file_blackmax.Get(\"Nominal/mjj_Data\n \n hist.Add(hist_blackmax)\n \n nbins = hist.GetNbinsX()\n xwidth = [(hist.GetBinLowEdge(b+1)/1000-hist.GetBinLowEdge(b)/1000)/2 for b in range(1, nbins+1)]\n xmiddle = [hist.GetBinCenter(b)/1000 for b in range(1, nbins+1)]\n \n \n fits = Fits()\n fits.xmins = [hist.GetBinLowEdge(b)/1000 for b in range(1, nbins+1)]\n fits.xmaxes = [hist.GetBinLowEdge(b+1)/1000 for b in range(1, nbins+1)]\n fits.data = [hist.GetBinContent(b) for b in range(1, nbins+1)]\n fits.data_fits = [0,]*nbins\n \n fits.errors = [math.sqrt(x) for x in fits.data]\n fits.num_bins = nbins\n \n fits.run_mass_fit()\n \n test_canvas = TCanvas(\"TestCanvas\", \"Ds Fit\", 
0, 0, 800, 575)\n\n    gStyle.SetPadBorderMode(0)\n    gStyle.SetFrameBorderMode(0)\n\n    test_canvas.Divide(1, 2, 0, 0)\n    upper_pad = test_canvas.GetPad(1)#TPad(\"upper_pad\", \"upper_pad\", 0.005, 0.7525, 0.995, 0.995)\n    lower_pad = test_canvas.GetPad(2)#TPad(\"lower_pad\", \"lower_pad\", 0.005, 0.005, 0.995, 0.7475)\n    low, high = 0.05, 0.95\n    upper_pad.SetPad(low, 0.4, high, high)\n    lower_pad.SetPad(low, low, high, 0.4)\n    \n    test_canvas.cd(1)\n    \n    ROOT.IABstyles.canvas_style(test_canvas, 0.25, 0.05, 0.02, 0.15, 0, 0)\n    \n    h_Mjj = TH1D(\"h_Mjj\", \"Mass Spectrum\", 100, 0.2, 12)\n    h_Mjj.GetYaxis().SetTitle(\"num. events\")\n    h_Mjj.GetXaxis().SetTitle(\"M [Tev/c^{-2}]\")\n    \n    ROOT.IABstyles.h1_style(h_Mjj, ROOT.IABstyles.lWidth/2, ROOT.IABstyles.Scolor, 1, 0, 0, -1111, -1111, 508, 508, 8, ROOT.IABstyles.Scolor, 0.1, 0)\n    \n    h_Mjj.GetYaxis().SetRangeUser(0.1, 1e6)\n    h_Mjj.GetXaxis().SetRangeUser(1, 10)\n    h_Mjj.GetXaxis().SetTitleOffset(1)\n    h_Mjj.GetYaxis().SetTitleOffset(1.1)\n    \n    upper_pad.SetLogy(1)\n    upper_pad.SetLogx(1)\n    lower_pad.SetLogx(1)\n    \n    gr = TGraphErrors(fits.num_bins, array(\"d\", xmiddle), array(\"d\", fits.data), array(\"d\", xwidth), array(\"d\", fits.errors))\n    ROOT.IABstyles.h1_style(gr, ROOT.IABstyles.lWidth/2, ROOT.IABstyles.Scolor, 1, 0, 0, -1111, -1111, 505, 505, 8, ROOT.IABstyles.Scolor, 0.1, 0)\n    \n    grFit = TGraph(fits.num_bins, array(\"d\", xmiddle), array(\"d\", fits.data_fits))\n    ROOT.IABstyles.h1_style(grFit, ROOT.IABstyles.lWidth/2, 632, 1, 0, 0, -1111, -1111, 505, 505, 8, 632, 0.1, 0)\n    \n    h_Mjj.Draw(\"axis\")\n    gr.Draw(\"P\")\n    grFit.Draw(\"c\")\n    #upper_pad.Draw()\n    \n    test_canvas.Update()\n    \n    gPad.SetBottomMargin(1e-5)\n    \n    test_canvas.cd(2)\n    gPad.SetTopMargin(1e-5)\n    \n    \"\"\"h2 = TH1D(\"h2\", \"Significance\", fits.num_bins, 0.2, 12)\n    h2.GetYaxis().SetTitle(\"Significance\")\n    for bin_num, (data, theory) in enumerate(zip(fits.data, fits.data_fits), start=1):\n        print(\"bin\", bin_num, data, theory, (data-theory)/theory)\n        h2.SetBinContent(bin_num, (data-theory)/theory)\n    h2.GetXaxis().SetRangeUser(1, 10)\n    \n    h2.Draw()\"\"\"\n    h2 = TH1D(\"h2\", \"\", 100, 0.2, 12)\n    h2.GetXaxis().SetRangeUser(1, 10)\n    h2.GetYaxis().SetRangeUser(-10, 10)\n    h2.SetStats(False) # don't show stats box\n    h2.Draw(\"axis\")\n    sig_values = [(data-theory)/theory if data!= 0 else -100 for data, theory in zip(fits.data, fits.data_fits)]\n    sig = TGraph(fits.num_bins, array(\"d\", xmiddle), array(\"d\", sig_values))\n    #ROOT.IABstyles.h1_style(sig, ROOT.IABstyles.lWidth/2, 632, 1, 0, 0, -1111, -1111, 505, 505, 8, 632, 0.1, 0)\n    ROOT.IABstyles.h1_style(gr, ROOT.IABstyles.lWidth/2, ROOT.IABstyles.Scolor, 1, 0, 0, -1111, -1111, 505, 505, 8, ROOT.IABstyles.Scolor, 0.1, 0)\n    sig.SetMarkerStyle(22) # triangle\n    sig.SetMarkerColor(2) # red\n    sig.SetMarkerSize(0.8)\n    sig.Draw(\"P\")\n    #lower_pad.Draw()\n    \n    test_canvas.SaveAs(\"output_blackmax.pdf\")\n    test_canvas.SaveAs(\"output_blackmax.png\")\n\nfit_mass()\n","sub_path":"week06/fit_mass_blackmax.py","file_name":"fit_mass_blackmax.py","file_ext":"py","file_size_in_byte":8660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"434864585","text":"from random import randint\nfrom time import sleep\ndef main():\n\tprint('Start')\n\tlst = []\n\tfor i in range(1000):\n\t\trnd = randint(0,100)\n\t\tlst.append(rnd)\n\t\tavg = sum(lst)/len(lst)\n\t\tprint(\"val:\\t\",rnd,\"\\tavg:\\t\",str(avg)[:5],\"\\titer:\\t\",i, 
end='\\r')\n\t\tsleep(0.03)\n\tprint('Done')\n\nmain()\n","sub_path":"rand.py","file_name":"rand.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"242955116","text":"\"\"\"Read transition files\n\nThe read_transition function takes in a path to a transition file,\nreads it, and returns the transition data as a dictionary.\n\n    Example:\n        tpath = '../../OneLifeData7/transitions/659_556.txt'\n        build_stanchion = read_transition(tpath)\n\"\"\"\n\nimport os, sys, re\nimport ohol_objects as obj\n\n# Parse decay time\ndef decay_time(t0):\n    if t0 < 0:\n        t = -t0 * 60 * 60 # hours are in the negative\n    else:\n        t = t0 # everything else is positive \n    \n    return t\n\n# Check if it is a tool\ndef is_tool(tdata):\n    \n    tool_kw = re.search('broken|just murdered|empty|needle|bottle|bowl', tdata['newActorName'], re.IGNORECASE)\n    matches_tool = tool_kw is not None\n    \n    is_obj = tdata['origActor'] > 0\n    is_persistent = matches_tool | (tdata['origActor'] == tdata['newActor'])\n    \n    return is_obj & is_persistent\n\n# MAIN FUNCTION: Returns dict of transition data\ndef read_transition(tpath):\n    \n    tkeys = [\n        'origActor',\n        'origTarget',\n        'newActor',\n        'newTarget',\n        'autoDecaySeconds',\n        'actorMinUseFraction',\n        'targetMinUseFraction',\n        'reverseUseActor',\n        'reverseUseTarget',\n        'move',\n        'desiredMoveDist',\n        'noUseActor',\n        'noUseTarget'\n    ]\n    \n    tfile = os.path.basename(tpath)\n    tid = tfile.replace('.txt', '')\n    \n    # parse original actor/target from filename\n    # remove LA/LT/L suffix\n    originals = tid.split('_')\n    if len(originals) > 2:\n        suffix = originals[-1]\n        originals = originals[:2]\n    else: \n        suffix = ''\n    \n    lastActor = suffix == 'LA'\n    lastTarget = suffix in ['L', 'LT'] \n    \n    with open(tpath, 'r') as file:\n        raw_values = file.read()\n    \n    raw_values = raw_values.split(' ')\n    raw_values = originals + raw_values\n    tvals = [obj.parse_obj_values(v) for v in raw_values]\n    tdata = dict(zip(tkeys, tvals))\n    \n    # Last use?\n    tdata['lastUseActor'] = lastActor\n    tdata['lastUseTarget'] = lastTarget\n    \n    # Add names\n    tdata['origActorName'] = obj.obj_name(tdata['origActor'])\n    tdata['origTargetName'] = obj.obj_name(tdata['origTarget'])\n    tdata['newActorName'] = obj.obj_name(tdata['newActor'])\n    tdata['newTargetName'] = obj.obj_name(tdata['newTarget'])\n    \n    # Extra properties (from OneTech repo)\n    tool_bool = is_tool(tdata)\n    tdata['autoDecaySeconds'] = decay_time(tdata['autoDecaySeconds'])\n    tdata['isTool'] = tool_bool\n    \n    return tdata\n\n# Parse read_transition, returning only objects\ndef read_objs(tpath):\n    \n    tdata = read_transition(tpath)\n    objs = [(tdata['origActor'], tdata['origTarget']), (tdata['newActor'], tdata['newTarget'])]\n    \n    return objs","sub_path":"ohol_transitions.py","file_name":"ohol_transitions.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"179616077","text":"#!/data/home/henry/anaconda3/bin/python3.6\n# -*- coding: utf-8 -*-\n# @File  : Batch_area_average.py\n# @Author: Hanzhaohui\n# @Date  : 2018/9/17\n# @Desc  : Use multiple processes with nested threads to read the area-averaged data for every sub-region and plot it\n\n# import matplotlib\n# matplotlib.use('Qt5Agg')\nfrom Tools.Area_read import Mread_area_month_average\n# from Tools.Figure_bar import fig_area\nimport numpy as np\nfrom concurrent.futures import ProcessPoolExecutor\n\n\n# lat: 45~65 lon: 310~350\n\nlon = np.arange(310, 350, 11)\nlat = np.arange(45, 60, 5)\nm = np.arange(len(lon) - 1)\nn = 
np.arange(len(lat) - 1)\n\n\ndef mrun(lon1, lon2, lat1, lat2):\n Mread_area_month_average(lon1, lon2, lat1, lat2) # read the data\n # fig_area(lon1,lon2,lat1,lat2)# plot the figures\n\n\np = ProcessPoolExecutor(2)\nlon1 = []; lon2 = []\nlat1 = []; lat2 = []\nfor i in m:\n for j in n:\n lon1 = np.append(lon1, lon[i])\n lon2 = np.append(lon2, lon[i + 1])\n lat1 = np.append(lat1, lat[j])\n lat2 = np.append(lat2, lat[j + 1])\np.map(mrun, lon1, lon2, lat1, lat2)\n","sub_path":"Batch_area_average.py","file_name":"Batch_area_average.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"531927078","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 20 16:05:46 2019\n\n@author: angelo\n\"\"\"\n\nmango = (255,119,51)\nnero = (0,0,0)\nbianco = (255,255,255)\nrosso = (255,0,0)\nverde = (0,255,0)\nblu = (0,0,255)\nultra = (255,102,255)\n\nfrom immagini import load, save\n\ndef strisce(imm, col1, col2):\n \"\"\" Fills the image imm with stripes of alternating colors col1, col2 \"\"\"\n for i in range(0, len(imm) - 1, 2):\n imm[i] = [col1] * len(imm[0])\n imm[i+1] = [col2] * len(imm[0])\n if len(imm) % 2 == 1:\n imm[-1] = [col1] * len(imm[0])\n\ndef strisce_ver(imm, col1, col2):\n for i in range(len(imm)):\n for j in range(len(imm[0])):\n if j % 2 == 0:\n imm[i][j] = col1\n else:\n imm[i][j] = col2\n\ndef strisce_ver2(imm, col1, col2):\n for i in range(len(imm)):\n # assign only an even-length prefix so odd-width rows keep their length\n imm[i][:2 * (len(imm[0]) // 2)] = [col1, col2] * (len(imm[0])//2)\n if len(imm[0]) % 2 == 1:\n for i in range(len(imm)):\n imm[i][-1]=col1\n\n\n\n","sub_path":"programming_lab/lab201119/strisce.py","file_name":"strisce.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"460153940","text":"\"\"\"\nLeetCode\nPython3\n-*- coding: utf-8 -*-\n\n@Problem : 264. Ugly Number II\n@Difficulty : \n\n@AUTHOR : Yvette WANG\n\n@Description\n\"\"\"\n\nfrom heapq import heappush, heappop\n\n\n# medium, v2, heap + close and open list, always pop min in open list and push new to open list, 300ms\nclass Solution:\n def nthUglyNumber(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n\n open = [1]\n closed = {}\n factors = [2, 3, 5]\n\n m = 0\n while m < n:\n num = heappop(open)\n if closed.get(num, 0) == 0:\n closed[num] = 1\n for fac in factors:\n heappush(open, num * fac)\n m += 1\n\n return num\n\n\n# # medium, v1, dp, Time Limit Exceeded, 238/596\n# class Solution:\n# def nthUglyNumber(self, n):\n# \"\"\"\n# :type n: int\n# :rtype: int\n# \"\"\"\n#\n# result = 1\n#\n# self.is_ugly_d = {1: True, 2: True, 3: True, 5: True}\n#\n# self.factors = [2, 3, 5]\n#\n# n -= 1\n# num = 2\n# while n > 0:\n# if self.isUgly(num):\n# n -= 1\n# result = num\n# num += 1\n#\n# return result\n#\n# def isUgly(self, num):\n# if self.is_ugly_d.get(num, 0) == 0:\n# for fac in self.factors:\n# remaining = int(num / fac)\n# if remaining == num / fac:\n# if self.is_ugly_d[remaining]:\n# self.is_ugly_d[num] = True\n# break\n# self.is_ugly_d[num] = self.is_ugly_d.get(num, False)\n#\n# return self.is_ugly_d[num]\n\n\nif __name__ == \"__main__\":\n argument = 600\n # argument = 10\n print(Solution().nthUglyNumber(argument))\n\n","sub_path":"264. Ugly Number II.py","file_name":"264. 
Ugly Number II.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"311963151","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport jsonfield.fields\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('podrazdeleniya', '0003_subgroups_podrazdeleniye'),\n ('researches', '0007_auto_20150623_0618'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='DirectionsGroup',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),\n ],\n ),\n migrations.CreateModel(\n name='Fractions',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=255)),\n ('units', models.CharField(max_length=255)),\n ('ref_m', jsonfield.fields.JSONField()),\n ('ref_f', jsonfield.fields.JSONField()),\n ('uet_doc', models.FloatField()),\n ('uet_lab', models.FloatField()),\n ],\n ),\n migrations.CreateModel(\n name='ReleationsFT',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),\n ('tube', models.ForeignKey(to='researches.Tubes')),\n ],\n ),\n migrations.CreateModel(\n name='Researches',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=255)),\n ('quota_oms', models.IntegerField()),\n ('preparation', models.CharField(max_length=2047)),\n ('direction', models.ForeignKey(to='directory.DirectionsGroup')),\n ('subgroup', models.ForeignKey(to='podrazdeleniya.Subgroups', related_name='subgroup')),\n ],\n ),\n migrations.AddField(\n model_name='fractions',\n name='relation',\n field=models.ForeignKey(to='directory.ReleationsFT'),\n ),\n migrations.AddField(\n model_name='fractions',\n name='research',\n field=models.ForeignKey(to='directory.Researches'),\n ),\n ]\n","sub_path":"directory/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"199980477","text":"# coding: utf-8\n# Data Mining and Knowledge Discovery, assignment 3 - Li Hao - 18101223769:\nfrom matplotlib import image as img\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport random\nimport math\n\n# Get the RGB array of an image\ndef getImageRGB(file):\n image = img.imread(file)\n width, height, x = image.shape\n # create an RGB array the same size as the photo to store the RGB data\n rgb = np.zeros((width, height, x))\n for i in range(width):\n for j in range(height):\n rgb[i][j] = image[i, j]\n return rgb\n\n\n# Randomly generate k seeds and return the coordinates of k random pixels\ndef initCentroids(imageRGB, k):\n center = []\n for i in range(k):\n # randint is inclusive at both ends, so subtract 1 to stay inside the image\n x, y = random.randint(0, imageRGB.shape[0] - 1), random.randint(0, imageRGB.shape[1] - 1)\n center += [[x, y]]\n return center\n\n\n# Compute the Euclidean distance from every pixel to the k center pixels\ndef caclEucDistance(imageRGB, centers):\n region = []\n for i in range(imageRGB.shape[0]): # rows\n x = []\n for j in range(imageRGB.shape[1]): # columns\n temp = []\n for k in range(len(centers)): # distance from this pixel to each of the k centers\n dist = np.sqrt(np.sum(np.square(imageRGB[i, j] - imageRGB[centers[k][0], centers[k][1]])))\n temp += [dist] # collect in the temporary list\n x.append(np.argmin(temp)) # index of the nearest center; assign the pixel to that cluster\n region.append(x)\n return region # pixel-to-cluster mapping, same size as the image\n\n\n# Iteration step: compute the Euclidean distance from every pixel to the k center colors\ndef loopCaclEucDistance(imageRGB, CalCentercolor):\n region = []\n for i in range(imageRGB.shape[0]): 
# rows\n x = []\n for j in range(imageRGB.shape[1]): # columns\n temp = []\n for k in range(len(CalCentercolor)): # distance from this pixel to each of the k center colors\n dist = np.sqrt(np.sum(np.square(imageRGB[i, j] - CalCentercolor[k])))\n temp += [dist] # collect in the temporary list\n x.append(np.argmin(temp)) # index of the nearest center; assign the pixel to that cluster\n region.append(x)\n return region # pixel-to-cluster mapping, same size as the image\n\n\n# Compute the mean color of each cluster\ndef calNewCenter(features, imageRGB, k):\n temp = [] # flat list of all cluster labels\n for i in features:\n for j in i:\n temp.append(j)\n centercolor = [0] * k\n # accumulate the RGB values of the pixels contained in each cluster\n for i in range(len(features)): #Rows\n for j in range(len(features[i])): #Columns\n centercolor[features[i][j]] += imageRGB[i, j]\n \n for i in range(len(centercolor)):\n centercolor[i] /= temp.count(i) # mean RGB of each cluster\n # round the computed means to integers\n for j in range(len(centercolor[i])): #Columns\n centercolor[i][j] = int(centercolor[i][j])\n return centercolor\n\n\n# Show a before/after comparison of the segmentation\ndef showImage(imageRGB, centercolor, features, k, iteration):\n NewImage = np.empty((len(features), len(features[0]), 3))\n for i in range(len(features)):\n for j in range(len(features[i])):\n NewImage[i, j] = centercolor[features[i][j]]\n # draw the figure\n fig = plt.figure(figsize=(10, 4), facecolor='white')\n fig.suptitle('k='+str(k)+', iteration='+str(iteration), fontsize=12, color='k')\n fig.gca().xaxis.set_major_locator(plt.NullLocator())\n fig.gca().yaxis.set_major_locator(plt.NullLocator())\n\n # subplot 1: the original image\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.axis('off') # hide the axes\n ax1.imshow(imageRGB / 255)\n ax1.set_title('Original image', fontsize=10, color='k')\n\n # subplot 2: the segmented image\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.axis('off') # hide the axes\n ax2.imshow(NewImage / 255)\n ax2.set_title('Split graph', fontsize=10, color='k')\n\n # display the plot\n plt.show()\n\n\n# -------------------main---------------------------\ndef main():\n # load the image data\n imageRGB = getImageRGB('picture.jpg')\n print('Finish load image RGB data...')\n # set the number of clusters: k = 3\n k = 3\n # generate k random pixel coordinates\n InitialCenter = initCentroids(imageRGB, k)\n # compute the distance from every pixel to the k centers and assign clusters\n features = caclEucDistance(imageRGB, InitialCenter)\n\n # maximum number of iterations of the k-means algorithm: iteration = 20\n iteration = 20\n for i in range(iteration, 0, -1):\n print('iteration = ', i)\n CalCentercolor = calNewCenter(features, imageRGB, k) # mean color of each cluster\n # re-assign the pixels using the new cluster means\n features = loopCaclEucDistance(imageRGB, CalCentercolor)\n print('\\n'+'Center[k] = ', CalCentercolor, '\\n')\n\n # show the before/after comparison\n showImage(imageRGB, CalCentercolor, features, k, iteration)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"PythonKmeansPicture/kmeansPicture.py","file_name":"kmeansPicture.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"135773483","text":"import matplotlib\n\nmatplotlib.use('agg')\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Patch\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\n\norder = ['Split', 'Mixed', 'Reverse split', 'UK', 'US']\ncol_order = ['Neural network', 'Support vector machine', 'Random forest']\n\ndef base_plot(data):\n sns.set(font_scale=1.5)\n x='Dataset'\n y='Accuracy'\n g = sns.factorplot(x=x, y=y, hue='Kmer Length',\n col='Model', row='Kmer Filter', kind='box',\n data=data, legend=False, size=12, fliersize=10,\n linewidth=2, col_order=col_order, order=order,\n medianprops={'color': 'black', 'linewidth': 3,\n 'solid_capstyle': 'butt'})\n g.set_titles(\"{col_name} with {row_name} Data\")\n g.axes[0][0].legend(loc='upper left', title='Kmer Length')\n return g\n\ndef 
add_constant_overlay(g, overlay, color):\n args = {'medianprops': {'color': color, 'linewidth': 3},\n 'order': order, 'x': 'Dataset', 'y': 'Accuracy',\n 'func': sns.boxplot}\n for i in range(g.shape[0]):\n for j in range(g.shape[1]):\n g = g.map(data=overlay, ax=g.axes[i][j], **args)\n return g\n\ndef add_variable_overlay(g, overlays, color, axis):\n assert len(overlays) == g.shape[axis]\n args = {'medianprops': {'color': color, 'linewidth': 3},\n 'order': order, 'x': 'Dataset', 'y': 'Accuracy',\n 'func': sns.boxplot}\n if axis == 0:\n for j in range(g.shape[1]):\n for i in range(g.shape[0]):\n g = g.map(data=overlays[i], ax=g.axes[i][j], **args)\n else:\n for i in range(g.shape[0]):\n for j in range(g.shape[1]):\n g = g.map(data=overlays[j], ax=g.axes[i][j], **args)\n return g\n\ndef plot_lupolova(outputfile):\n data = pd.read_csv(snakemake.input[0])\n lupolova = pd.read_csv(snakemake.input[1], sep='|', skiprows=[1],\n converters={'Accuracy': lambda x: float(x)})\n g = base_plot(data)\n color = 'red'\n g = add_constant_overlay(g, lupolova, color)\n patch = Patch(color=color, label='Lupolova Accuracies')\n g.axes[0][0].legend(loc='upper right', handles=[patch])\n plt.savefig(outputfile, dpi=1200)\n\ndef get_overlays(data, selection):\n base = data.loc[data['Datatype'] == 'Genome region']\n base = base.loc[base['Feature Selection'] == selection] # filter the already-restricted frame, not the full data\n base = base.groupby(by=['Model', 'Dataset']).mean().reset_index()\n overlays = []\n for model in col_order:\n overlays.append(base.loc[base['Model'] == model])\n return overlays\n\ndef plot_genome_region(outputfile):\n data = pd.read_csv(snakemake.input[0])\n g = base_plot(data)\n colors = ['red', 'orange']\n patches = []\n for i, selection in enumerate(np.unique(data['Feature Selection'].values)):\n overlays = get_overlays(data, selection)\n g = add_variable_overlay(g, overlays, colors[i], 1)\n patches.append(Patch(color=colors[i], label=selection))\n g.axes[0][0].legend(loc='upper right', handles=patches)\n plt.savefig(outputfile, dpi=1200)\n\ndef plot_both(outputfile):\n data = pd.read_csv(snakemake.input[0])\n lupolova = pd.read_csv(snakemake.input[1], sep='|', skiprows=[1],\n converters={'Accuracy': lambda x: float(x)})\n g = base_plot(data)\n g = add_constant_overlay(g, lupolova, 'blue')\n patches = [Patch(color='blue', label='Lupolova Accuracies')]\n colors = ['red', 'orange']\n for i, selection in enumerate(np.unique(data['Feature Selection'].values)):\n overlays = get_overlays(data, selection)\n g = add_variable_overlay(g, overlays, colors[i], 1)\n patches.append(Patch(color=colors[i], label=selection))\n g.axes[0][0].legend(loc='upper left', handles=patches)\n plt.savefig(outputfile, dpi=1200)\n\ndef main():\n plot_lupolova(snakemake.output[0])\n plot_genome_region(snakemake.output[1])\n plot_both(snakemake.output[2])\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/validation_figs.py","file_name":"validation_figs.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"507883444","text":"from setuptools import setup, find_packages\n\nfrom os.path import join, dirname, realpath, relpath, splitext\nfrom os import walk, sep\n\nimport numpy as np\n\ntry:\n from Cython.Distutils.extension import Extension\n from Cython.Distutils import build_ext as cython_build_ext\n use_cython = True\nexcept ImportError:\n from distutils.extension import Extension\n use_cython = False\n\nSCRIPT_DIR = 
dirname(realpath(__file__))\nGLOVE_SOURCE_DIR = join(SCRIPT_DIR, \"glove\")\nGLOVE_MODULE_NAME = \"glove\"\n\ndef strict_lstrip(string, ending):\n if string.endswith(ending):\n return string[:-len(ending)]\n return string\n\ndef path_to_module_name(path):\n relative_path = relpath(path, SCRIPT_DIR)\n path_no_ext, _ = splitext(relative_path)\n return strict_lstrip(path_no_ext, sep).replace(sep, '.')\n\n\ndef find_files_by_suffix(path, suffix):\n \"\"\"Recursively find files with specific suffix in a directory\"\"\"\n for relative_path, dirs, files in walk(path):\n for fname in files:\n if fname.endswith(suffix):\n yield join(path, relative_path, fname)\n\next_modules = []\nfor pyx_file in find_files_by_suffix(GLOVE_SOURCE_DIR, \".pyx\"):\n ext_modules.append(Extension(\n name=path_to_module_name(pyx_file),\n sources=[pyx_file if use_cython else pyx_file.replace(\".pyx\", \".cpp\")],\n library_dirs=[],\n language='c++',\n extra_compile_args=['-std=c++11', '-Wno-unused-function',\n '-Wno-sign-compare', '-Wno-unused-local-typedef',\n '-Wno-undefined-bool-conversion', '-O3',\n '-Wno-reorder'],\n extra_link_args=[],\n libraries=[],\n extra_objects=[],\n include_dirs=[np.get_include()]\n ))\n\ndef readfile(fname):\n return open(join(dirname(__file__), fname)).read()\n\ncmdclass = {}\nif use_cython:\n cmdclass['build_ext'] = cython_build_ext\nsetup(\n name=GLOVE_MODULE_NAME,\n version='1.0.2',\n cmdclass=cmdclass,\n description='Python package for computing embeddings from co-occurence matrices',\n long_description=readfile('README.md'),\n ext_modules=ext_modules,\n packages=find_packages(),\n py_modules = [],\n author='Jonathan Raiman',\n author_email='jonathanraiman@gmail.com',\n url='https://github.com/JonathanRaiman/glove',\n download_url='https://github.com/JonathanRaiman/glove',\n keywords='NLP, Machine Learning',\n license='MIT',\n platforms='any',\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.3'\n ],\n setup_requires = [],\n install_requires=['numpy'],\n include_package_data=True,\n)\n","sub_path":"pypi_install_script/glove-1.0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"302761151","text":"from matplotlib import pyplot as plt\nfrom keras.preprocessing.image import img_to_array\nfrom keras.applications import imagenet_utils\nimport numpy as np\nimport os\nimport cv2\n\n\nclass Functions:\n def __init__(self):\n ''' Constructor for this class. 
'''\n # Create some member animals\n self.members = ['Tiger', 'Elephant', 'Wild Cat']\n\n def printMembers(self):\n print('Printing members of the Mammals class')\n for member in self.members:\n print('\\t%s ' % member)\n\n '''\n Gets the image paths\n from current folder and\n returns them\n '''\n def getImagePaths(self, paths):\n #set counter\n imagePaths = []\n #go through all folders\n for name, path in paths.items():\n #print(name)\n files = next(os.walk(path))[2]\n if (len(files) > 0):\n for file in files:\n #print(file)\n imagePath = os.path.join(path, file)\n imagePaths.append(imagePath)\n\n return imagePaths\n\n\n\n '''\n Gets the image paths\n from a structured folder\n set and returns them\n '''\n def getStructuredImagePaths(self, paths, imageType):\n #set counter\n imagePaths = []\n #go through all folders\n for name, path in paths.items():\n #print(name)\n subdirs = [x[0] for x in os.walk(path)]\n for subdir in subdirs:\n files = next(os.walk(subdir))[2]\n if (len(files) > 0):\n for file in files:\n if (imageType in file):\n imagePath = os.path.join(subdir, file)\n imagePaths.append(imagePath)\n\n return imagePaths\n\n\n '''\n Pre-Processing\n '''\n def preProcessImage(self, img, crop=False):\n\n if crop:\n #crop if required\n img = img[0:1200, 600:1800]\n #trans to HSV, saturation channel\n #have to invert colours first\n destRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # blurred = cv2.medianBlur(destRGB, 25)\n # hsv = cv2.cvtColor(destRGB, cv2.COLOR_BGR2HSV)\n # h, s, v = cv2.split(hsv)\n # #threshhold saturation channel\n # (T, threshInv1) = cv2.threshold(s, 180, 255, cv2.THRESH_BINARY)\n # # thresh = cv2.adaptiveThreshold(s, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 15, 4)\n # #apply mask\n # sat = cv2.bitwise_and(destRGB, destRGB, mask = threshInv1)\n # (T, threshInv2) = cv2.threshold(s, 180, 255, cv2.THRESH_BINARY)\n # white = cv2.bitwise_and(destRGB, destRGB, mask = threshInv2)\n # masked = cv2.bitwise_or(sat, white)\n # #convert to LAB colourspace\n # Lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)\n # L, a, b = cv2.split(Lab)\n # #find any light patches\n # #subtract b from a\n # sub = a-b\n # (T, threshInv) = cv2.threshold(sub, 200, 255, cv2.THRESH_BINARY)\n # #apply mask\n # masked2 = cv2.bitwise_and(destRGB, destRGB, mask = threshInv)\n\n #small = cv2.resize(masked, (0,0), fx=0.5, fy=0.5)\n # resize to ResNet image input size\n res = cv2.resize(destRGB, dsize=(100, 100), interpolation=cv2.INTER_CUBIC)\n\n # cv2.imshow(\"image\", masked)\n # cv2.imshow(\"image2\", res)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n #flatten\n image = img_to_array(res)\n\n # preprocess the image by (1) expanding the dimensions and\n # (2) subtracting the mean RGB pixel intensity from the\n # ImageNet dataset\n image = np.expand_dims(image, axis=0)\n # image = imagenet_utils.preprocess_input(image)\n return image\n\n\n '''\n Get the colour values\n of each pixel in the frame\n '''\n def getColourVals(self, path, crop=False):\n #get image\n img = plt.imread(path, 0)\n if crop:\n #crop if required\n img = img[0:1200, 600:1800]\n\n #reverse RGB\n destRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n #blur image\n blurred = cv2.medianBlur(destRGB, 25)\n\n # resize to ResNet image input size\n res = cv2.resize(blurred, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)\n\n pixels = []\n #go through each pixel to find RGB values\n for p in res:\n pix = [p[0], p[1], p[2]]\n pixels = np.append(pixels, pix)\n\n return 
pixels\n","sub_path":"openCV/transfer_learn/strawberry_TL/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"454712962","text":"from Domain.obiect import get_descriere, get_pret_achizitie, get_locatie\n\n\ndef mutare_obiecte(old_loc, new_loc, lst):\n\t\"\"\"\n\tmuta toate obiectele dintr-o locatie in alta\n\tparam. old_loc: locatia obiectelor de mutat\n\tparam. new_loc: noua locatie\n\treturn: lista dupa mutarea tuturor obiectele dintr-o locatie in alta\n\t\"\"\"\n\texista_old_loc = False\n\tfor obiect in lst:\n\t\tif get_locatie(obiect) == old_loc:\n\t\t\texista_old_loc = True\n\t\t\tobiect[\"locatie\"] = new_loc\n\tif exista_old_loc is False:\n\t\traise ValueError(\"Locatia din care incercati sa mutati obiecte nu exista!\")\n\tif len(new_loc) != 4:\n\t\traise ValueError(\"Locatia noua trebuie sa aiba exact 4 caractere!\")\n\tif old_loc == new_loc:\n\t\traise RuntimeError(\"Locatia noua coincide cu cea curenta!\")\n\treturn lst\n\n\ndef concatenare_str(obiect, string):\n\t\"\"\"\n\tconcateneaza un string la descrierea unui obiect\n\tparam. obiect: obiectul\n\tparam. str: stringul\n\treturn: obiectul dupa concatenarea stringului la descriere\n\t\"\"\"\n\tobiect[\"descriere\"] = get_descriere(obiect) + string\n\treturn obiect\n\n\ndef pret_max_fiecare_locatie(lst):\n\t\"\"\"\n\tdetermina cel mai mare pret pentru fiecare locatie\n\tparam. lst: lista de obiecte\n\treturn: un dictionar in care cheile sunt locatiile si valorile sunt pretul maxim pentru ficare lcatie\n\t\"\"\"\n\trezultat = {}\n\tfor obiect in lst:\n\t\tpret = get_pret_achizitie(obiect)\n\t\tlocatie = get_locatie(obiect)\n\t\tif locatie in rezultat:\n\t\t\tif pret > rezultat[locatie]:\n\t\t\t\trezultat[locatie] = pret\n\t\telse:\n\t\t\trezultat[locatie] = pret\n\treturn rezultat\n\n\ndef ordonare_cresc_dupa_pret(lst):\n\t\"\"\"\n\tordoneaza crescator o lista de obiecte in functie de pretul de achizitie\n\tparam. lst: lista de obiecte\n\treturn: lista ordonata crescator\n\t\"\"\"\n\treturn sorted(lst, key=get_pret_achizitie)\n\n\ndef suma_fiecare_locatie(lst):\n\t\"\"\"\n\tdetermina suma prețurilor pentru fiecare locație\n\tparam. lst: lista de obiecte\n\treturn: un dictionar in care cheile sunt locatiile si valorile sunt sumele preturilor pentru ficare lcatie\n\t\"\"\"\n\trezultat = {}\n\tfor obiect in lst:\n\t\tpret = get_pret_achizitie(obiect)\n\t\tlocatie = get_locatie(obiect)\n\t\tif locatie in rezultat:\n\t\t\trezultat[locatie] += pret\n\t\telse:\n\t\t\trezultat[locatie] = pret\n\treturn rezultat\n","sub_path":"Logic/functionalitati.py","file_name":"functionalitati.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"192536744","text":"# Copyright (c) 2021 Graphcore Ltd. 
All rights reserved.\n\nimport numpy as np\nimport pytest\nimport tensorflow.compat.v1 as tf\n\nimport optimiser\n\n\n@pytest.mark.parametrize(\"dtype\", [tf.float32, tf.float16])\ndef test_adam(dtype: tf.dtypes.DType) -> None:\n with tf.Graph().as_default(), tf.Session() as session:\n with tf.variable_scope(\"impl\"):\n impl_x = tf.get_variable(\"x\", (),\n dtype=dtype,\n initializer=tf.zeros_initializer())\n impl_update = optimiser.Adam(0.1).minimize_with_global_step(\n (impl_x - 3)**2)\n\n with tf.variable_scope(\"ref\"):\n ref_x = tf.get_variable(\"x\", (),\n dtype=tf.float32,\n initializer=tf.zeros_initializer())\n ref_update = tf.train.AdamOptimizer(0.1).minimize((ref_x - 3)**2)\n\n session.run(tf.global_variables_initializer())\n for _ in range(10):\n impl, ref, _, _ = session.run(\n [impl_x, ref_x, impl_update, ref_update])\n np.testing.assert_allclose(impl, ref, rtol=1e-3)\n assert ref < 6\n","sub_path":"applications/tensorflow/tgn/tests/test_optimiser.py","file_name":"test_optimiser.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"522774238","text":"from PyPDF2 import PdfFileMerger\n\n\ndef get_merge_pdf(*pdf_list):\n\t# merge all donation pdf receipts\n\t# into a single pdf and make it\n\t# available for download\n\t# (PHOTO_URL is expected to be defined elsewhere as the site base URL)\n\tfile_path = '/home/mahiti/git-p/icfn-api/icfn-rest-demo/static/DonationReceipts.pdf'\n\t# file_path = '/home/mahiti/ICFN/icfn3/static/tempreceipt/DonationReceipts.pdf'\n#\t# file_path = '/home/mahiti/Desktop/icfn-api/icfn-rest-demo/static/donation_receipt/DonationReceipts.pdf'\n\tmerger = PdfFileMerger()\n\tfor pdf in pdf_list:\n\t\tmerger.append(pdf)\n\tmerger.write(file_path)\n\t# response = {\"status\":2,\"message\":\"successfully\",\"all_receipts_pdf\":PHOTO_URL + '/static/tempreceipt/DonationReceipts.pdf'}\n\tresponse = {\"status\":2,\"message\":\"successfully\",\"all_receipts_pdf\":PHOTO_URL + '/static/DonationReceipts.pdf'}\n\treturn response","sub_path":"pdf-merger.py","file_name":"pdf-merger.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"181116065","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .forms import AuthorizationKeyFormSet, SiteForm, SiteSettingForm\nfrom ..views import staff_member_required\nfrom ...site.models import AuthorizationKey, SiteSettings\n\n\n@staff_member_required\n@permission_required('site.edit_settings')\ndef index(request):\n settings = get_current_site(request).settings\n return redirect('dashboard:site-update', site_id=settings.pk)\n\n\n@staff_member_required\n@permission_required('site.edit_settings')\ndef update(request, site_id=None):\n site_settings = get_object_or_404(SiteSettings, pk=site_id)\n site = site_settings.site\n site_settings_form = SiteSettingForm(\n request.POST or None, instance=site_settings)\n site_form = SiteForm(request.POST or None, instance=site)\n authorization_qs = AuthorizationKey.objects.filter(\n site_settings=site_settings)\n formset = AuthorizationKeyFormSet(\n request.POST or None, queryset=authorization_qs,\n initial=[{'site_settings': site_settings}])\n if all([site_settings_form.is_valid(), site_form.is_valid(),\n formset.is_valid()]):\n site = 
site_form.save()\n site_settings_form.instance.site = site\n site_settings = site_settings_form.save()\n formset.save()\n messages.success(request, _('Updated site %s') % site_settings)\n return redirect('dashboard:site-update', site_id=site_settings.id)\n ctx = {'site': site_settings, 'site_settings_form': site_settings_form,\n 'site_form': site_form, 'formset': formset}\n return TemplateResponse(request, 'dashboard/sites/detail.html', ctx)\n","sub_path":"saleor/dashboard/sites/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"242224055","text":"from django.db import models\n\nfrom .managers import safedelete_manager_factory\nfrom .utils import (related_objects,\n HARD_DELETE, SOFT_DELETE, HARD_DELETE_NOCASCADE,\n DELETED_INVISIBLE, DELETED_VISIBLE_BY_PK)\n\n\ndef safedelete_mixin_factory(policy,\n visibility=DELETED_INVISIBLE,\n manager_superclass=models.Manager,\n queryset_superclass=models.query.QuerySet):\n \"\"\"\n Returns an abstract Django model, with a ``deleted`` field.\n It will also have a custom default manager, and an overriden ``delete()`` method.\n\n :param policy: define what happens when you delete an object. It can be one of ``HARD_DELETE``, ``SOFT_DELETE`` and ``HARD_DELETE_NOCASCADE``.\n :param visibility: useful to define how deleted objects can be accessed. It can be ``DELETED_INVISIBLE`` (by default), or ``DELETED_VISIBLE_BY_PK``.\n\n :param manager_superclass: if you want, you can make your manager inherits from another class. Useful if you need to use a custom manager.\n :param queryset_superclass: the manager that will be created will return a queryset instance, which class will inherits from this class.\n\n :Example:\n\n >>> my_mixin = safedelete_mixin_factory(policy=SOFT_DELETE)\n >>> class MyModel(my_mixin):\n ... 
my_field = models.TextField()\n ...\n >>> # Now you have your model (with its ``deleted`` field, and custom manager and delete method)\n\n \"\"\"\n\n assert policy in (HARD_DELETE, SOFT_DELETE, HARD_DELETE_NOCASCADE)\n assert visibility in (DELETED_INVISIBLE, DELETED_VISIBLE_BY_PK)\n\n class Model(models.Model):\n\n deleted = models.BooleanField(default=False)\n\n objects = safedelete_manager_factory(manager_superclass, queryset_superclass, visibility)()\n\n def save(self, keep_deleted=False, **kwargs):\n \"\"\"\n Save an object, un-deleting it if it was deleted.\n If you want to keep it deleted, you can set the ``keep_deleted`` argument to ``True``.\n \"\"\"\n if not keep_deleted:\n self.deleted = False\n super(Model, self).save(**kwargs)\n\n def undelete(self):\n assert self.deleted\n self.save()\n\n def delete(self, force_policy=None, **kwargs):\n current_policy = policy if (force_policy is None) else force_policy\n\n if current_policy == SOFT_DELETE:\n\n # Only soft-delete the object, marking it as deleted.\n self.deleted = True\n super(Model, self).save(**kwargs)\n\n elif current_policy == HARD_DELETE:\n\n # Normally hard-delete the object.\n super(Model, self).delete()\n\n elif current_policy == HARD_DELETE_NOCASCADE:\n\n # Hard-delete the object only if nothing would be deleted with it\n\n if sum(1 for _ in related_objects(self)) > 0:\n self.delete(force_policy=SOFT_DELETE, **kwargs)\n else:\n self.delete(force_policy=HARD_DELETE, **kwargs)\n\n class Meta:\n abstract = True\n\n return Model\n\n\n# Often used\nSoftDeleteMixin = safedelete_mixin_factory(SOFT_DELETE)\n","sub_path":"safedelete/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"556122858","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nFunctions and classes for common use.\n\"\"\"\n\n# ---- Imports -----------------------------------------------------------\nfrom os import path\n\n# the project's home directory\nPROJECT_HOME = path.dirname(__file__)\n\n# the icons' storage path\nICONS_PATH = path.join(PROJECT_HOME, 'icons')\n\n# the bridges' description file storage path\nBRIDGES_PATH = path.join(PROJECT_HOME, 'bridges')\n\n# directory where binary tools locate\nBINARY_PATH = path.join(PROJECT_HOME, 'binary')\n\n# logs directory\nLOG_PATH = path.join(PROJECT_HOME, 'logs')\n\n\ndef singleton(cls):\n \"\"\"\n A decorator to make the decorated *cls* have only one instance.\n\n Example:\n\n @singleton\n class Foo(object):\n pass\n\n :param cls: class\n The class to be decorated\n :return:\n The decorated class\n \"\"\"\n _instances = {}\n\n def _singleton(*args, **traits):\n if cls not in _instances:\n _instances[cls] = cls(*args, **traits)\n return _instances[cls]\n\n return _singleton\n\n# EOF\n\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"449717766","text":"import datetime\nfrom unittest.mock import patch\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom django.test import TestCase\n\nfrom zenslackchat.models import ZenSlackChat\nfrom zenslackchat.models import NotFoundError\nfrom zenslackchat.models import OutOfHoursInformation\n\n\nUTC = datetime.timezone.utc\n\n\n@pytest.mark.parametrize(\n ('now', 'expected'),\n [\n # office hours range 09:00 - 17:00.\n (datetime.datetime(2021, 3, 9, 9, 0, 0, tzinfo=UTC), False),\n (datetime.datetime(2021, 
3, 9, 15, 34, 12, tzinfo=UTC), False),\n (datetime.datetime(2021, 3, 9, 17, 0, 0, tzinfo=UTC), False),\n # Outside range is out of hours:\n (datetime.datetime(2021, 3, 9, 8, 59, 59, tzinfo=UTC), True),\n (datetime.datetime(2021, 3, 9, 17, 0, 1, tzinfo=UTC), True),\n (datetime.datetime(2021, 3, 9, 22, 40, 2, tzinfo=UTC), True),\n # weekend is out of hours:\n (datetime.datetime(2021, 3, 13, 9, 0, 0, tzinfo=UTC), True),\n (datetime.datetime(2021, 3, 14, 17, 0, 0, tzinfo=UTC), True),\n (datetime.datetime(2021, 3, 7, 8, 59, 59, tzinfo=UTC), True),\n]\n)\n@patch('zenslackchat.models.post_message')\ndef test_inform_if_out_of_hours(post_message, log, db, now, expected):\n \"\"\"Verify when an out of hours message is 'posted' to slack.\n \"\"\"\n slack_client = MagicMock()\n\n OutOfHoursInformation.update(\"Contact XYZ\", hours=(\"09:00\", \"17:00\"))\n\n assert OutOfHoursInformation.inform_if_out_of_hours(\n now,\n chat_id='some-chat-id',\n channel_id='A0192KL3TFG',\n slack_client=slack_client\n ) == expected\n\n if expected is True:\n post_message.assert_called_with(\n slack_client,\n 'some-chat-id',\n 'A0192KL3TFG',\n \"Contact XYZ\"\n )\n\n else:\n post_message.assert_not_called()\n\n\n@pytest.mark.parametrize(\n ('now', 'expected'),\n [\n # office hours range 09:00 - 17:00.\n (datetime.datetime(2021, 3, 9, 9, 0, 0, tzinfo=UTC), False),\n (datetime.datetime(2021, 3, 9, 15, 34, 12, tzinfo=UTC), False),\n (datetime.datetime(2021, 3, 9, 17, 0, 0, tzinfo=UTC), False),\n # Outside range is out of hours:\n (datetime.datetime(2021, 3, 9, 8, 59, 59, tzinfo=UTC), True),\n (datetime.datetime(2021, 3, 9, 17, 0, 1, tzinfo=UTC), True),\n (datetime.datetime(2021, 3, 9, 22, 40, 2, tzinfo=UTC), True),\n # Weekend is out of hours:\n # saturday\n (datetime.datetime(2021, 3, 13, 9, 0, 0, tzinfo=UTC), True),\n # sunday\n (datetime.datetime(2021, 3, 14, 17, 0, 0, tzinfo=UTC), True),\n (datetime.datetime(2021, 3, 7, 8, 59, 59, tzinfo=UTC), True),\n ]\n)\ndef test_is_out_of_hours_with_default(log, db, now, expected):\n \"\"\"Test the logic for working out if a time is in or out of working hours.\n \"\"\"\n OutOfHoursInformation.update(hours=(\"09:00\", \"17:00\"))\n assert OutOfHoursInformation.is_out_of_hours(now) == expected\n\n\n@pytest.mark.parametrize(\n ('now',),\n [\n (datetime.datetime(2021, 3, 9, 9, 0, 0, tzinfo=UTC),),\n (datetime.datetime(2021, 3, 9, 8, 59, 59, tzinfo=UTC),),\n ]\n)\n@patch('zenslackchat.models.post_message')\ndef test_no_out_of_hours_defined(post_message, log, db, now):\n \"\"\"Verify the behaviour when no office hours are defined.\n\n False is returned indicating all day is office hours.\n\n \"\"\"\n slack_client = MagicMock()\n\n assert OutOfHoursInformation.is_out_of_hours(now) is False\n\n assert OutOfHoursInformation.inform_if_out_of_hours(\n now,\n chat_id='some-chat-id',\n channel_id='A0192KL3TFG',\n slack_client=slack_client\n ) is False\n\n post_message.assert_not_called()\n\n\ndef test_out_of_hours_information(log, db):\n \"\"\"Test default and help text recovery.\n \"\"\"\n # Test the default with no text set\n message = OutOfHoursInformation.help_text()\n assert message == 'No Out Of Hours Message Set!'\n\n OutOfHoursInformation.update(\"\"\"\nContact a@b.com\nMobile: +44 123456\n \"\"\")\n\n message = OutOfHoursInformation.help_text()\n assert message == \"\"\"\nContact a@b.com\nMobile: +44 123456\n \"\"\"\n\n\ndef test_out_of_hours_instance(log, db):\n \"\"\"Test the default instance values and updating them.\n \"\"\"\n OutOfHoursInformation.update()\n\n oohi = OutOfHoursInformation.help()\n assert 
oohi.message == 'No Out Of Hours Message Set!'\n assert oohi.office_hours_begin == datetime.time(9, 0)\n assert oohi.office_hours_end == datetime.time(17, 0)\n\n oohi2 = OutOfHoursInformation.update(\n \"Contact XYZ\",\n (\"09:30\", \"18:30\")\n )\n assert oohi2.message == 'Contact XYZ'\n assert oohi2.office_hours_begin == datetime.time(9, 30)\n assert oohi2.office_hours_end == datetime.time(18, 30)\n\n\ndef test_basic_cru_functionality(log, db):\n \"\"\"Test the basic operations we rely on.\n \"\"\"\n ZenSlackChat.open(\n channel_id=\"slack-channel-id-1\",\n chat_id=\"slack-chat-id-1\",\n ticket_id=\"zendesk-ticket-id-1\",\n opened=datetime.datetime(2020, 1, 1, 12, 30, tzinfo=UTC)\n )\n\n ZenSlackChat.open(\n channel_id=\"slack-channel-id-2\",\n chat_id=\"slack-chat-id-2\",\n ticket_id=\"zendesk-ticket-id-2\",\n opened=datetime.datetime(2020, 7, 17, 14, 0, tzinfo=UTC)\n )\n\n assert ZenSlackChat.objects.count() == 2\n\n chat1 = ZenSlackChat.get(\"slack-channel-id-1\", \"slack-chat-id-1\")\n assert chat1.active is True\n assert chat1.closed is None\n assert chat1.opened == datetime.datetime(\n 2020, 1, 1, 12, 30, tzinfo=UTC\n )\n\n chat2 = ZenSlackChat.get(\"slack-channel-id-2\", \"slack-chat-id-2\")\n assert chat2.active is True\n assert chat2.closed is None\n assert chat2.opened == datetime.datetime(\n 2020, 7, 17, 14, 0, tzinfo=UTC\n )\n\n results = ZenSlackChat.open_issues()\n assert len(results) == 2\n # The most recent issue should be first I've decided, check:\n assert results[0].opened == datetime.datetime(\n 2020, 7, 17, 14, 0, tzinfo=UTC\n )\n assert results[1].opened == datetime.datetime(\n 2020, 1, 1, 12, 30, tzinfo=UTC\n )\n\n ZenSlackChat.resolve(\n \"slack-channel-id-1\",\n \"slack-chat-id-1\",\n closed=datetime.datetime(2020, 8, 2, 9, 31, tzinfo=UTC)\n )\n\n results = ZenSlackChat.open_issues()\n assert len(results) == 1\n assert results[0].opened == datetime.datetime(\n 2020, 7, 17, 14, 0, tzinfo=UTC\n )\n\n chat1 = ZenSlackChat.get(\"slack-channel-id-1\", \"slack-chat-id-1\")\n assert chat1.active is False\n assert chat1.closed == datetime.datetime(2020, 8, 2, 9, 31, tzinfo=UTC)\n assert chat1.opened == datetime.datetime(2020, 1, 1, 12, 30, tzinfo=UTC)\n\n\ndef test_not_found(db):\n \"\"\"Verify how I handle not finding instances.\n \"\"\"\n with pytest.raises(NotFoundError):\n ZenSlackChat.get(\"slack-channel-id-1\", \"slack-chat-id-1\")\n\n with pytest.raises(NotFoundError):\n ZenSlackChat.get_by_ticket(\"slack-chat-id-1\", \"zendesk-ticket-id-1\")\n\n with pytest.raises(NotFoundError):\n ZenSlackChat.resolve(\"slack-channel-id-1\", \"slack-chat-id-1\")\n\n\ndef test_no_open_issues(db):\n \"\"\"Verify I get no open issues when DB is empty.\n \"\"\"\n assert ZenSlackChat.open_issues() == []\n","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"272306350","text":"from django.contrib import admin\nfrom django.utils.text import Truncator\n\nfrom .models import Glucose, Category\n\n\nclass GlucoseAdmin(admin.ModelAdmin):\n list_display = [\n 'value',\n 'category',\n 'record_date',\n 'record_time',\n 'notes_truncated',\n 'tag_list',\n 'user',\n 'created',\n 'modified',\n ]\n\n list_filter = [\n 'user',\n 'category',\n ]\n\n def notes_truncated(self, obj):\n return Truncator(obj.notes).chars(75)\n notes_truncated.admin_order_field = 'notes'\n notes_truncated.short_description = 'Notes'\n\n def tag_list(self, obj):\n 
\"\"\"\n Retrieve the tags separated by comma.\n \"\"\"\n return ', '.join([t.name for t in obj.tags.all()])\n tag_list.short_description = 'Tags'\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = [\n 'id',\n 'name',\n ]\n\n\nadmin.site.register(Glucose, GlucoseAdmin)\nadmin.site.register(Category, CategoryAdmin)\n","sub_path":"glucosetracker/glucoses/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"613467707","text":"#!/usr/bin/env python3\n\nimport psycopg2\n\n\n# Outputs the most famous authors from most famous to least famous\ndef popular_authors():\n db = psycopg2.connect(\"dbname=news\")\n query = \"SELECT name, count(substr(log.path,10)) as views \" \\\n \"FROM authors INNER JOIN articles \" \\\n \"ON articles.author = authors.id \" \\\n \"INNER JOIN log ON articles.slug = substr(log.path,10) \" \\\n \"WHERE log.path != '/' GROUP BY name ORDER BY views DESC;\"\n cursor = db.cursor()\n cursor.execute(query)\n posts = cursor.fetchall()\n db.close()\n print(\"Most famous author classification:\")\n for author, number in posts:\n print(str(author) + \"- - - -\" + str(number) + \" views\")\n print('\\n')\n\n\n# Outputs the three most popular articles per page request\ndef popular_articles():\n db = psycopg2.connect(\"dbname=news\")\n query = \"SELECT title, count(substr(log.path,10)) as views \" \\\n \"FROM articles INNER JOIN log \" \\\n \"ON articles.slug = substr(log.path,10) \" \\\n \"WHERE log.path != '/' GROUP BY title ORDER BY views DESC LIMIT 3;\"\n cursor = db.cursor()\n cursor.execute(query)\n posts = cursor.fetchall()\n db.close()\n print(\"Top 3 most viewed articles:\")\n for title, number in posts:\n print(str(title) + \"- - - -\" + str(number) + \" views\")\n print('\\n')\n\n\n# Outputs the days on which the error rate was above 1%\ndef errors():\n db = psycopg2.connect(\"dbname=news\")\n query = \"SELECT TO_CHAR(result.date_error, 'Mon DD, YYYY'), \" \\\n \"result.percent_errors FROM result;\"\n cursor = db.cursor()\n cursor.execute(query)\n posts = cursor.fetchall()\n db.close()\n print(\"Day in which the output rate was higher than 1%:\")\n for date, percent in posts:\n print(str(date) + \"- - - -\" + str(percent) + \"% errors\")\n print('\\n')\n\n\nif __name__ == \"__main__\":\n popular_articles()\n popular_authors()\n errors()\n","sub_path":"vagrant/logs_analyzer/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"530895114","text":"from pymongo import MongoClient\nimport utils\n\nclass ModVote:\n client = MongoClient()\n db = client.ModVote\n poll_collection = db.polls\n\n count = 0\n countY = 0\n countN = 0\n current_poll = \"\"\n poll_id = 0\n nicks = []\n poll_list = []\n no_poll_cmds = [\"logs\", \"log\", \"list\", \"history\", \"new\"]\n def __init__(self):\n self.poll_list = list(self.poll_collection.find({}))\n if len(self.poll_list) > 0:\n self.poll_id = self.poll_list[-1]['poll_id'] + 1\n pass\n\n def execute(self, serv, canal, handle, message):\n message = message.lower()\n cmd = utils.extract_command(message)\n \n if message == \"h\" or message == \"help\" or message.strip() == \"\":\n serv.notice(handle,\"Command : !vote \")\n serv.notice(handle,\"actions :\")\n serv.notice(handle,\" new \")\n serv.notice(handle,\" y\")\n serv.notice(handle,\" n\")\n serv.notice(handle,\" results\")\n 
serv.notice(handle,\" clear\")\n serv.notice(handle,\" DESACTIVATED (ask win and Aste ..) log (aliases : history, logs, list)\")\n elif self.current_poll == \"\" and \"new\" in message:\n if utils.extract_message(message, \"\").strip() == \"\":\n serv.privmsg(canal,\"You have to give a name ! You stupid winw\")\n else:\n self.current_poll = utils.extract_message(message, \"\")\n serv.privmsg(canal,\"New poll created : \" + self.current_poll)\n self.insert_database()\n elif self.current_poll != \"\" and \"new\" in message:\n serv.privmsg(canal,\"You can't create a new poll, because poll \" + self.current_poll + \" is not cleared\")\n elif self.current_poll == \"\" and cmd not in self.no_poll_cmds:\n serv.privmsg(canal,\"No poll currently running. !vote new or !vote history\") \n elif message == \"y\" or message == \"n\":\n if handle not in self.nicks:\n self.nicks.append(handle)\n if message == \"y\":\n self.countY += 1\n serv.privmsg(canal,\"Y - \" + str(self.countY) + \"/\" + str(self.countN) + \" - N\") \n elif message == \"n\":\n self.countN += 1\n serv.privmsg(canal,\"Y - \" + str(self.countY) + \"/\" + str(self.countN) + \" - N\")\n else:\n serv.privmsg(canal,\"<\" + handle + \"> : You've already voted for poll \" + str(self.poll_id))\n# elif\n elif message == \"clear\":\n self.poll_list.append({\"poll_id\": self.poll_id, \"name\": self.current_poll, \"y\": str(self.countY), \"n\": str(self.countN), \"nicks\": self.nicks})\n self.update_database()\n self.poll_id += 1\n self.current_poll = \"\"\n self.countY = 0\n self.nicks = []\n self.countN = 0\n serv.privmsg(canal, \"Poll Nb \" + str(self.poll_id - 1) + \" saved and cleared.\")\n elif message == \"results\":\n serv.privmsg(canal, \"Results for poll \" + self.current_poll)\n serv.privmsg(canal, \"Yes : \" + str(self.countY))\n serv.privmsg(canal, \"No : \" + str(self.countN))\n elif message in [\"logs\", \"log\", \"list\", \"history\"] and 1==0:\n if len(self.poll_list) == 0:\n serv.privmsg(canal, \"Empty history.\")\n else:\n serv.privmsg(canal, \"History of \" + str(len(self.poll_list)) + \" polls in notice\")\n for poll in self.poll_list:\n serv.notice(handle, \"Poll \" + str(poll[\"poll_id\"]) + \" : \" + poll[\"name\"])\n serv.notice(handle, \"Y - \" + str(poll[\"y\"]) + \"/\" + str(poll[\"n\"]) + \" - N\")\n else:\n serv.notice(handle,\"Command : !vote \")\n serv.notice(handle,\"actions :\")\n serv.notice(handle,\" new \")\n serv.notice(handle,\" y\")\n serv.notice(handle,\" n\")\n serv.notice(handle,\" results\")\n serv.notice(handle,\" clear\")\n serv.notice(handle,\" DESACTIVATED (ask win and Aste ..) 
log (aliases : history, logs, list)\")\n if self.current_poll != \"\":\n self.update_database()\n\n def update_database(self):\n self.poll_collection.update({'poll_id': self.poll_id}, {'$set': {'y': str(self.countY)}})\n self.poll_collection.update({'poll_id': self.poll_id}, {'$set': {'n': str(self.countN)}})\n self.poll_collection.update({'poll_id': self.poll_id}, {'$set': {'nicks': self.nicks}})\n def insert_database(self):\n self.poll_collection.insert({'poll_id': self.poll_id, 'name': self.current_poll, 'y': str(self.countY), 'n': str(self.countN), 'nicks': self.nicks})\n","sub_path":"modules/ModVote.py","file_name":"ModVote.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"464807395","text":"\"\"\"\n\nUn nombre premier c'est un nombre qui n'a que deux diviseurs 1 et lui-même\n\nThéorème que l'on peut trouver en Maths\nSi il n'y a pas de diviseur d inférieur à racine de n alors n est premier\n\n\"\"\"\n\nimport math\n\nn = int(input(\"Donnez un nombre entier \\n\"))\n\ncount = 1\nd = 2\n\n# flag boolean\nisPrimary = True\n\nwhile True:\n\n if n % d == 0:\n isPrimary = False\n break\n\n d += 1\n\n # Si d est supérieur ou égal à racine carré du nombre n\n # On arrête la recherche des diviseurs d\n if d >= math.sqrt(n):\n isPrimary = True\n break\n\n count += 1\n\n\nif isPrimary:\n print(\"Ce nombre {} est premier\".format(n))\nelse:\n print(\"Ce nombre {} n'est pas premier\".format(n))\n\nprint(\"nombre de boucle(s) {}\".format(count))","sub_path":"B1A/primary.py","file_name":"primary.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"579427736","text":"# -*- coding: utf-8 -*-\r\nfrom selenium import webdriver\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.common.exceptions import NoAlertPresentException\r\nimport unittest, time, re\r\nimport HTMLTestRunner\r\nimport fun\r\n\r\n\r\nclass Login():\r\n def __init__(self):\r\n self.driver = webdriver.Chrome()\r\n self.driver.implicitly_wait(30)\r\n self.base_url = \"http://daily.chinacaring.com/w/man/index\"\r\n\r\n def test_HB(self):\r\n '''睿恒项目管理登录页面'''\r\n driver = self.driver\r\n driver.get(self.base_url)\r\n driver.add_cookie({'name':'proj_mUserNm','value':'zhanghh@chinacaring.com'})\r\n driver.add_cookie({'name': 'proj_mPassword', 'value': 'Chinacaring_'})\r\n # driver.add_cookie({'name': 'JSESSIONID', 'value': '9DEF5066D183617DC3B921C8DE50C65E'}) # 验证码可变\r\n\r\n driver.get(self.base_url)\r\n\r\n time.sleep(1)\r\n\r\n txt = driver.find_element_by_xpath('/html/body/div[1]/div/div/dl/dt/span').text\r\n if txt == '张海红(普通成员)':\r\n print('登录成功!')\r\n\r\n\r\n'''\r\n try:\r\n driver.find_element_by_id('email').send_keys('zhanghh@chinacaring.com')\r\n driver.find_element_by_id('password').send_keys('Zhh123456')\r\n driver.find_element_by_id('button').click() # 这边刻意定位出错,走下面的except分支\r\n except:\r\n # 这里的图片可以使用成变量\r\n driver.get_screenshot_as_file(r\".\\erro.png\") # 若上面不能顺利执行,则会截图失败界面\r\n'''\r\n\r\n\r\nLogin().test_HB()\r\n\r\n\r\n","sub_path":"11.21(zhongDaMDT)/cookie_test.py","file_name":"cookie_test.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"83035589","text":"from game_data import data\nimport random\nimport art\nfrom replit import clear\n\n#Random person A and B from a list. 
Make sure that A is not equal to B\ndef index():\n index_a = random.randint(0,49)\n index_b = random.randint(0,49)\n while index_b == index_a:\n index_b = random.randint(0,49)\n return [index_a, index_b]\n\n#Assign information to candidate A and B\ndef assign(index_of_a,index_of_b):\n name_a = data[index_of_a]['name']\n description_a = data[index_of_a]['description']\n country_a = data[index_of_a]['country']\n\n name_b = data[index_of_b]['name']\n description_b = data[index_of_b]['description']\n country_b= data[index_of_b]['country']\n\n print(f\"Compare A: {name_a}, {description_a}, from {country_a}\")\n print(art.vs)\n print(f\"Against B: {name_b}, {description_b}, from {country_b}\")\n\nscore = 0\nend_game = False\n\nprint(art.logo)\nprint(\"Which instagram account has more followers?\")\n\n#assign indexes of A and B with a single call, so A and B are guaranteed to differ\nnum_a, num_b = index()\n\n#retrieve follower of A and B\nfollower_a = data[num_a]['follower_count']\nfollower_b = data[num_b]['follower_count']\n\nassign(index_of_a = num_a, index_of_b = num_b)\nchoice = input(\"Type 'A' or 'B': \")\n\n#Check answer of the user and calculate score\nwhile not end_game:\n if ((choice == \"A\" and follower_a > follower_b) or (choice == \"B\" and follower_b > follower_a)):\n clear()\n score += 1\n num_a = num_b\n #draw a new B that is different from the new A\n num_b = random.randint(0,49)\n while num_b == num_a:\n num_b = random.randint(0,49)\n follower_a = data[num_a]['follower_count']\n follower_b = data[num_b]['follower_count']\n\n print(art.logo)\n print(f\"You are right. Current score is {score}.\")\n\n assign(index_of_a = num_a, index_of_b = num_b)\n choice = input(\"Type 'A' or 'B': \")\n else:\n clear()\n print(art.logo)\n end_game = True\n print(f\"Game over, you got it wrong. Your final score is {score}\")\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"624508401","text":"import numpy as np\nfrom mdp.dynamics import double_integrator\nfrom mdp.signed_distance import hypercube_int\nfrom mdp.grid_world_ext import Avoid\nfrom functools import partial\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nif __name__ == \"__main__\":\n \n # Grid parameters\n num_nodes = np.array([21, 21])\n s_lims = np.array([[-1,-5],[5,5]]) #state space limits\n num_nodes_a = np.array([2])\n a_lims = np.array([[0],[1]]) #action/control limits\n\n #Dynamical system (double integrator model)\n grav = 9.81 # gravity\n sys_params = {} # parameters of dynamical system\n max_u = 0.2 * grav\n min_u = -0.2 * grav\n sys_params['max_u'] = max_u\n sys_params['min_u'] = min_u\n dynamics = partial(double_integrator, **sys_params)\n \n # Construct avoid region, system should stay within hypercube \n cube_lims = np.array([[0, -3], [4, 3]])\n avoid_func = lambda x: hypercube_int(x, cube_lims=cube_lims)\n \n # Make MDP\n lamb = 0.1 #lambda\n my_world = Avoid(num_nodes, s_lims, num_nodes_a, a_lims, dynamics=dynamics,\n avoid_func=avoid_func, lamb=lamb, sparse=True)\n\n # Compute value function and policy\n v_opt, pi_opt = my_world.v_pi_opt(method='vi')\n\n # Computing analytic safe set\n s_min = s_lims[0]\n s_max = s_lims[1]\n x = np.arange(my_world.num_nodes[0]) * my_world.ds[0] + s_min[0] # grid coordinates along each axis\n y = np.arange(my_world.num_nodes[1]) * my_world.ds[1] + s_min[1]\n u_lims = cube_lims[1]\n l_lims = cube_lims[0]\n \n analytic_1 = [min((-2*min_u*(u_lims[0]-min(x_e, u_lims[0])))**0.5,\n u_lims[1]) for x_e in x]\n analytic_2 = [max(-(2*max_u*(max(x_e,0)))**0.5, l_lims[1]) for x_e in x]\n\n # level sets to be visualized\n 
L = np.max(my_world.reward)\n tau = 2.0 \n c = L * (1 - np.exp(-lamb * tau)) #under approximation level curve\n v_func_conts = [0, c]\n\n # Plot contours of value function\n plt.figure(1)\n CS = plt.contour(x, y, v_opt.reshape(num_nodes).T, levels=v_func_conts)\n \n labels = ['V_$\\\\lambda$ zero level', 'Safe Set Under-approx']\n\n for i in range(len(labels)):\n CS.collections[i].set_label(labels[i])\n plt.plot(x, analytic_1,'b-.', label='Analytic Safe Set')\n plt.plot(x, analytic_2,'b-.')\n plt.title('Value Function Contours')\n plt.legend()\n \n plt.pause(100) ","sub_path":"mdp/examples/double_integrator_example.py","file_name":"double_integrator_example.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"575516741","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom drl_hw1.utils.gym_env import EnvSpec\nfrom torch.utils.data import TensorDataset, DataLoader\n\nclass MLPBaseline:\n def __init__(self, env_spec: EnvSpec, hidden_sizes=(64,64), learning_rate=1e-4, epoch=10, batch=10, seed=None):\n self.feature_size = env_spec.observation_dim + 4\n self.loss_fn = nn.MSELoss(size_average=False)\n self.learning_rate = learning_rate\n self.hidden_sizes = hidden_sizes\n self.epoch = epoch\n # torch.manual_seed(seed)\n self.batch = batch\n self.model = nn.Sequential()\n self.model.add_module('fc_0', nn.Linear(self.feature_size, self.hidden_sizes[0]))\n self.model.add_module('tanh_0', nn.Tanh())\n self.model.add_module('fc_1', nn.Linear(self.hidden_sizes[0], self.hidden_sizes[1]))\n self.model.add_module('tanh_1', nn.Tanh())\n self.model.add_module('fc_2', nn.Linear(self.hidden_sizes[1], 1))\n\n def _features(self, path):\n # compute regression features for the path\n o = np.clip(path[\"observations\"], -10, 10)\n if o.ndim > 2:\n o = o.reshape(o.shape[0], -1)\n l = len(path[\"rewards\"])\n al = np.arange(l).reshape(-1, 1) / 1000.0\n feat = np.concatenate([o, al, al**2, al**3, np.ones((l, 1))], axis=1)\n return feat\n\n def fit(self, paths, return_errors=False):\n\n featmat = np.concatenate([self._features(path) for path in paths])\n returns = np.concatenate([path[\"returns\"] for path in paths])\n\n dataset = TensorDataset(torch.FloatTensor(featmat), torch.FloatTensor(returns))\n data_loader = DataLoader(dataset, batch_size=self.batch, shuffle=True)\n\n optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)\n\n if return_errors:\n error_before = self.get_error(data_loader)\n\n for _ in range(self.epoch):\n for batch_idx, (data, target) in enumerate(data_loader):\n data = Variable(data)\n target = Variable(target).float()\n predictions = self.model(data)\n loss = self.loss_fn(predictions, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if return_errors:\n error_after = self.get_error(data_loader)\n return error_before/np.sum(returns**2), error_after/np.sum(returns**2)\n\n def get_error(self, data_loader):\n error = 0\n for batch_idx, (data, target) in enumerate(data_loader):\n data = Variable(data)\n target = Variable(target).float()\n predictions = self.model(data)\n error += self.loss_fn(predictions, target)\n return error\n\n def predict(self, path):\n if self.model is None:\n return np.zeros(len(path[\"rewards\"]))\n return 
self.model(Variable(torch.FloatTensor(self._features(path)))).data.numpy().reshape(-1)\n","sub_path":"drl_hw1/baselines/mlp_baseline.py","file_name":"mlp_baseline.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"236736916","text":"import os\n\nfrom django import forms\nfrom django.utils.safestring import mark_safe\nfrom django.contrib.gis.geos import Point\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\n\nfrom . import models\n\nclass ReadonlyWidget(forms.HiddenInput):\n\n def __init__(self,f_display=None,*args,**kwargs):\n super().__init__(*args,**kwargs)\n self._f_display = f_display if f_display else lambda value:str(value) if value is not None else \"\"\n\n @property\n def is_hidden(self):\n return False\n\n def render(self, name, value, attrs=None, renderer=None):\n return \"{}{}\".format(super().render(name,value,attrs=attrs,renderer=renderer),self._f_display(value))\n\ntext_readonly_widget = ReadonlyWidget()\nboolean_readonly_widget = ReadonlyWidget(lambda value: '\"True\"' if value else '\"True\"')\n\n\nclass LabeledMixin(object):\n _template_name = None\n def __init__(self,label, *args,**kwargs):\n self.label = mark_safe(label)\n super().__init__(*args,**kwargs)\n\n @property\n def template_name(self):\n if not self._template_name:\n self._template_name = os.path.splitext(os.path.split(super().template_name)[1])\n self._template_name = \"{}_labeled{}\".format(*self._template_name)\n\n return self._template_name\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name,value,attrs)\n context['widget']['label'] = self.label\n return context\n\nclass LabeledNumberInput(LabeledMixin,forms.NumberInput):\n pass\n\nclass PointWidget(forms.MultiWidget):\n\n def __init__(self, attrs=None):\n widgets = [\n LabeledNumberInput(\"Longitude\",attrs={\"step\":0.001}),\n LabeledNumberInput(\"Latitude\",attrs={\"step\":0.001})\n ]\n super().__init__(widgets, attrs)\n\n def decompress(self,value):\n if value:\n return value.x,value.y\n return [None,None]\n\n def value_from_datadict(self,data,files,name):\n x,y = super().value_from_datadict(data,files,name)\n if x and y:\n return Point(float(x),float(y))\n else:\n return None\nclass NetworkEditForm(forms.ModelForm):\n repeaters = forms.ModelMultipleChoiceField(queryset=models.Repeater.objects.all().order_by(\"district__name\",\"site_name\"),widget=FilteredSelectMultiple(verbose_name='Repeater',is_stacked=False),required=False)\n\n def __init__(self,*args,**kwargs):\n super().__init__(*args,**kwargs)\n if self.instance and self.instance.pk:\n self.initial[\"repeaters\"] = models.Repeater.objects.filter(network=self.instance)\n if \"name\" in self.fields :\n self.fields[\"name\"].widget = text_readonly_widget\n\n def save(self,*args,**kwargs):\n obj = super().save(*args,**kwargs)\n if kwargs[\"commit\"]:\n self.save_repeaters()\n\n return obj\n\n \n\n def save_repeaters(self):\n repeaters = self.cleaned_data[\"repeaters\"]\n repeater_ids = [obj.id for obj in repeaters] if repeaters else []\n for repeater in models.Repeater.objects.filter(network=self.instance).exclude(id__in=repeater_ids):\n repeater.network = None\n repeater.save(update_fields=[\"network\"])\n for repeater in models.Repeater.objects.filter(id__in=repeater_ids).exclude(network=self.instance):\n repeater.network = self.instance\n repeater.save(update_fields=[\"network\"])\n\n class Meta:\n model = models.Network\n fields = \"__all__\"\n widgets 
\nclass RepeaterEditForm(forms.ModelForm):\n class Meta:\n model = models.Repeater\n fields = \"__all__\"\n widgets = {\n \"point\":PointWidget(),\n \"link_point\":PointWidget(),\n \"tx_frequency\":forms.NumberInput(attrs={\"step\":0.001}),\n \"rx_frequency\":forms.NumberInput(attrs={\"step\":0.001}),\n \"ctcss_tx\":forms.NumberInput(attrs={\"step\":0.001}),\n \"ctcss_rx\":forms.NumberInput(attrs={\"step\":0.001}),\n \"sss_description\":forms.TextInput(attrs={\"style\":\"width:80%\"}),\n \"link_description\":forms.TextInput(attrs={\"style\":\"width:80%\"}),\n }\n\nclass OptionEditForm(forms.ModelForm):\n def __init__(self,*args,**kwargs):\n super().__init__(*args,**kwargs)\n if self.instance :\n if self.instance.pk:\n if \"name\" in self.fields :\n self.fields[\"name\"].widget = text_readonly_widget\n \n class Meta:\n model = models.Option\n fields = \"__all__\"\n widgets = {\n \"comments\":forms.Textarea(attrs={\"style\":\"width:80%\"}),\n }\n\n","sub_path":"radio/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"330368374","text":"import unittest\n\nfrom src.question_one import QuestionOne\n\n\nclass TestQuestionOne(unittest.TestCase):\n \"\"\" It tests methods for Question One in the exercise \"\"\"\n\n def test_sorted_letters_takes_text_with_more_than_two_alphabets(self):\n \"\"\"Test that the provided text has a minimum of 2 characters in it\"\"\"\n instance = QuestionOne(\"I am a test here...\")\n self.assertTrue(\n len(instance.sorted_letters()) > 2)\n\n def test_sorted_letters_raises_exception_on_empty_text(self):\n \"\"\"Test that sorted_letters raises a ValueError on empty text\"\"\"\n instance = QuestionOne(\"\")\n with self.assertRaises(ValueError):\n instance.sorted_letters()\n\n def test_sorted_letters_returns_lowercase_string(self):\n \"\"\"Test that the returned string is all lowercase values\"\"\"\n text = \"This is a test\"\n instance = QuestionOne(text)\n self.assertTrue(instance.sorted_letters().islower())\n\n def test_sorted_letters_returns_sorted_string_of_alphabets(self):\n \"\"\"Test if the returned value is a sorted string of the alphabets\n in the original string\"\"\"\n text = \"cool gang\"\n instance = QuestionOne(text)\n self.assertEqual(instance.sorted_letters(), \"acgglnoo\")\n\n def test_sorted_letters_ignores_punctuation_marks(self):\n \"\"\"Test if the sorted_letters method ignores punctuation marks/characters\"\"\"\n test_input = \"@cool gang#$%!!\"\n expected_result = \"acgglnoo\"\n instance = QuestionOne(text=test_input)\n self.assertEqual(instance.sorted_letters(), expected_result)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/tests_for_question_one.py","file_name":"tests_for_question_one.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"488911162","text":"import requests\nimport pdftotext\nurl_head = 'https://www.gstatic.com/covid19/mobility/'\nurl_tail = 'Mobility_Report_en.pdf'\nlocal_path = '/Users/karlkatzen/Documents/code/covid/data/'\ndef _process_data_string(line):\n return [int(string.strip().split()[0][:-1]) if 'baseline' in string else 'None' for string in line.split(' ') if len(string) != 0]\ndef _identify_subdivision_line(lines):\n leng = len(lines)\n for i in range(2,leng):\n if 'Retail' in lines[i]:\n return i - 1\n return -1\ndef _find_data_lines(lines):\n return 
[_process_data_string(line) for line in lines if ('compared to baseline' in line or 'Not enough data' in line)]\ndef mobility_report_extract(country_code, subdivision = None, date = '2020-03-29'):\n if subdivision:\n url = url_head + date + '_' + country_code + '_' + subdivision.replace(' ','_') + '_' + url_tail\n top_level_name = subdivision\n else:\n url = url_head + date + '_' + country_code + '_' + url_tail\n top_level_name = country_code\n r = requests.get(url)\n if r.status_code == 200:\n file_path = local_path + top_level_name.replace(' ','_') + '.pdf'\n with open(file_path, 'wb') as f:\n f.write(r.content)\n with open(file_path, 'rb') as f:\n pdf = pdftotext.PDF(f)\n leng = len(pdf)\n first = pdf[0]\n second = pdf[1]\n res = {}\n top_level_data = []\n messages = first.split('Mobility trends')[1:]\n for message in messages:\n top_level_data.append(int(message.split('\\n')[1][:-1]))\n messages = second.split('Mobility trends')[1:]\n for message in messages:\n top_level_data.append(int(message.split('\\n')[1][:-1]))\n res[top_level_name] = top_level_data\n for i in range(2,leng-1):\n lines=pdf[i].split('\\n')\n second_sudivision_line_num = _identify_subdivision_line(lines)\n data_res = _find_data_lines(lines)\n data_res[0].extend(data_res[1])\n res[lines[0].strip()] = data_res[0]\n if second_sudivision_line_num != -1:\n data_res[2].extend(data_res[3])\n res[lines[second_sudivision_line_num].strip()] = data_res[2]\n return res\n else:\n print(f\"Doesn't work for {top_level_name} at {date}\")\n return None\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"616500838","text":"#\n# Copyright 2017-2023- Swiss Data Science Center (SDSC)\n# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and\n# Eidgenössische Technische Hochschule Zürich (ETHZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test plan database gateways.\"\"\"\n\nfrom datetime import datetime, timezone\n\nfrom renku.domain_model.workflow.composite_plan import CompositePlan\nfrom renku.domain_model.workflow.plan import Plan\nfrom renku.infrastructure.gateway.plan_gateway import PlanGateway\n\n\ndef test_plan_gateway_add_get(project_with_injection):\n \"\"\"Test getting a plan by id.\"\"\"\n\n plan = Plan(id=Plan.generate_id(), name=\"plan\", command=\"\")\n composite_plan = CompositePlan(id=CompositePlan.generate_id(), name=\"composite-plan\", plans=[plan])\n\n plan_gateway = PlanGateway()\n\n plan_gateway.add(plan)\n plan_gateway.add(composite_plan)\n\n assert plan == plan_gateway.get_by_id(plan.id)\n assert composite_plan == plan_gateway.get_by_id(composite_plan.id)\n\n assert plan == plan_gateway.get_by_name(\"plan\")\n assert composite_plan == plan_gateway.get_by_name(\"composite-plan\")\n\n assert not plan_gateway.get_by_name(plan.id)\n assert not plan_gateway.get_by_name(composite_plan.id)\n\n assert not plan_gateway.get_by_id(\"plan\")\n assert not 
plan_gateway.get_by_id(\"composite-plan\")\n\n\ndef test_plan_gateway_newest_plans(project_with_injection):\n \"\"\"Test getting newest plans.\"\"\"\n plan = Plan(id=Plan.generate_id(), name=\"plan\", command=\"\")\n plan2 = Plan(id=Plan.generate_id(), name=\"plan\", command=\"\")\n invalidated_plan = Plan(\n id=Plan.generate_id(), name=\"invalidated_plan\", command=\"\", date_removed=datetime.now(timezone.utc)\n )\n invalidated_plan2 = Plan(\n id=Plan.generate_id(), name=\"invalidated_plan\", command=\"\", date_removed=datetime.now(timezone.utc)\n )\n\n plan_gateway = PlanGateway()\n\n plan_gateway.add(plan)\n plan_gateway.add(plan2)\n plan_gateway.add(invalidated_plan)\n plan_gateway.add(invalidated_plan2)\n\n newest_plans_by_names = {p.id for p in plan_gateway.get_newest_plans_by_names().values()}\n\n assert {plan2.id} == newest_plans_by_names\n\n newest_plans_by_names_with_deleted = {\n p.id for p in plan_gateway.get_newest_plans_by_names(include_deleted=True).values()\n }\n\n assert {plan2.id, invalidated_plan2.id} == newest_plans_by_names_with_deleted\n","sub_path":"tests/core/metadata/test_plan_gateway.py","file_name":"test_plan_gateway.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"493118437","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 16 11:19:14 2018\n\n@author: Dartoon\n\"\"\"\nimport numpy as np\nimport astropy.io.fits as pyfits\n# import matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport sys, os, copy, glob\nfrom subprocess import call\nfrom matplotlib.colors import LogNorm\nfrom decomprofile.tools.measure_tools import find_loc_max, measure_FWHM, twoD_Gaussian, fit_data_twoD_Gaussian #, esti_bgkstd\nfrom decomprofile.data_process import DataProcess\nfrom decomprofile.fitting_specify import FittingSpeficy\nfrom decomprofile.fitting_process import FittingProcess\n\nimage_ID = sys.argv[1] #'141637.44+003352.2' \nimage_RA = float(sys.argv[2]) #214.15602111816406\nimage_DEC = float(sys.argv[3]) #0.5645210146903992\n\n# image_ID ='100043.13+020637.2' \n# image_RA = 150.1797789\n# image_DEC = 2.110369603\n\n# image_ID = '141637.44+003352.2' \n# image_RA = 214.15602111816406\n# image_DEC = 0.5645210146903992\n\n# image_ID = '000047.43+004337.1' \n# image_RA = 0.1976759185\n# image_DEC = 0.7270061463\n\n# image_ID ='010048.81+021604.0'\n# image_RA = 15.2033809 \n# image_DEC = 2.2677925\n\n# # run_line = [5069, 5070, 5086, 5093, 5096, 5110][1]\n# run_line = 0\n# targets_info = open(\"QSO_id_file.txt\", \"r\")\n# targets_info = targets_info.read().split('\\n')\n# info = copy.deepcopy(targets_info[run_line])\n# info = info.split(' ')\n# info = [info[i] for i in range(len(info)) if info[i] != '']\n# image_ID, image_RA, image_DEC = info[:3]\n# image_RA, image_DEC = float(image_RA), float(image_DEC)\n\n#%%\nprint(image_ID, image_RA, image_DEC)\n\ndeep_seed = True #Set as True to put more seed and steps to fit,\nshow_plot = 0\nfit_data = True #If you simply want to do the search without fitting, set False\n\nimage_folder = './images_directory/'\n \nif os.path.exists('fit_result_detect')==False:\n os.mkdir('fit_result_detect')\n\nfilename_ascii = 'RESULTS/' + image_ID + '_result.txt'\n\n# band_run_list = [2,0,1,3,4] #run I band first\n# band_seq = ['G', 'R', 'I', 'Z', 'Y']\nband_run_list = [0] #run I band first\nband_seq = ['I']\n\nfilename_list = [image_ID+'_HSC-{0}.fits'.format(band_seq[i]) for i in 
range(len(band_run_list))]\nrun_list = copy.deepcopy(band_run_list)\n\ndata_process_list, zp_list = [], []\n\nfor i in range(len(band_seq)):\n # The pixel scale is all 0.168\n if len(glob.glob(image_folder+filename_list[i])) == 0:\n print(filename_list[i] + \" DOES NOT EXIST!!!\")\n QSO_im, err_map, PSF, _, _, qso_center, fr_c_RA_DEC = [], [], [], [], [], [], []\n run_list.remove(i)\n else:\n fitsFile = pyfits.open(image_folder+filename_list[i])\n fov_image= fitsFile[1].data\n header = fitsFile[1].header # if target position is added in WCS, the header should have the wcs information, i.e. header['EXPTIME']\n err_data= fitsFile[3].data ** 0.5\n \n file_header0 = fitsFile[0].header\n # FLUXMAG0 = file_header0['FLUXMAG0']\n zp = 27.0 #2.5 * np.log10(FLUXMAG0) # This is something Xuheng can't make sure.\n \n data_process_i = DataProcess(fov_image = fov_image, fov_noise_map = err_data, target_pos = [image_RA, image_DEC],\n pos_type = 'wcs', header = header,\n rm_bkglight = True, if_plot=False, zp = zp)\n data_process_i.noise_map = err_data\n data_process_i.generate_target_materials(radius=40, create_mask = False, nsigma=2.8,\n exp_sz= 1.2, npixels = 15, if_plot=False) \n PSF = pyfits.getdata(image_folder+filename_list[i].split('.fits')[0]+'_psf.fits')\n if len(PSF) != 0 and PSF.shape[0] != PSF.shape[1]:\n cut = (PSF.shape[0] - PSF.shape[1]) // 2 # integer division: slice indices must be ints\n if cut>0:\n PSF = PSF[cut:-cut,:]\n elif cut<0:\n PSF = PSF[:,-cut:cut]\n PSF /= PSF.sum()\n if PSF.shape[0] != PSF.shape[1]:\n raise ValueError(\"PSF shape is not a square.\")\n data_process_i.PSF_list = [PSF]\n data_process_list.append(data_process_i)\n zp_list.append(zp)\n\n#%%\nfor k in run_list: #['G', 'R', 'I', 'Z', 'Y']\n QSO_img = data_process_list[k].target_stamp\n neighborhood_size, threshold = 4, 4\n x, y = find_loc_max(QSO_img, neighborhood_size = neighborhood_size, threshold = threshold)\n arr_x, arr_y = np.asarray(x, dtype=float), np.asarray(y, dtype=float)\n center = len(QSO_img)/2\n bool_x, bool_y = (arr_x>(center-18))*(arr_x<(center+18)), (arr_y>(center-18))*(arr_y<(center+18))\n arr_x = arr_x[bool_x*bool_y]\n arr_y = arr_y[bool_x*bool_y]\n #Remove arr_x's element if the corresponding pixel is too faint:\n arr_bool = np.array([True] * len(arr_x), dtype=bool)\n for i in range(len(arr_x)):\n if QSO_img[int(arr_y[i]), int(arr_x[i])] < 3.0 :\n arr_bool[i] = False\n arr_x = arr_x[arr_bool]\n arr_y = arr_y[arr_bool] # keep the x and y peak lists aligned after filtering\n \n qsoid = filename_list[k].split('.fits')[0]\n claim = ''\n if len(arr_x)>=2:\n if os.path.exists('fit_result_detect/{0}/'.format(qsoid))==False:\n os.mkdir('fit_result_detect/{0}/'.format(qsoid))\n claim = claim+\"\\nThis {0} is likely to be a {1} system (based on multi-peaks)!!!\".format(filename_list[k], 'BH'*len(arr_x))\n plt.imshow(QSO_img, origin='lower', norm=LogNorm())\n for i in range(len(arr_x)):\n plt.text(arr_x[i], arr_y[i],'BH{0}'.format(i))\n plt.plot(arr_x[i], arr_y[i],'ro')\n plt.savefig('fit_result_detect/{0}/proof-BHBH.pdf'.format(qsoid))\n if show_plot == 1:\n plt.show()\n else:\n plt.close()\n\n #Fit central as twoD Gaussian Anyway. \n twoD_Gau_p_PSF = fit_data_twoD_Gaussian(data_process_list[k].PSF_list[0])\n frz = int(center/2)\n twoD_Gau_p_data = fit_data_twoD_Gaussian(QSO_img[frz:-frz,frz:-frz])\n q_PSF = twoD_Gau_p_PSF[3]/twoD_Gau_p_PSF[4]\n q_PSF = min(q_PSF, 1/q_PSF)\n q_data = twoD_Gau_p_data[3]/twoD_Gau_p_data[4]\n q_data = min(q_data, 1/q_data)\n if abs((q_data-q_PSF)/q_PSF) > 0.15 : #!!! 
Set the level as 15% mismatch to PSF\n if os.path.exists('fit_result_detect/{0}/'.format(qsoid))==False:\n os.mkdir('fit_result_detect/{0}/'.format(qsoid))\n claim = claim+\"\\nThis {0} is also likely to have closed dual AGN pair (based on FWHM)!!!\".format(filename_list[k])\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12.5, 10))\n ax1.imshow(QSO_img[frz:-frz,frz:-frz], origin='lower', cmap='gist_heat', norm=LogNorm(), vmin=1.e-4 , vmax = np.max(QSO_img) )\n ax1.set_title('Data')\n QSO_2D_fitted = twoD_Gaussian(len(QSO_img[frz:-frz,frz:-frz]), *twoD_Gau_p_data)\n ax2.imshow(QSO_2D_fitted.reshape(len(QSO_img[frz:-frz,frz:-frz]), len(QSO_img[frz:-frz,frz:-frz])), origin='lower', cmap='gist_heat', norm=LogNorm(), vmin=1.e-4 , vmax = np.max(QSO_img) )\n ax2.set_title('fitted Gaussian Image')\n \n ax3.imshow(data_process_list[k].PSF_list[0], origin='lower', cmap='gist_heat', norm=LogNorm())\n ax3.set_title('PSF image')\n plt.savefig('fit_result_detect/{0}/proof-2close.pdf'.format(qsoid))\n if show_plot == 1:\n plt.show()\n else:\n plt.close()\n pyfits.PrimaryHDU(data_process_list[k].PSF_list[0]).writeto('fit_result_detect/{0}/{1}_PSF.fits'.format(qsoid,filename_list[k]),overwrite=True)\n\n if os.path.exists('fit_result_detect/{0}/'.format(qsoid)) == True and fit_data == True:\n print(\"Fiting the: \"+ filename_list[k])\n print(claim)\n print(\"Comparing the fitting Chisq:\")\n write_result = open('fit_result_detect/{0}/fit_result.txt'.format(qsoid),'w') \n write_result.write(\"#The fitting information:\\n\")\n #==============================================================================\n # fitting the QSO as a BH + Sersic \n #==============================================================================\n for ft in range(1): #The fitting rounds for each band\n print(\"fitting the QSO as one BH + Sersic \")\n fit_time = ft\n tag = 'fit_result_detect/{0}/fit_image0_PS+Sersic_fittime-{1}'.format(qsoid,fit_time+1)\n _fit_sepc = FittingSpeficy(data_process_list[k])\n _fit_sepc.prepare_fitting_seq(point_source_num = 1)\n _fit_sepc.build_fitting_seq()\n _fit_run = FittingProcess(_fit_sepc, savename = tag)\n _fit_run.run(algorithm_list = ['PSO'], setting_list= [None]) \n _fit_run.translate_result()\n _fit_run.plot_final_qso_fit(target_ID = qsoid, save_plot = True, show_plot = show_plot)\n source_result_0, ps_result_0 = _fit_run.final_result_galaxy, _fit_run.final_result_ps\n host_mag, AGN_mag = source_result_0[0]['magnitude'], ps_result_0[0]['magnitude']\n c_miss = np.sqrt((source_result_0[0]['center_x']-ps_result_0[0]['ra_image'])**2+(source_result_0[0]['center_y']-ps_result_0[0]['dec_image'])**2)\n reduced_Chisq_0 = _fit_run.reduced_Chisq\n write_result.write(\"1. Fitting as a regular QSO,i.e. 
one PS + Sersic:\\n\")\n write_result.write(\"Reduced Chisq: \"+repr(round(reduced_Chisq_0,3)))\n write_result.write(\"\\nHost mag: \"+repr(round(host_mag,3)))\n write_result.write(\"\\nAGN mag: \"+repr(round(AGN_mag,3)))\n write_result.write(\"\\nPS Sersic center offset (arcsec): \"+repr(round(float(c_miss),3)) + \"; \")\n write_result.write(\"\\n=======================================================\\n\")\n tag_name = tag + \"_qso_final_plot\"\n print(call(\"mv {0} {1}\".format(tag_name+'.pdf', tag+\"_chisq_\"+repr(round(reduced_Chisq_0,1)))+'.pdf', shell=True))\n #==============================================================================\n # fitting the QSO as a BHBH \n #==============================================================================\n for ft in range(1):\n print(\"fitting the QSO as {0} point sources\".format(len(arr_x)))\n num_BHBH = max(len(arr_x), 2)\n fit_time = ft #len(glob.glob(\"fit_result_detect/{0}/fit_image_*_SB_profile_annuli*.pdf\".format(file_name_seq[k])))\n tag = 'fit_result_detect/{0}/fit_image1_PSPS_fittime-{1}'.format(qsoid,fit_time+1)\n _fit_sepc = FittingSpeficy(data_process_list[k])\n del _fit_sepc.apertures[0]\n _fit_sepc.prepare_fitting_seq(point_source_num = num_BHBH, neighborhood_size = neighborhood_size, threshold = threshold)\n _fit_sepc.build_fitting_seq()\n _fit_run = FittingProcess(_fit_sepc, savename = tag)\n _fit_run.run(algorithm_list = ['PSO'], setting_list= [None]) \n _fit_run.translate_result()\n _fit_run.plot_final_qso_fit(target_ID = qsoid, save_plot = True, show_plot = show_plot) \n source_result_1, ps_result_1 = _fit_run.final_result_galaxy, _fit_run.final_result_ps \n AGN_mags = [ps_result_1[i]['magnitude'] for i in range(len(ps_result_1))]\n if len(ps_result_1) == 2:\n c_miss = np.sqrt((ps_result_1[0]['ra_image']-ps_result_1[1]['ra_image'])**2+(ps_result_1[0]['dec_image']-ps_result_1[1]['dec_image'])**2)\n elif len(ps_result_1) > 2:\n c_miss = [np.sqrt((ps_result_1[0]['ra_image']-ps_result_1[1]['ra_image'])**2+(ps_result_1[0]['dec_image']-ps_result_1[1]['dec_image'])**2)]\n c_miss.append(np.sqrt((ps_result_1[1]['ra_image']-ps_result_1[2]['ra_image'])**2+(ps_result_1[1]['dec_image']-ps_result_1[2]['dec_image'])**2))\n c_miss.append(np.sqrt((ps_result_1[2]['ra_image']-ps_result_1[0]['ra_image'])**2+(ps_result_1[2]['dec_image']-ps_result_1[0]['dec_image'])**2))\n c_miss = np.average(c_miss)\n reduced_Chisq_1 = _fit_run.reduced_Chisq\n write_result.write(\"2. 
Fitting as {0}PS:\\n\".format(len(ps_result_1)))\n write_result.write(\"Reduced Chisq: \"+repr(round(reduced_Chisq_1,3)))\n write_result.write(\"\\nAGN mag: \")\n for i in range(len(ps_result_1)):\n write_result.write(repr(round(AGN_mags[i],3))+' ')\n write_result.write(\"\\n\")\n for i in range(len(ps_result_1)):\n write_result.write(\"AGN{0} position: \".format(i))\n write_result.write(\"x: \"+repr(round(ps_result_1[i]['ra_image'][0],3))+' y: '+repr(round(ps_result_1[i]['dec_image'][0],3))+ \"; \") \n write_result.write(\"\\nPS PS center offset (arcsec): \"+repr(round(float(c_miss),3)))\n write_result.write(\"\\n=======================================================\\n\")\n tag_name = tag + \"_qso_final_plot\" \n print(call(\"mv {0} {1}\".format(tag_name+'.pdf', tag+\"_chisq_\"+repr(round(reduced_Chisq_1,1)))+'.pdf', shell=True))\n #==============================================================================\n # fitting the QSO as a BHBH + Sersic \n #==============================================================================\n for ft in range(1):\n print(\"fitting the QSO as {0} point sources + Sersic\".format(len(arr_x)))\n num_BHBH = max(len(arr_x), 2)\n fit_time = ft\n tag = 'fit_result_detect/{0}/fit_image2_PSPS+Sersic_fittime-{1}'.format(qsoid,fit_time+1)\n _fit_sepc = FittingSpeficy(data_process_list[k])\n _fit_sepc.prepare_fitting_seq(point_source_num = num_BHBH, neighborhood_size = neighborhood_size, threshold = threshold)\n _fit_sepc.build_fitting_seq()\n _fit_run = FittingProcess(_fit_sepc, savename = tag)\n _fit_run.run(algorithm_list = ['PSO'], setting_list= [None]) \n _fit_run.translate_result()\n _fit_run.plot_final_qso_fit(target_ID = qsoid, save_plot = True, show_plot = show_plot) \n source_result_2, ps_result_2 = _fit_run.final_result_galaxy, _fit_run.final_result_ps \n host_mag = source_result_2[0]['magnitude']\n AGN_mags = [ps_result_2[i]['magnitude'] for i in range(len(ps_result_2))]\n if len(ps_result_2) == 2:\n c_miss = np.sqrt((ps_result_2[0]['ra_image']-ps_result_2[1]['ra_image'])**2+(ps_result_2[0]['dec_image']-ps_result_2[1]['dec_image'])**2)\n elif len(ps_result_2) > 2:\n c_miss = [np.sqrt((ps_result_2[0]['ra_image']-ps_result_2[1]['ra_image'])**2+(ps_result_2[0]['dec_image']-ps_result_2[1]['dec_image'])**2)]\n c_miss.append(np.sqrt((ps_result_2[1]['ra_image']-ps_result_2[2]['ra_image'])**2+(ps_result_2[1]['dec_image']-ps_result_2[2]['dec_image'])**2))\n c_miss.append(np.sqrt((ps_result_2[2]['ra_image']-ps_result_2[0]['ra_image'])**2+(ps_result_2[2]['dec_image']-ps_result_2[0]['dec_image'])**2))\n c_miss = np.average(c_miss)\n reduced_Chisq_2 = _fit_run.reduced_Chisq\n write_result.write(\"3. 
Fitting as {0}PS + Sersic:\\n\".format(len(ps_result_2)))\n write_result.write(\"Reduced Chisq: \"+repr(round(reduced_Chisq_2,3)))\n write_result.write(\"\\nHost mag: \"+repr(round(host_mag,3)))\n write_result.write(\"\\nAGN mag: \")\n for i in range(len(ps_result_2)):\n write_result.write(repr(round(AGN_mags[i],3))+' ')\n write_result.write(\"\\n\")\n for i in range(len(ps_result_2)):\n write_result.write(\"AGN{0} position: \".format(i))\n write_result.write(\"x: \"+repr(round(ps_result_2[i]['ra_image'][0],3))+' y: '+repr(round(ps_result_2[i]['dec_image'][0],3))+ \"; \")\n write_result.write(\"\\nPS PS center offset (arcsec): \"+repr(round(float(c_miss),3)))\n write_result.write(\"\\n=======================================================\\n\")\n tag_name = tag + \"_qso_final_plot\" \n image_host_2, image_ps_2 = _fit_run.image_host_list, _fit_run.image_ps_list\n objs_img = np.zeros_like(image_host_2[0])\n if len(image_host_2)>1:\n for i in range(1,len(image_host_2)):\n objs_img += image_host_2[i]\n fitsFile = pyfits.open(image_folder+filename_list[k])\n file_header = copy.deepcopy(fitsFile[1].header)\n qso_center = data_process_list[k].target_pos\n file_header['CRPIX1'] = file_header['CRPIX1']-qso_center[0]+len(QSO_img)/2\n file_header['CRPIX2'] = file_header['CRPIX2']-qso_center[1]+len(QSO_img)/2\n pyfits.PrimaryHDU(QSO_img-image_ps_2-objs_img,header=file_header).writeto('fit_result_detect/{0}/data-BHBH(host).fits'.format(qsoid),overwrite=True)\n print(call(\"mv {0} {1}\".format(tag_name+'.pdf', tag+\"_chisq_\"+repr(round(reduced_Chisq_2,1)))+'.pdf', shell=True) )\n \n #Plot the PSPS potision in the step3 and save plot\n plt.imshow(QSO_img, origin='lower', norm=LogNorm())\n for i in range(len(ps_result_2)):\n x, y = -ps_result_2[i]['ra_image'][0]/data_process_list[k].deltaPix + len(QSO_img)/2, ps_result_2[i]['dec_image'][0]/data_process_list[k].deltaPix+len(QSO_img)/2\n plt.text(x, y,' PS{0}'.format(i))\n plt.plot(x, y,'ro')\n plt.savefig('fit_result_detect/{0}/step3_fitted_PSPSpos.pdf'.format(qsoid))\n if show_plot == 1:\n plt.show()\n else:\n plt.close()\n \n write_result.close() \n#os.system('say \"your program has finished\"')\nprint(\"Program has finished\")\n#\n","sub_path":"projects/Search_Duals/find_binaries_v5.py","file_name":"find_binaries_v5.py","file_ext":"py","file_size_in_byte":17591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"379656815","text":"\"\"\"Definition of the ArticleFolder content type\n\"\"\"\n\nfrom zope.interface import implements, directlyProvides\nfrom zope.component import adapter, getUtility, getMultiAdapter\n\nfrom Acquisition import aq_inner, aq_parent\n\n# Import conditionally, so we don't introduce a hard depdendency\ntry:\n from plone.i18n.normalizer.interfaces import IUserPreferredURLNormalizer\n from plone.i18n.normalizer.interfaces import IURLNormalizer\n URL_NORMALIZER = True\nexcept ImportError:\n URL_NORMALIZER = False\n\nfrom Products.Archetypes import atapi\nfrom Products.ATContentTypes.content import folder\nfrom Products.ATContentTypes.content import schemata\n\nfrom weka.content import contentMessageFactory as _\nfrom weka.content.interfaces import IArticleFolder\nfrom weka.content.config import PROJECTNAME\n\nArticleFolderSchema = folder.ATFolderSchema.copy() + atapi.Schema((\n\n # -*- Archetypes field definitions here ... -*-\n \n atapi.StringField(\n name='seoTitle',\n searchable=True,\n widget=atapi.StringWidget(\n label=_(u\"SEO Title\"),\n description=_(u\"Enter SEO title. 
This will be used for title-to-id generation and will appear as part of the url.\"),\n ),\n required=True,\n ),\n\n))\n\n# Set storage on fields copied from ATFolderSchema, making sure\n# they work well with the python bridge properties.\n\nArticleFolderSchema['title'].storage = atapi.AnnotationStorage()\nArticleFolderSchema['description'].storage = atapi.AnnotationStorage()\nArticleFolderSchema['seoTitle'].storage = atapi.AnnotationStorage()\n\nschemata.finalizeATCTSchema(ArticleFolderSchema, folderish=True, moveDiscussion=False)\n\nclass ArticleFolder(folder.ATFolder):\n \"\"\"A folder containing articles\"\"\"\n implements(IArticleFolder)\n\n portal_type = \"ArticleFolder\"\n _at_rename_after_creation = True\n schema = ArticleFolderSchema\n \n # In order to make editing this folder more straightforward\n # we move the seoTitle field closer to the actual title [Chris]\n # code section after schema\n schema.moveField('seoTitle', after='title')\n # end code section after schema\n\n title = atapi.ATFieldProperty('title')\n seo_title = atapi.ATFieldProperty('seoTitle')\n description = atapi.ATFieldProperty('description')\n text = atapi.ATFieldProperty('text')\n \n def generateNewId(self):\n \"\"\"Suggest an id for this object based on seo title.\n This id is used when automatically renaming an object after creation.\n \"\"\"\n title = self.seo_title\n # Can't work w/o a title\n if not title:\n return None\n\n # Don't do anything without the plone.i18n package\n if not URL_NORMALIZER:\n return None\n\n if not isinstance(title, unicode):\n charset = 'utf-8'\n title = unicode(title, charset)\n\n request = getattr(self, 'REQUEST', None)\n if request is not None:\n return IUserPreferredURLNormalizer(request).normalize(title)\n\n return getUtility(IURLNormalizer).normalize(title)\n\natapi.registerType(ArticleFolder, PROJECTNAME)\n","sub_path":"src/weka.content/weka/content/content/articlefolder.py","file_name":"articlefolder.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"389522372","text":"\"\"\"\nVAE for MNIST\n\"\"\"\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# flag settings\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('model_dir', './model', \"\"\"Path to the model directory.\"\"\")\ntf.app.flags.DEFINE_string('data_dir', './data', \"\"\"Path to the data directory.\"\"\")\ntf.app.flags.DEFINE_integer('max_iter', 10000, \"\"\"Maximum number of iterations\"\"\")\ntf.app.flags.DEFINE_integer('batch_size', 64, \"\"\"Mini-Batch Size\"\"\")\n\ndef cnn_model_fn(features, labels, mode):\n\n with tf.name_scope(\"ENCODE\"):\n # dense layer #1\n dense = tf.layers.dense(\n inputs=features['x'],\n units=400,\n activation=tf.nn.relu,\n name='DENSE-1'\n )\n\n # dense layer #2 (Mean)\n latent_mean = tf.layers.dense(\n inputs=dense,\n units=20,\n name='MEAN'\n )\n\n # dense layer #3 (LogVariance)\n # Instead of using var as the latent variable, since\n # Log(Var) may lead to INF in initialization\n latent_logvar = tf.layers.dense(\n inputs=dense,\n units=20,\n name='LOGVAR'\n )\n\n with tf.name_scope(\"REPRAM\"):\n if mode == tf.estimator.ModeKeys.TRAIN:\n eps = tf.random_normal(tf.shape(latent_logvar))\n z = tf.add(latent_mean, tf.multiply(eps, tf.exp(0.5 * latent_logvar))) # std = exp(logvar / 2), matching the KL term below\n else:\n z = latent_mean\n \n with tf.name_scope(\"DECODE\"):\n\n # dense layer #4\n dense = tf.layers.dense(\n inputs=z,\n units=400,\n activation=tf.nn.relu,\n 
name='DENSE-2'\n )\n\n recon_img = tf.layers.dense(\n inputs=dense,\n units=28 * 28,\n activation=tf.nn.sigmoid,\n name='RECON'\n )\n\n tf.summary.image('input', tf.reshape(features['x'], [-1, 28, 28, 1]), max_outputs=2)\n tf.summary.image('output', tf.reshape(recon_img, [-1, 28, 28, 1]), max_outputs=2)\n\n with tf.name_scope(\"LOSS\"):\n recon_err = tf.losses.log_loss(features['x'], recon_img, reduction=tf.losses.Reduction.SUM)\n regul = -0.5 * tf.reduce_sum(1 + latent_logvar - tf.square(latent_mean) - tf.exp(latent_logvar))\n loss = recon_err + regul\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss)\n \n elif mode == tf.estimator.ModeKeys.TRAIN:\n # adam optimizer performs well with drop-out layer\n optimizer = tf.train.AdamOptimizer(\n learning_rate=0.001\n )\n\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step()\n )\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\ndef main(_):\n # create train input function\n mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)\n\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'x': mnist.train.images},\n batch_size=FLAGS.batch_size,\n num_epochs=None,\n shuffle=True\n )\n\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={'x': mnist.validation.images},\n batch_size=FLAGS.batch_size,\n num_epochs=None,\n shuffle=False\n )\n\n estimator = tf.estimator.Estimator(\n model_fn=cnn_model_fn,\n model_dir=FLAGS.model_dir\n )\n\n train_spec = tf.estimator.TrainSpec(\n input_fn=train_input_fn, max_steps=FLAGS.max_iter\n )\n\n eval_spec = tf.estimator.EvalSpec(\n input_fn=eval_input_fn,\n start_delay_secs=30,\n throttle_secs=20\n )\n\n tf.estimator.train_and_evaluate(\n estimator=estimator,\n train_spec=train_spec,\n eval_spec=eval_spec\n )\n\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run(main)\n","sub_path":"vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"643419130","text":"import unittest\n\nimport lss.wrappers.factory\n\n\nclass TestFactoryMethods(unittest.TestCase):\n\n def test_list_of_supported_packages(self):\n factory = lss.wrappers.factory.PackageManagersFactory()\n supported_managers = factory.list_supported_package_managers()\n self.assertEqual(supported_managers, ['dnf'])\n\n def test_factory_returns_dnf(self):\n factory = lss.wrappers.factory.PackageManagersFactory()\n package_manager = factory.get_package_manager('dnf')\n self.assertEqual(package_manager.__class__.__name__, 'Dnf')\n\n def test_factory_returns_none_for_non_existing_manager(self):\n factory = lss.wrappers.factory.PackageManagersFactory()\n package_manager = factory.get_package_manager('dummy input')\n self.assertEqual(package_manager, None)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/wrappers/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"151156968","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom torch import empty\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass DQN(nn.Module):\n \"\"\"Initialize a deep Q-learning network\n \n Hints:\n -----\n Original paper for DQN\n https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf\n \n This is just a hint. 
You can build your own structure.\n \"\"\"\n def __init__(self):\n \"\"\"\n You can add additional arguments as you need.\n In the constructor we instantiate modules and assign them as\n member variables.\n \"\"\"\n super(DQN, self).__init__()\n ###########################\n # YOUR IMPLEMENTATION HERE #\n # modified based on the official DQN guide\n # I heard that the initializer matters. here is the initializer\n # (note: this fills a throwaway tensor in place and does not affect the layers below)\n nn.init.kaiming_uniform_(empty(3, 5), a=0, mode='fan_in', nonlinearity='leaky_relu')\n\n # this structure is mentioned in the vanilla paper, quoted below:\n # The exact architecture, shown schematically in Fig. 1, is as follows.\n # The input to the neural network consists of an 84x84x4 image produced by the preprocessing map w.\n # The first hidden layer convolves 32 filters of 8x8 \n # with stride 4 with the input image and applies a rectifier nonlinearity[31,32].\n self.conv1 = nn.Conv2d(4, 32, kernel_size=8, stride=4)\n self.bn1 = nn.BatchNorm2d(32)\n # The second hidden layer convolves 64 filters of 4x4 with stride 2, \n # again followed by a rectifier nonlinearity.\n self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)\n self.bn2 = nn.BatchNorm2d(64)\n # This is followed by a third convolutional layer that convolves 64 filters of 3x3 \n # with stride 1 followed by a rectifier. \n self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)\n self.bn3 = nn.BatchNorm2d(64)\n # The final hidden layer is fully-connected and consists of 512 rectifier units. \n ## here it is (9-3+1)^2*64=7*7*64=3136\n self.fc4 = nn.Linear(3136, 512)\n # The output layer is a fully-connected linear layer with a single output for each valid action. \n # The number of valid actions varied between 4 and 18 on the games we considered.\n self.fc5 = nn.Linear(512, 4)\n \n def forward(self, x):\n \"\"\"\n In the forward function we accept a Tensor of input data and we must return\n a Tensor of output data. We can use Modules defined in the constructor as\n well as arbitrary operators on Tensors.\n \"\"\"\n ###########################\n # YOUR IMPLEMENTATION HERE #\n # modified based on the official DQN guide\n # (note: the BatchNorm layers defined in __init__ are not applied here)\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = F.relu(self.fc4(x.view(x.size(0), -1)))\n x = self.fc5(x)\n ###########################\n return x\n
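# A hedged usage sketch (illustrative only, not part of the original file):\n# net = DQN()\n# q = net(torch.zeros(1, 4, 84, 84)) # q.shape == (1, 4): one Q-value per action\n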
","sub_path":"project3/dqn_model.py","file_name":"dqn_model.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"563040988","text":"import flask\nfrom flask import request, jsonify, render_template\nimport feedparser\n\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n\n@app.route('/api', methods=['GET'])\ndef rss():\n\tif 'site' in request.args:\n\t\twebsite = request.args['site']\n\t\tprint(website)\n\telse:\n\t\treturn 'Error: No site provided. Please enter a site.'\n\n\tfeed = feedparser.parse(website)\n\treturn feed\n\n\n@app.route('/')\ndef index():\n\timport database\n\tfeed = database.returndb()\n\treturn render_template('index.html', feed = feed)\n\t'''\n\timport database\n\treturn database.returndb()\n\t'''\napp.run()\n\n\n'''\nimport feedparser\n\n\nNewsFeed = feedparser.parse(\"http://rss.cnn.com/rss/cnn_topstories.rss\")\n\nfor entry in NewsFeed.entries:\n\tprint(entry.link) \n\nprint(NewsFeed.entries[1].keys())\n'''","sub_path":"ContentAggregator/aggregator.py","file_name":"aggregator.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"154800857","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec\nimport seaborn as sns\nimport pandas as pd\n\nfrom sepia.DataContainer import DataContainer\n\nsns.set()\n\n\nclass SepiaData(object):\n \"\"\"\n Data object used for SepiaModel, containing potentially both `sim_data` and `obs_data` objects of type `sepia.DataContainer`.\n\n :var numpy.ndarray/NoneType x_sim: controllable inputs/experimental conditions, shape (n, p) or None\n :var numpy.ndarray/NoneType t_sim: non-controllable inputs, shape (n, q) or None\n :var numpy.ndarray y_sim: simulation outputs, shape (n, ell_sim)\n :var numpy.ndarray/NoneType y_ind_sim: indices for multivariate y, shape (ell_sim, ), required if ell_sim > 1\n :var numpy.ndarray/NoneType x_obs: controllable inputs for observation data, shape (m, p) or None\n :var numpy.ndarray/list/NoneType y_obs: observed outputs, shape (m, ell_obs), or list length m of 1D arrays (for ragged y_ind_obs), or None\n :var numpy.ndarray/list/NoneType y_ind_obs: vector of indices for multivariate y, shape (l_obs, ), or list length m of 1D arrays (for ragged y_ind_obs), or None\n :var bool sim_only: is it simulation-only data?\n :var bool scalar_out: is the output y scalar?\n :var bool ragged_obs: do the observations have ragged (non-shared) multivariate indices across instances?\n :var numpy.ndarray/list x_cat_ind: indices of x that are categorical (0 = not cat, int > 0 = how many categories)\n :var numpy.ndarray/list t_cat_ind: indices of t that are categorical (0 = not cat, int > 0 = how many categories)\n :var numpy.ndarray/list/NoneType xt_sim_sep: for separable design, list of kronecker composable matrices\n :var bool dummy_x: is there a dummy x? (used in problems where no x is provided)\n :var bool sep_design: is there a Kronecker separable design?\n \"\"\"\n\n def __init__(self, x_sim=None, t_sim=None, y_sim=None, y_ind_sim=None, x_obs=None, y_obs=None, Sigy=None, y_ind_obs=None,\n x_cat_ind=None, t_cat_ind=None, xt_sim_sep=None):\n \"\"\"\n Create SepiaData object. Many arguments are optional depending on the type of model.\n Users should instantiate with all data needed for the desired model. 
See documentation pages for more detail.\n\n :param numpy.ndarray/NoneType x_sim: controllable inputs/experimental conditions, shape (n, p), or None\n :param numpy.ndarray/NoneType t_sim: non-controllable inputs, shape (n, q), or None\n :param numpy.ndarray y_sim: simulation outputs, shape (n, ell_sim)\n :param numpy.ndarray/NoneType y_ind_sim: indices for multivariate y, shape (ell_sim, ), required if ell_sim > 1\n :param numpy.ndarray/NoneType x_obs: controllable inputs for observation data, shape (m, p) or None\n :param numpy.ndarray/list/NoneType y_obs: observed outputs, shape (m, ell_obs), or list length m of 1D arrays (for ragged y_ind_obs), or None\n :param numpy.ndarray/list/NoneType y_ind_obs: vector of indices for multivariate y, shape (l_obs, ), or list length m of 1D arrays (for ragged y_ind_obs), or None\n :param numpy.ndarray/NoneType Sigy: optional observation covariance matrix (default is identity)\n :param numpy.ndarray/list/NoneType x_cat_ind: indices of x that are categorical (0 = not cat, int > 0 = how many categories), or None\n :param numpy.ndarray/list/NoneType t_cat_ind: indices of t that are categorical (0 = not cat, int > 0 = how many categories), or None\n :param numpy.ndarray/list/NoneType xt_sim_sep: for separable design, list of kronecker composable matrices; it is a list of 2 or\n more design components that, through Kronecker expansion, produce the full input space (`x` and `t`) for the simulations.\n :raises: TypeError if shapes not conformal or required data missing.\n\n .. note: At least one of x_sim and t_sim must be provided, and y_sim must always be provided.\n\n \"\"\"\n self.sep_design = xt_sim_sep is not None\n self.dummy_x = (not self.sep_design and x_sim is None) or \\\n (self.sep_design and y_obs is not None and x_obs is None)\n self.sim_only = y_obs is None\n\n # Initial Checks\n if y_sim is None:\n raise TypeError('y_sim is required to set up model.')\n if not self.sep_design:\n if y_obs is not None and ((x_obs is None and x_sim is not None) or (x_obs is not None and x_sim is None)):\n raise ValueError('x_sim and x_obs must both be either not None or None (which is the no-x model case)')\n if x_sim is None and t_sim is None:\n raise TypeError('At least one of x_sim or t_sim is required to set up model.')\n\n if self.dummy_x:\n if y_obs is not None:\n x_obs = 0.5 * np.ones((len(y_obs), 1)) # sets up dummy x_obs\n if not self.sep_design: # set up dummy_x in x_sim, or delays until sep/kron processing just below\n x_sim = 0.5 * np.ones((t_sim.shape[0], 1))\n\n if self.sep_design:\n if x_sim is not None or t_sim is not None:\n raise ValueError('Cannot specify x_sim or t_sim if separable design is supplied')\n if self.dummy_x: # augment the composed design with dummy_x column\n xt_sim_sep.insert(0,np.array([0.5]).reshape(1,1))\n # Expand out the design from the components by kronecker product into x_sim and t_sim (as needed)\n temp_des=xt_sim_sep[-1]\n for ndes in reversed(xt_sim_sep[:-1]):\n r1,r2=np.meshgrid(np.arange(ndes.shape[0]),np.arange(temp_des.shape[0]))\n temp_des=np.hstack((ndes[r1.reshape(-1,order='F'),:],temp_des[r2.reshape(-1,order='F'),:]))\n # separate the composed design into x and t components\n if self.sim_only: # Emulator-only model\n x_sim=temp_des # the design can only be attributed to x's\n else: # extract the shape\n p=x_obs.shape[1]\n x_sim=temp_des[:,:p]\n t_sim=temp_des[:,p:]\n\n # At this point, dummy_x should be place if needed\n # if it's a separable design, that's composed and split into x_sim and t_sim appropriately\n 
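 # A hedged sketch of the expansion above, with toy component values (A and B\n # are illustrative, not part of the API): given xt_sim_sep = [A, B] with\n # A = np.array([[1.], [2.]]) and B = np.array([[10.], [20.], [30.]]),\n # the meshgrid/hstack loop builds the 6 x 2 full design\n # [[1,10],[1,20],[1,30],[2,10],[2,20],[2,30]]: every row of A paired with\n # every row of B, earlier components varying slowest (a Kronecker enumeration).\n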
# the separable design components will be used in logLik and predict, nobody else needs to worry about it now\n # (except carrying it along in SetupModel\n\n self.sim_data = DataContainer(x=x_sim, y=y_sim, t=t_sim, y_ind=y_ind_sim, xt_sep_design=xt_sim_sep)\n\n self.scalar_out = (self.sim_data.y.shape[1] == 1)\n\n if self.sim_only:\n self.obs_data = None\n else:\n if x_sim.shape[1] != x_obs.shape[1]:\n raise TypeError('x_sim and x_obs do not contain the same number of variables/columns.')\n self.obs_data = DataContainer(x=x_obs, y=y_obs, y_ind=y_ind_obs, Sigy=Sigy)\n self.sim_only = False\n self.ragged_obs = isinstance(y_obs, list)\n\n # Set up Sigy\n if not self.sim_only:\n if self.obs_data.Sigy is None:\n if self.ragged_obs:\n ell_obs = [self.obs_data.y[i].shape for i in range(len(self.obs_data.y))]\n self.obs_data.Sigy = [np.atleast_2d(np.diag(np.ones(ell_obs[i]))) for i in range(len(ell_obs))]\n else:\n ell_obs = self.obs_data.y.shape[1]\n self.obs_data.Sigy = np.diag(np.ones(ell_obs))\n\n # Process categorical indices\n if x_cat_ind is not None:\n if len(x_cat_ind) != x_sim.shape[1]:\n raise TypeError('x_cat_ind length should equal p.')\n for i, ci in enumerate(x_cat_ind):\n if ci > 0 and ci != np.max(x_sim[:, i]):\n raise TypeError('Nonzero values of x_cat_ind should equal number of categories.')\n else:\n x_cat_ind = np.zeros(x_sim.shape[1])\n self.x_cat_ind = x_cat_ind\n if t_cat_ind is not None:\n if t_sim is None:\n raise TypeError('Cannot use t_cat_ind if t_sim is not provided.')\n if len(t_cat_ind) != t_sim.shape[1]:\n raise TypeError('t_cat_ind length should equal p.')\n for i, ci in enumerate(t_cat_ind):\n if ci > 0 and ci != np.max(t_sim[:, i]):\n raise TypeError('Nonzero values of t_cat_ind should equal number of categories.')\n else:\n if t_sim is None:\n t_cat_ind = []\n else:\n t_cat_ind = np.zeros(t_sim.shape[1])\n self.t_cat_ind = t_cat_ind\n\n # Prints pretty representation of the SepiaData object for users to check their setup.\n def __str__(self):\n res = ''\n res += 'This SepiaData instance implies the following:\\n'\n if self.sim_only:\n res += 'This is a simulator (eta)-only model, y dimension %d\\n' % self.sim_data.y.shape[1]\n res += 'm = %5d (number of simulated data)\\n' % self.sim_data.x.shape[0]\n res += 'p = %5d (number of inputs)\\n' % self.sim_data.x.shape[1]\n if self.sim_data.t is not None:\n res += 'q = %5d (number of additional simulation inputs)\\n' % self.sim_data.t.shape[1]\n if self.scalar_out:\n res += 'pu = 1 (univariate response dimension)\\n'\n elif self.sim_data.K is not None:\n res += 'pu = %5d (transformed response dimension)\\n' % self.sim_data.K.shape[0]\n else:\n res += 'pu NOT SET (transformed response dimension); call method create_K_basis \\n'\n else:\n if self.ragged_obs:\n res += 'This is a simulator and obs model, sim y dimension %d, obs y dimension ragged\\n' % self.sim_data.y.shape[1]\n else:\n res += 'This is a simulator and obs model, sim y dimension %d, obs y dimension %d\\n' % (self.sim_data.y.shape[1], self.obs_data.y.shape[1])\n res += 'n = %5d (number of observed data)\\n' % self.obs_data.x.shape[0]\n res += 'm = %5d (number of simulated data)\\n' % self.sim_data.x.shape[0]\n res += 'p = %5d (number of inputs)\\n' % self.sim_data.x.shape[1]\n res += 'q = %5d (number of additional simulation inputs to calibrate)\\n' % self.sim_data.t.shape[1]\n if self.scalar_out:\n res += 'pu = 1 (univariate response dimension)'\n else:\n if self.sim_data.K is not None and self.obs_data.K is not None:\n res += 'pu = %5d (transformed 
response dimension)\\n' % self.sim_data.K.shape[0]\n else:\n res += 'pu NOT SET (transformed response dimension); call method create_K_basis\\n'\n if self.obs_data.D is not None:\n if self.ragged_obs:\n res += 'pv = %5d (transformed discrepancy dimension)\\n' % self.obs_data.D[0].shape[0]\n else:\n res += 'pv = %5d (transformed discrepancy dimension)\\n' % self.obs_data.D.shape[0]\n else:\n res += 'pv not set, indicating (unusual case of) no discrepancy; call method create_D_basis to fix \\n'\n # Info on separable design, if that's in place.\n if self.sep_design:\n res += 'This is a separable simulation design with components: \\n'\n for ii in range(len(self.sim_data.xt_sep_design)):\n res += ' x component %d has m = %5d (simulated data design size) \\n' % (\n ii, self.sim_data.xt_sep_design[ii].shape[0])\n res += ' x component %d has p = %5d (number of inputs) \\n' % (\n ii, self.sim_data.xt_sep_design[ii].shape[1])\n # Print info on categorical variables\n if np.any(np.array(self.x_cat_ind) > 0):\n res += 'Categorical x input variables:\\n'\n for i, ci in enumerate(self.x_cat_ind):\n if ci > 0:\n res += 'x index %d with %d categories\\n' % (i, ci)\n if np.any(np.array(self.t_cat_ind) > 0):\n res += 'Categorical t input variables:\\n'\n for i, ci in enumerate(self.t_cat_ind):\n if ci > 0:\n res += 't index %d with %d categories\\n' % (i, ci)\n return res\n\n def transform_xt(self, x_notrans=None, t_notrans=None, x=None, t=None):\n \"\"\"\n Transforms sim_data x and t and obs_data x to lie in [0, 1], columnwise, or applies\n same transformation to new x and t.\n\n :param list/NoneType x_notrans: column indices of x that should not be transformed or None\n :param list/NoneType t_notrans: column indices of t that should not be transformed or None\n :param numpy.ndarray/NoneType x: new x values to transform to [0, 1] using same rules as original x data or None\n :param numpy.ndarray/NoneType t: new t values to transform to [0, 1] using same rules as original t data or None\n :returns: tuple of x_trans, t_trans if x and t arguments provided; otherwise returns (None, None)\n\n .. 
note:: A column is not transformed if min/max of the column values are equal, if the column is categorical,\n or if the user specifies no transformation using x_notrans or t_notrans arguments.\n\n \"\"\"\n\n x_trans, t_trans = None, None\n if x_notrans is None:\n x_notrans = []\n if t_notrans is None:\n t_notrans = []\n if x_notrans is True:\n x_notrans = np.arange(self.sim_data.x.shape[1])\n\n # making notes to transform the separable design elements, if needed\n transform_sep = False\n\n # Transform x to unit hypercube\n # if not computed, compute orig x min and orig x max, accounting for notrans_x, all equal x, and categorical x\n if self.sim_data.orig_x_min is None or self.sim_data.orig_x_max is None or self.sim_data.x_trans is None:\n if self.sep_design:\n transform_sep=True\n nx = self.sim_data.x.shape[1]\n orig_x_min = np.min(self.sim_data.x, 0, keepdims=True)\n orig_x_max = np.max(self.sim_data.x, 0, keepdims=True)\n # If any xmin/xmax are equal, don't transform\n xmm = orig_x_max - orig_x_min\n x_notrans = list(set(x_notrans) | set([i for i in range(nx) if xmm[:, i] == 0]))\n # If there are cat inds, do not transform\n if self.x_cat_ind is not None:\n x_notrans = list(set(x_notrans) | set([i for i in range(nx) if self.x_cat_ind[i] > 0]))\n orig_x_min[:, x_notrans] = 0\n orig_x_max[:, x_notrans] = 1\n self.sim_data.x_trans = (self.sim_data.x - orig_x_min) / (orig_x_max - orig_x_min)\n self.sim_data.orig_x_min = orig_x_min\n self.sim_data.orig_x_max = orig_x_max\n if not self.sim_only:\n self.obs_data.orig_x_min = orig_x_min\n self.obs_data.orig_x_max = orig_x_max\n self.obs_data.x_trans = (self.obs_data.x - orig_x_min) / (orig_x_max - orig_x_min)\n # If a new x was passed in, transform it\n if x is not None:\n x_trans = (x - self.sim_data.orig_x_min) / (self.sim_data.orig_x_max - self.sim_data.orig_x_min)\n # Transform t to unit hypercube\n if self.sim_data.t is not None:\n if t_notrans is True:\n t_notrans = np.arange(self.sim_data.t.shape[1])\n # if not computed, compute orig t min and orig t max, accounting for notrans_t, all equal t, and categorical t\n if self.sim_data.orig_t_min is None or self.sim_data.orig_t_max is None or self.sim_data.t_trans is None:\n nt = self.sim_data.t.shape[1]\n orig_t_min = np.min(self.sim_data.t, 0, keepdims=True)\n orig_t_max = np.max(self.sim_data.t, 0, keepdims=True)\n # If any tmin/tmax are equal, don't transform\n tmm = orig_t_max - orig_t_min\n t_notrans = list(set(t_notrans) | set([i for i in range(nt) if tmm[:, i] == 0]))\n # If there are cat inds, do not transform\n if self.t_cat_ind is not None:\n t_notrans = list(set(t_notrans) | set([i for i in range(nt) if self.t_cat_ind[i] > 0]))\n orig_t_min[:, t_notrans] = 0\n orig_t_max[:, t_notrans] = 1\n self.sim_data.t_trans = (self.sim_data.t - orig_t_min) / (orig_t_max - orig_t_min)\n self.sim_data.orig_t_min = orig_t_min\n self.sim_data.orig_t_max = orig_t_max\n if not self.sim_only:\n self.obs_data.orig_t_min = orig_t_min\n self.obs_data.orig_t_max = orig_t_max\n # If a new t was passed in, transform it\n if t is not None:\n t_trans = (t - self.sim_data.orig_t_min) / (self.sim_data.orig_t_max - self.sim_data.orig_t_min)\n\n if transform_sep:\n self.sim_data.xt_sep_design_orig = self.sim_data.xt_sep_design.copy()\n if self.sim_data.orig_t_min is not None:\n sep_min = np.hstack((self.sim_data.orig_x_min, self.sim_data.orig_t_min))\n sep_max = np.hstack((self.sim_data.orig_x_max, self.sim_data.orig_t_max))\n else:\n sep_min = self.sim_data.orig_x_min\n sep_max = self.sim_data.orig_x_max\n 
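 # A hedged worked example of the rescaling here (made-up numbers): a column\n # with original min 2.0 and max 6.0 maps 2.0 -> 0.0, 4.0 -> 0.5, 6.0 -> 1.0\n # via (x - min) / (max - min); columns flagged as no-transform were assigned\n # min 0 and max 1 earlier in this method, so they pass through unchanged.\n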
tind=0\n for ii,dele in enumerate(self.sim_data.xt_sep_design):\n dlen = dele.shape[1]\n self.sim_data.xt_sep_design[ii] = \\\n (dele - sep_min[0,tind:tind+dlen]) / (sep_max[0,tind:tind+dlen] - sep_min[0,tind:tind+dlen])\n tind = tind + dlen\n\n return x_trans, t_trans\n\n def standardize_y(self, center=True, scale='scalar', y_mean=None, y_sd=None):\n \"\"\"\n Standardizes both `sim_data` and `obs_data` outputs y based on sim_data.y mean/SD.\n\n :param bool center: subtract simulation mean (across observations)?\n :param string/bool scale: how to rescale: 'scalar': single SD over all demeaned data, 'columnwise': SD for each column of demeaned data, False: no rescaling\n :param numpy.ndarray/float/NoneType y_mean: y_mean for sim; optional, should match length of y_ind_sim or be scalar\n :param numpy.ndarray/float/NoneType y_sd: y_sd for sim; optional, should match length of y_ind_sim or be scalar\n \"\"\"\n if center:\n if y_mean is None:\n self.sim_data.orig_y_mean = np.mean(self.sim_data.y, 0)\n else:\n self.sim_data.orig_y_mean = y_mean\n else:\n self.sim_data.orig_y_mean = 0.\n y_dm = self.sim_data.y - self.sim_data.orig_y_mean\n if y_sd is not None:\n self.sim_data.orig_y_sd = y_sd\n else:\n if scale == 'scalar':\n self.sim_data.orig_y_sd = np.std(y_dm, ddof=1)\n elif scale == 'columnwise':\n self.sim_data.orig_y_sd = np.std(y_dm, ddof=1, axis=0)\n elif scale is False:\n self.sim_data.orig_y_sd = 1.\n else:\n raise ValueError('standardize_y: invalid value for scale parameter, allowed are {''scalar'',''columnwise'',False}')\n self.sim_data.y_std = y_dm/self.sim_data.orig_y_sd\n if not self.sim_only:\n if not self.scalar_out and not np.isscalar(self.sim_data.orig_y_mean):\n if self.ragged_obs:\n orig_y_mean = []\n for i in range(len(self.obs_data.y)):\n orig_y_mean.append(np.interp(self.obs_data.y_ind[i], self.sim_data.y_ind.squeeze(), self.sim_data.orig_y_mean))\n else:\n orig_y_mean = np.interp(self.obs_data.y_ind.squeeze(), self.sim_data.y_ind.squeeze(), self.sim_data.orig_y_mean)\n self.obs_data.orig_y_mean = orig_y_mean\n else:\n if self.ragged_obs:\n self.obs_data.orig_y_mean = [self.sim_data.orig_y_mean for i in range(len(self.obs_data.y))]\n else:\n self.obs_data.orig_y_mean = self.sim_data.orig_y_mean\n if not self.scalar_out and not np.isscalar(self.sim_data.orig_y_sd):\n if self.ragged_obs:\n orig_y_sd = []\n for i in range(len(self.obs_data.y)):\n orig_y_sd.append(np.interp(self.obs_data.y_ind[i], self.sim_data.y_ind.squeeze(), self.sim_data.orig_y_sd))\n else:\n orig_y_sd = np.interp(self.obs_data.y_ind, self.sim_data.y_ind, self.sim_data.orig_y_sd)\n self.obs_data.orig_y_sd = orig_y_sd\n else:\n if self.ragged_obs:\n self.obs_data.orig_y_sd = [self.sim_data.orig_y_sd for i in range(len(self.obs_data.y))]\n else:\n self.obs_data.orig_y_sd = self.sim_data.orig_y_sd\n def cov_norm(ysd):\n if np.isscalar(ysd):\n return ysd**2\n ysd=ysd.reshape((1,-1))\n return(ysd.T @ ysd)\n if self.ragged_obs:\n ty_std=[]; tSigy_std=[]\n for i in range(len(self.obs_data.y)):\n ty_std.append( (self.obs_data.y[i] - self.obs_data.orig_y_mean[i]) / self.obs_data.orig_y_sd[i] )\n tSigy_std.append(self.obs_data.Sigy[i] / cov_norm(self.obs_data.orig_y_sd[i]) )\n else:\n ty_std = (self.obs_data.y - self.obs_data.orig_y_mean) / self.obs_data.orig_y_sd\n tSigy_std = self.obs_data.Sigy / cov_norm(self.obs_data.orig_y_sd)\n self.obs_data.y_std = ty_std\n self.obs_data.Sigy_std=tSigy_std\n\n def create_K_basis(self, n_pc=0.995, K=None):\n \"\"\"\n Creates `K_sim` and `K_obs` basis functions using PCA on 
sim_data.y_std, or using given `K_sim` matrix.\n\n :param float/int n_pc: proportion in [0, 1] of variance, or an integer number of components\n :param numpy.ndarray/None K: a basis matrix on sim indices of shape (n_basis_elements, ell_sim) or None\n\n .. note:: if standardize_y() method has not been called first, it will be called automatically by this method.\n \"\"\"\n if self.scalar_out:\n if n_pc == 1:\n print('Scalar output, using pu = 1 basis.')\n self.sim_data.K = np.zeros((n_pc, 1))\n self.scalar_out = False\n return\n else:\n print('Scalar output, no basis used.')\n return\n if K is not None:\n if not isinstance(K,np.ndarray):\n raise TypeError('create_K_basis: K specified must be a numpy ndarray')\n if len(K.shape)!=2 or K.shape[1]!=self.sim_data.y.shape[1]:\n raise ValueError('create_K_basis: must be 2D, and K and y_sim must have the same second dimension')\n self.sim_data.K = K\n else:\n self.compute_sim_PCA_basis(n_pc)\n # interpolate PC basis to observed, if present\n if not self.sim_only:\n pu = self.sim_data.K.shape[0]\n if self.ragged_obs:\n K_obs = []\n for ki in range(len(self.obs_data.y)):\n K_obs_tmp = np.zeros((pu, self.obs_data.y_ind[ki].shape[0]))\n for i in range(pu):\n K_obs_tmp[i, :] = np.interp(self.obs_data.y_ind[ki], self.sim_data.y_ind, self.sim_data.K[i, :])\n K_obs.append(K_obs_tmp)\n else:\n K_obs = np.zeros((pu, self.obs_data.y_ind.shape[0]))\n for i in range(pu):\n K_obs[i, :] = np.interp(self.obs_data.y_ind, self.sim_data.y_ind, self.sim_data.K[i, :])\n self.obs_data.K = K_obs\n\n def compute_sim_PCA_basis(self, n_pc):\n # Does PCA basis computation on sim_data.y_std attribute, sets K attribute to calculated basis.\n # Used internally by create_K_basis.\n # :param float/int n_pc: number of components or a proportion of variance explained, in [0, 1].\n y_std = self.sim_data.y_std\n if y_std is None:\n print('WARNING: y not standardized, applying default standardization before PCA...')\n self.standardize_y()\n U, s, V = np.linalg.svd(y_std.T, full_matrices=False)\n s2 = np.square(s)\n if n_pc < 1:\n cum_var = s2 / np.sum(s2)\n pu = np.sum(np.cumsum(cum_var) < n_pc) + 1\n else:\n pu = int(n_pc)\n self.sim_data.K = np.transpose(np.dot(U[:, :pu], np.diag(s[:pu])) / np.sqrt(y_std.shape[0]))\n\n def create_D_basis(self, D_type='constant', D_obs=None, D_sim=None, norm=True):\n \"\"\"\n Create `D_obs`, `D_sim` discrepancy bases. Can specify a type of default basis (constant/linear) or provide matrices.\n\n :param string D_type: 'constant' or 'linear' to set up constant or linear D_sim and D_obs\n :param numpy.ndarray/list/NoneType D_obs: a basis matrix on obs indices of shape (n_basis_elements, ell_obs),\n or list of matrices for ragged observations.\n :param numpy.ndarray/NoneType D_sim: a basis matrix on sim indices of shape (n_basis_elements, sim_obs).\n :param bool norm: normalize D basis?\n\n .. 
note:: `D_type` parameter is ignored if `D_obs` and `D_sim` are provided.\n \"\"\"\n # Return early if sim only or univariate output\n if self.sim_only:\n print('Model only has simulation data, skipping discrepancy...')\n return\n if self.scalar_out:\n print('Model has univariate output, skipping discrepancy...')\n return\n # Check if passed in D_sim/D_obs are correct shape and if so, set them into objects\n if D_sim is not None:\n if not D_sim.shape[1] == self.sim_data.y.shape[1]:\n raise TypeError('D_sim basis shape incorrect; second dim should match ell_sim')\n self.sim_data.D = D_sim\n if D_obs is not None:\n if self.ragged_obs:\n for i in range(len(D_obs)):\n if not D_obs[i].shape[1] == (self.obs_data.y[i].shape[1] if self.obs_data.y[i].ndim == 2 else self.obs_data.y[i].shape[0]):\n raise TypeError('D basis shape incorrect; second dim should match ell_obs')\n else:\n if not D_obs.shape[1] == self.obs_data.y.shape[1]:\n raise TypeError('D_obs basis shape incorrect; second dim should match ell_obs')\n self.obs_data.D = D_obs\n elif D_type == 'constant':\n if self.ragged_obs:\n self.obs_data.D = [np.ones((1, self.obs_data.y[i].shape[0])) for i in range(len(self.obs_data.y))]\n else:\n self.obs_data.D = np.ones((1, self.obs_data.y.shape[1]))\n self.sim_data.D = np.ones((1, self.sim_data.y.shape[1]))\n elif D_type == 'linear':\n self.sim_data.D = np.vstack([np.ones(self.sim_data.y.shape[1]), self.sim_data.y_ind])\n if self.ragged_obs:\n self.obs_data.D = [np.vstack([np.ones(self.obs_data.y[i].shape[0]), self.obs_data.y_ind[i]]) for i in range(len(self.obs_data.y))]\n else:\n self.obs_data.D = np.vstack([np.ones(self.obs_data.y.shape[1]), self.obs_data.y_ind])\n # Normalize D to match priors\n if norm:\n if D_sim is not None:\n norm_scl = np.sqrt(np.max(np.dot(self.sim_data.D, self.sim_data.D.T)))\n self.sim_data.D /= norm_scl\n if self.ragged_obs:\n for i in range(len(self.obs_data.D)):\n self.obs_data.D[i] /= norm_scl\n else:\n self.obs_data.D /= norm_scl\n else:\n if self.ragged_obs:\n norm_scl = np.sqrt(np.max(np.dot(self.obs_data.D[0], self.obs_data.D[0].T)))\n for i in range(len(self.obs_data.D)):\n self.obs_data.D[i] /= norm_scl\n else:\n norm_scl = np.sqrt(np.max(np.dot(self.obs_data.D, self.obs_data.D.T)))\n self.obs_data.D /= norm_scl\n\n def plot_K_basis(self, max_plots=4, obs=True):\n \"\"\"\n Plots K basis elements for both sim and obs indices (if applicable). 
Only applies to multivariate-output models.\n\n TODO: Lamy should be 1/Sigy_std\n\n :param int max_plots: maximum number of principal components to plot\n :return: tuple containing matplotlib figure objects: (fig_sim, fig_obs) or just fig_sim if no observed data is present\n \"\"\"\n # Return early if scalar out or basis not set up\n if self.scalar_out:\n print('Scalar output, no K basis to plot.')\n return\n if self.sim_data.K is None:\n print('K basis not set up, call create_K_basis() first.')\n return\n # Plot sim basis\n pu = self.sim_data.K.shape[0]\n ncol = 5\n nrow = int(np.ceil((min(pu, max_plots) + 1) / ncol)) # add 1 for mean line\n fig_sim, axs_sim = plt.subplots(nrow, ncol, figsize=(12, 2 * nrow))\n fig_sim.tight_layout()\n for i, ax in enumerate(axs_sim.flatten()):\n if i == 0: # plot mean line\n ax.plot(self.sim_data.y_ind, np.mean(self.sim_data.K,axis=0))\n ax.set_title('sim mean')\n ax.set_ylabel('sim K basis')\n ax.set_xlabel('sim y_ind')\n elif i < pu+1:\n ax.plot(self.sim_data.y_ind, self.sim_data.K.T[:,i-1])\n ax.set_title('PC %d' % (i))\n ax.set_xlabel('sim y_ind')\n else:\n ax.axis('off')\n # If obs are present and requested, plot obs basis\n if not self.sim_only and obs:\n if self.ragged_obs:\n pu = np.array([k.shape[0] for k in self.obs_data.K])\n if np.all(pu == pu[0]): pu = pu[0]\n else: raise ValueError('first dimension in lists not equal')\n else:\n pu = self.obs_data.K.shape[0]\n ncol = 5\n nrow = int(np.ceil((min(pu,max_plots) + 1) / ncol)) # add 1 for mean line\n fig_obs, axs_obs = plt.subplots(nrow,ncol,figsize=(12, 2 * nrow))\n fig_obs.tight_layout()\n for i,ax in enumerate(axs_obs.flatten()):\n if i == 0: # plot mean line\n if self.ragged_obs: ax.plot(self.obs_data.y_ind[i],np.mean(self.obs_data.K[i],axis=0))\n else: ax.plot(self.obs_data.y_ind, np.mean(self.obs_data.K,axis=0))\n ax.set_title('obs mean')\n ax.set_ylabel('obs K basis')\n ax.set_xlabel('obs y_ind')\n elif i < pu+1:\n if self.ragged_obs: ax.plot(self.obs_data.y_ind[i],self.obs_data.K[i].T[:,i-1])\n else: ax.plot(self.obs_data.y_ind, self.obs_data.K.T[:,i-1])\n ax.set_title('PC %d' % (i))\n ax.set_xlabel('obs y_ind')\n else:\n ax.axis('off')\n return(fig_sim,fig_obs)\n else:\n return fig_sim\n\n def plot_K_weights(self, max_u_plot=5):\n \"\"\"\n Plots K basis weights for both sim and obs data (if applicable). 
Only applies to multivariate-output models.\n\n TODO: Lamy should be 1/Sigy_std\n\n :param int max_u_plot: max number of u's for which to plot vertical line over histogram of w's\n :return: tuple containing matplotlib figure objects: (fig_uw, fig_v) or just fig_uw if no discrepancy is specified\n \"\"\"\n # Return early if scalar out or basis not set up\n if self.scalar_out:\n print('Scalar output, no K weights to plot.')\n return\n if self.sim_data.K is None:\n print('K basis not set up, call create_K_basis() first.')\n return\n # Compute sim K weights\n pu = self.sim_data.K.shape[0]\n ncol = 5\n nrow = int(np.ceil(pu / ncol))\n w = np.dot(np.linalg.pinv(self.sim_data.K).T, self.sim_data.y_std.T).T\n\n fig_uw, axs_uw = plt.subplots(nrow,ncol,figsize=(10,2*nrow))\n fig_uw.tight_layout()\n\n # Compute obs K weights if obs are present\n if not self.sim_only and self.obs_data.K is not None:\n # set pu\n if self.ragged_obs:\n pu = np.array([k.shape[0] for k in self.obs_data.K])\n if np.all(pu == pu[0]): pu = pu[0]\n else: raise ValueError('first dimension in lists not equal')\n else:\n pu = self.obs_data.K.shape[0]\n \n # No D\n if self.obs_data.D is None:\n pv = 0\n DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)\n # compute u\n if self.ragged_obs:\n u = []\n for i in range(len(self.obs_data.y_ind)):\n DK = self.obs_data.K[i]\n Lamy = np.eye(self.obs_data.y_ind[i].shape[0])\n DKprod = np.linalg.multi_dot([DK, Lamy, DK.T])\n u.append(np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, self.obs_data.y_std[i].T])).T)\n u = np.array(u)\n else:\n DK = self.obs_data.K\n Lamy = np.eye(self.obs_data.y_ind.shape[0]) # Identity with size len(y_ind) how to do this with ragged?\n DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)\n u = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, self.obs_data.y_std.T])).T\n \n nrow = int(np.ceil(pu / ncol))\n if u.shape[1] == w.shape[1]:\n for i,ax in enumerate(axs_uw.flatten()):\n if i < w.shape[1]:\n limit = abs(max(max(w[:,i].min(), w[:,i].max(), key=abs),\\\n max(u[:,i].min(), u[:,i].max(), key=abs), key=abs))\n ax.set_xlim([-1.25*limit,1.25*limit])\n bins_uw = np.linspace(-limit,limit,15,endpoint=True)\n ax.set_xlabel('PC %d wt' % (i+1))\n ax.set_xlim([-limit,limit])\n ax.hist(w[:,i],bins=bins_uw,label='w',density=True)\n for j in range(min(u.shape[0],max_u_plot)):\n ax.axvline(u[j,i],color='darkorange',label='u' if j==0 else '_')\n ax.legend(prop={'size': 6})\n else:\n ax.axis('off')\n return fig_uw\n \n else: # do u and w independently\n raise ValueError('u.shape[1] != w.shape[1]')\n \n else: # D\n if self.ragged_obs:\n pv = np.array([d.shape[0] for d in self.obs_data.D])\n if np.all(pv == pv[0]): pv = pv[0]\n else: raise ValueError('first dimension in lists not equal')\n DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)\n u = []\n v = []\n for i in range(len(self.obs_data.D)):\n DK = np.concatenate([self.obs_data.D[i], self.obs_data.K[i]])\n Lamy = np.eye(self.obs_data.y_ind[i].shape[0])\n DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)\n vu = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, self.obs_data.y_std[i].T]))\n v.append(vu[:pv].T)\n u.append(vu[pv:].T)\n u = np.array(u)\n v = np.array(v)\n else:\n pv = self.obs_data.D.shape[0]\n DK = np.concatenate([self.obs_data.D, self.obs_data.K]) # (pu+pv, ell_obs)\n DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)\n Lamy = np.eye(self.obs_data.y_ind.shape[0])\n DKprod = 
np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)\n vu = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, self.obs_data.y_std.T]))\n v = vu[:pv, :].T\n u = vu[pv:, :].T\n \n if u.shape[1] == w.shape[1]:\n for i,ax in enumerate(axs_uw.flatten()):\n if i < w.shape[1]:\n limit = abs(max(max(w[:,i].min(), w[:,i].max(), key=abs),\\\n max(u[:,i].min(), u[:,i].max(), key=abs), key=abs))\n ax.set_xlim([-1.1*limit,1.1*limit])\n bins_uw = np.linspace(-limit,limit,15,endpoint=True)\n ax.set_xlabel('PC %d wt' % (i+1))\n ax.hist(w[:,i],bins=bins_uw,label='w',density=True)\n for j in range(min(u.shape[0],max_u_plot)):\n ax.axvline(u[j,i],color='darkorange',label='u' if j==0 else '_')\n ax.legend(prop={'size': 6})\n else:\n ax.axis('off')\n else: \n raise ValueError('u.shape[1] != w.shape[1]')\n\n # V\n nrow = int(np.ceil(pv / ncol))\n fig_v, axs_v = plt.subplots(nrow,ncol,figsize=(10,2*nrow))\n fig_v.tight_layout()\n for i,ax in enumerate(axs_v.flatten()):\n if i < v.shape[1]:\n ax.hist(v[:,i],density=True)\n ax.set_xlabel('D %d wt : v' % (i+1))\n else:\n ax.axis('off')\n return (fig_uw, fig_v)\n\n \n def plot_u_w_pairs(self, max_plots=5, save=False):\n \"\"\"\n Plots principal component basis weights for both sim and obs data (if applicable). Only applies to multivariate-output models.\n\n :param int max_plots: max number of principal components to plot\n :return: matplotlib figure fig_g: seaborn pairs figure\n \"\"\"\n # Return early if scalar out or basis not set up\n if self.scalar_out:\n print('Scalar output, no K weights to plot.')\n return\n if self.sim_data.K is None:\n print('K basis not set up, call create_K_basis() first.')\n return\n pu = self.sim_data.K.shape[0]\n w = np.dot(np.linalg.pinv(self.sim_data.K).T, self.sim_data.y_std.T).T\n \n if not self.sim_only and self.obs_data.K is not None:\n if self.ragged_obs:\n pu = np.array([k.shape[0] for k in self.obs_data.K])\n if np.all(pu == pu[0]): pu = pu[0]\n else: raise ValueError('first dimension in lists not equal')\n else:\n pu = self.obs_data.K.shape[0]\n\n # No D\n if self.obs_data.D is None:\n pv = 0\n DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)\n if self.ragged_obs:\n u = []\n for i in range(len(self.obs_data.K)):\n DK = self.obs_data.K[i]\n Lamy = np.eye(self.obs_data.y_ind[i].shape[0])\n DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)\n u.append(np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, self.obs_data.y_std[i].T])).T)\n u = np.array(u)\n else:\n DK = self.obs_data.K\n Lamy = np.eye(self.obs_data.y_ind.shape[0])\n DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)\n u = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, self.obs_data.y_std.T])).T\n \n else: # D\n if self.ragged_obs:\n pv = np.array([d.shape[0] for d in self.obs_data.D])\n if np.all(pv == pv[0]): pv = pv[0]\n else: raise ValueError('first dimension in lists not equal')\n DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)\n u = []\n v = []\n for i in range(len(self.obs_data.D)):\n DK = np.concatenate([self.obs_data.D[i], self.obs_data.K[i]])\n Lamy = np.eye(self.obs_data.y_ind[i].shape[0])\n DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)\n vu = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, self.obs_data.y_std[i].T]))\n v.append(vu[:pv].T)\n u.append(vu[pv:].T)\n u = np.array(u)\n v = np.array(v)\n else:\n pv = self.obs_data.D.shape[0]\n DK = np.concatenate([self.obs_data.D, self.obs_data.K]) # (pu+pv, 
ell_obs)\n DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)\n Lamy = np.eye(self.obs_data.y_ind.shape[0])\n DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)\n vu = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, self.obs_data.y_std.T]))\n v = vu[:pv, :].T\n u = vu[pv:, :].T\n\n # change u,w to match max_plots\n if w.shape[1]>max_plots: \n w = w[:,0:max_plots]\n print('Plotting up to',max_plots,'pairs. Change with parameter \\'max_plots\\'')\n col_names = []\n for i in range(w.shape[1]): col_names.append('w{}'.format(i+1))\n w_df = pd.DataFrame(data=w,columns=col_names)\n if u.shape[1]>max_plots: u = u[:,0:max_plots]\n\n lims = max(np.maximum(np.max(np.abs(w),axis=0),np.max(np.abs(u),axis=0))*1.1)\n with sns.plotting_context(\"notebook\", font_scale=1):\n g = sns.PairGrid(w_df)\n g.map_diag(sns.distplot)\n g.map_offdiag(sns.scatterplot)\n for i in range(g.axes.shape[1]): # rows\n for j in range(g.axes.shape[0]): # columns\n g.axes[i,j].set_xlim(-lims,lims); g.axes[i,j].set_ylim(-lims,lims)\n if i == j:\n for k in range(u.shape[0]):\n g.axes[i,i].axvline(u[k,i],color='darkorange',label='u{}'.format(i+1) if k==0 else \"_\")\n g.axes[i,i].legend(facecolor='white')\n else:\n g.axes[i,j].scatter(u[:,j],u[:,i],c='darkorange',label='(u{},u{})'.format(j+1,i+1))\n g.axes[i,j].legend(facecolor='white')\n if save: plt.savefig(save,dpi=300)\n return g.fig\n\n def plot_K_residuals(self):\n \"\"\"\n Plots residuals after projection to K basis. Only applies to multivariate-output models.\n :return: tuple containing matplotlib figure objects: (fig_u, fig_v) or just fig_noD if no discrepancy is specified\n \"\"\"\n # Return early if scalar out or basis not set up\n if self.scalar_out:\n print('Scalar output, no K residuals to plot.')\n return\n if self.sim_data.K is None:\n print('K basis not set up, call create_K_basis() first.')\n return\n if not self.sim_only and self.obs_data.K is not None:\n if isinstance(self.obs_data.K, list):\n print('plot_K_residuals cannot yet handle ragged observations')\n return\n pu = self.obs_data.K.shape[0]\n if self.obs_data.D is None:\n pv = 0\n DK = self.obs_data.K\n DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)\n Lamy = np.eye(self.obs_data.y_ind.shape[0])\n DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)\n u = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, self.obs_data.y_std.T])).T\n proj = np.dot(u, DK)\n resid = self.obs_data.y_std - proj\n \n fig_noD, axs_noD = plt.subplots(1,3,figsize=(4,6))\n \n axs_noD[0].plot(self.obs_data.y_ind, self.obs_data.y_std.squeeze().T)\n axs_noD[0].set_title('obs y_std')\n axs_noD[0].set_xlabel('obs y_ind')\n \n axs_noD[1].plot(self.obs_data.y_ind, proj.squeeze().T)\n axs_noD[1].set_title('obs projection reconstruction')\n axs_noD[1].set_xlabel('obs y_ind')\n \n axs_noD[2].plot(self.obs_data.y_ind, resid.squeeze().T, '-')\n axs_noD[2].set_title('obs projection residual')\n axs_noD[2].set_xlabel('obs y_ind')\n return fig_noD\n else:\n pv = self.obs_data.D.shape[0]\n DK = np.concatenate([self.obs_data.D, self.obs_data.K]) # (pu+pv, ell_obs)\n DKridge = 1e-6 * np.diag(np.ones(pu + pv)) # (pu+pv, pu+pv)\n Lamy = np.eye(self.obs_data.y_ind.shape[0])\n DKprod = np.linalg.multi_dot([DK, Lamy, DK.T]) # (pu+pv, pu+pv)\n vu = np.dot(np.linalg.inv(DKprod + DKridge), np.linalg.multi_dot([DK, Lamy, self.obs_data.y_std.T]))\n v = vu[:pv, :].T\n u = vu[pv:, :].T\n ncol = 5\n nrow = int(np.ceil(pu / ncol))\n fig_u,axs_u = 
plt.subplots(nrow,ncol,figsize=(8, 2 * nrow))\n            for i, ax in enumerate(axs_u.flatten()):\n                if i < pu:\n                    ax.hist(u[:, i])\n                    ax.set_xlabel('PC %d wt' % (i+1))\n                else:\n                    ax.axis('off')\n            \n            nrow = int(np.ceil(pv / ncol))\n            fig_v,axs_v = plt.subplots(nrow,ncol,figsize=(8, 2 * nrow))\n            for i,ax in enumerate(axs_v.flatten()):\n                if i < pv:\n                    ax.hist(v[:, i])\n                    ax.set_xlabel('D %d wt' % (i+1))\n                else:\n                    ax.axis('off')\n            return (fig_u,fig_v)\n\n    def plot_data(self,which_x=None,x_min=None,x_max=None,y_min=None,y_max=None,n_neighbors=3,max_sims=50,save=None):\n        \"\"\"\n        Plots observed data and simulation runs on the same axis with n_neighbors nearest simulations\n        in x-space. Only applies to multivariate-output models with both simulation and observed data.\n        \n        :param list/NoneType which_x: optionally sets which x_obs indices to plot\n        :param float x_min: sets x lower limit on plot\n        :param float x_max: sets x upper limit on plot\n        :param float y_min: sets y lower limit on plot\n        :param float y_max: sets y upper limit on plot\n        :param int n_neighbors: sets number of nearest simulations to highlight\n        :param int max_sims: sets maximum number of simulation runs to plot\n        :return matplotlib figure fig: figure object of plot\n        \"\"\"\n        if self.sim_only:\n            print('plot_data does not currently work for sim_only models.')\n            return\n        if self.scalar_out:\n            print('plot_data does not currently work for univariate output models.')\n            return\n\n        n = self.obs_data.x.shape[0]\n        m = self.sim_data.x.shape[0]\n\n        # plot up to 4 input space points\n        if n > 4:\n            # if no which_x or given which_x is out of bounds\n            if which_x is None or (which_x is not None and not np.all(np.array(which_x) <= (n - 1))):\n                # choose 4 equally spaced input points to plot\n                which_x = np.linspace(0,n-1,4,dtype=int)\n            x_plot = self.obs_data.x[which_x,:]\n        else:\n            which_x = np.arange(0,n,1,dtype=int)\n            x_plot = self.obs_data.x\n        n_plots = x_plot.shape[0]\n\n        # get axis limits\n        if self.ragged_obs:\n            if x_min is None: x_min = min(min([np.amin(k) for k in self.obs_data.y_ind]),np.amin(self.sim_data.y_ind))\n            if x_max is None: x_max = max(max([np.amax(k) for k in self.obs_data.y_ind]),np.amax(self.sim_data.y_ind))\n            if y_min is None: y_min = min(min([np.amin(k) for k in self.obs_data.y]),np.amin(self.sim_data.y))\n            if y_max is None: y_max = max(max([np.amax(k) for k in self.obs_data.y]),np.amax(self.sim_data.y))\n        else:\n            if x_min is None: x_min = min(np.amin(self.obs_data.y_ind),np.amin(self.sim_data.y_ind))\n            if x_max is None: x_max = max(np.amax(self.obs_data.y_ind),np.amax(self.sim_data.y_ind)) \n            if y_min is None: y_min = min(np.amin(self.obs_data.y),np.amin(self.sim_data.y))\n            if y_max is None: y_max = max(np.amax(self.obs_data.y),np.amax(self.sim_data.y))\n\n        # nearest neighbors\n        # find closest sim input points to each x_plot observed input point\n        # ith column of near_sim_idx contains the n_neighbors nearest sim_design points (by index)\n        # for ith point in x_plot\n        near_sim_idx = None\n        # this checks that x is not set up as a dummy, if it is, nearest neighbors in x space doesn't mean anything\n        if m>2 and not np.all(self.sim_data.x.flatten() == self.sim_data.x.flatten()[0]) and \\\n                (self.obs_data.x.shape[0]==1 or not np.all(self.obs_data.x.flatten() == self.obs_data.x.flatten()[0])): \n            n_neighbors = min(min(n_neighbors,m),7)\n            near_sim_idx = np.zeros(shape=(n_neighbors,n_plots),dtype=int)\n            for i in range(n_plots):\n                dist = np.argsort(np.linalg.norm(self.sim_data.x-x_plot[i,:],axis=1))\n                near_sim_idx[:,i] = dist[0:n_neighbors]\n\n        # Generate plot for each 
x_plot (x_obs) point\n        fig = plt.figure(figsize=[12,12],constrained_layout=True)\n        gs = GridSpec(2,2,figure=fig)\n        axs = np.array([fig.add_subplot(gs[0,0]),\\\n                        fig.add_subplot(gs[0,1]),\\\n                        fig.add_subplot(gs[1,0]),\\\n                        fig.add_subplot(gs[1,1])])\n        for i in range(4):\n            if i < n_plots:\n                # axis limits, ticks, and labels\n                axs[i].set_xlim([x_min, x_max])\n                axs[i].set_ylim([y_min, y_max])\n                #axs[i].xaxis.set_ticks(np.linspace(x_min,x_max,10,endpoint=True))\n                #axs[i].yaxis.set_ticks(np.linspace(y_min,y_max,10,endpoint=True))\n                axs[i].set_title(\"x_obs point {}\".format(i+1))\n                axs[i].set_xlabel(\"y_ind (native)\")\n                axs[i].set_ylabel(\"y (native)\")\n\n                # simulations all\n                if m>max_sims:\n                    sims_idx = np.linspace(0,m-1,max_sims,dtype=int)\n                else:\n                    sims_idx = range(m)\n                for j in sims_idx:\n                    axs[i].plot(self.sim_data.y_ind, np.transpose(self.sim_data.y)[:,j],color='lightgrey',\\\n                                linestyle=\"--\",label=\"Simulation runs\" if j==0 else \"_\")\n\n                # simulations - nearest neighbors\n                if near_sim_idx is not None:\n                    colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k') # for nearest sims\n                    for j in range(n_neighbors):\n                        axs[i].plot(self.sim_data.y_ind,np.transpose(self.sim_data.y)[:,near_sim_idx[j,i]],\\\n                                    linestyle=\"--\",\\\n                                    color=colors[j],label=\"Nearest Sim {}\".format(j+1))\n\n                # true data curve and \"real data points\"\n                if self.ragged_obs:\n                    axs[i].plot(self.obs_data.y_ind[i], self.obs_data.y[which_x[i]],'--ko',label=\"Obs data\")\n                else:\n                    axs[i].plot(self.obs_data.y_ind, self.obs_data.y[which_x[i]],'--ko',label=\"Obs data\")\n\n                # legend\n                axs[i].legend()\n                #axs[i].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n\n            else:\n                axs[i].axis('off')\n        \n        if save is not None: fig.savefig(save,dpi=300,bbox_inches='tight')\n        return fig\n\n\n\n","sub_path":"sepia/SepiaData.py","file_name":"SepiaData.py","file_ext":"py","file_size_in_byte":53065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"511120975","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n#    NCTR, Nile Center for Technology Research\n#    Copyright (C) 2011-2012 NCTR ().\n#\n##############################################################################\n\nfrom osv import fields,osv\n\nclass res_partner_year(osv.osv):\n    \"\"\"\n    To add the ability to manage the suppliers of a specific year \"\"\"\n\n    _description='Partner Year'\n    _name = 'res.partner.year'\n    _columns = {\n        'name': fields.char('Year Name', required=True, size=64, translate=True),\n        'active' : fields.boolean('Active', help=\"The active field allows you to hide the category without removing it.\"),\n        'partner_ids': fields.many2many('res.partner', 'res_partner_year_rel', 'year_id', 'partner_id', 'Partners'),\n    }\n\n\nclass res_partner(osv.osv):\n    \"\"\"\n    To add year id to res partner \"\"\"\n\n    _inherit = \"res.partner\"\n    _columns = {\n        'year_id': fields.many2many('res.partner.year', 'res_partner_year_rel', 'partner_id', 'year_id', 'Years'),\n    }\n#\n# Model definition\n#\n\nclass custom_res_partner_category(osv.osv):\n    \"\"\"\n    Add parent and child relation to partner category\"\"\"\n\n    _inherit = 'res.partner.category'\n    _columns = {\n        'type': fields.selection([('supplier', 'Supplier'),('accountant', 'Accountant'),('project', 'project')],'Type' ),\n        'parent_id': fields.many2one('res.partner.category', 'Parent Category', select=True),\n\t\t}\n    \n    def onchange_parent_id(self, cr, uid, ids,parent_id=False):\n        \"\"\" \n        Read the type of the parent 
category and propagate it to this category when the parent changes.\n        \n        @param parent_id: Changed parent id\n        @return: Dictionary of values of parent type \n        \"\"\"\n        res = {}\n        if parent_id:\n            parent_type = self.pool.get('res.partner.category').browse(cr, uid, parent_id)\n            result={'type': parent_type.type}\n            res = {'value': result}\n        return res\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"v_7/GDS/shamil_v3/purchase_custom/partner.py","file_name":"partner.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"258971528","text":"from keras.losses import binary_crossentropy\nimport tensorflow as tf\n\n\n\ndef get_loss(choice):\n    if choice == 'bce':\n        loss = 'binary_crossentropy'\n    # elif choice == 'w_bce':\n    #     pos_class_weight = load_class_weights(root=root, split=split)\n    #     loss = weighted_binary_crossentropy_loss(pos_class_weight)\n\n    elif choice == 'dice':\n        loss = dice_loss\n    # elif choice == 'w_mar':\n    #     pos_class_weight = load_class_weights(root=root, split=split)\n    #     loss = margin_loss(margin=0.4, downweight=0.5, pos_weight=pos_class_weight)\n    # elif choice == 'mar':\n    #     loss = margin_loss(margin=0.4, downweight=0.5, pos_weight=1.0)\n    elif choice == 'bce_dice':\n        loss = bce_dice_loss\n\n\n    # if net.find('caps') != -1:\n    #     return {'out_seg': loss, 'out_recon': 'mse'}, {'out_seg': 1., 'out_recon': recon_wei}\n    # else:\n    return loss\n\n\n# dice_hard is used for evaluation\ndef dice_hard(y_true, y_pred, threshold=0.5, axis=[1,2], smooth=1e-5):\n\n    y_pred = tf.cast(y_pred > threshold, dtype=tf.float32)\n    y_true = tf.cast(y_true > threshold, dtype=tf.float32)\n    inse = tf.reduce_sum(tf.multiply(y_pred, y_true), axis=axis)\n    l = tf.reduce_sum(y_pred, axis=axis)\n    r = tf.reduce_sum(y_true, axis=axis)\n    ## old axis=[0,1,2,3]\n    # hard_dice = 2 * (inse) / (l + r)\n    # epsilon = 1e-5\n    # hard_dice = tf.clip_by_value(hard_dice, 0, 1.0-epsilon)\n    ## new haodong\n    hard_dice = (2. * inse + smooth) / (l + r + smooth)\n    ##\n    hard_dice = tf.reduce_mean(hard_dice)\n    return hard_dice\n\n# dice_soft is used for the loss\ndef dice_soft(y_true, y_pred, loss_type='jaccard', axis=[1,2], smooth=1e-5, from_logits=False):\n\n    if not from_logits:\n        # transform back to logits\n        _epsilon = tf.convert_to_tensor(1e-7, y_pred.dtype.base_dtype)\n        y_pred = tf.clip_by_value(y_pred, _epsilon, 1 - _epsilon)\n        y_pred = tf.log(y_pred / (1 - y_pred))\n\n    inse = tf.reduce_sum(y_pred * y_true, axis=axis)\n    if loss_type == 'jaccard':\n        l = tf.reduce_sum(y_pred * y_pred, axis=axis)\n        r = tf.reduce_sum(y_true * y_true, axis=axis)\n    elif loss_type == 'sorensen':\n        l = tf.reduce_sum(y_pred, axis=axis)\n        r = tf.reduce_sum(y_true, axis=axis)\n    else:\n        raise Exception(\"Unknown loss_type\")\n    ## old axis=[0,1,2,3]\n    # dice = 2 * (inse) / (l + r)\n    # epsilon = 1e-5\n    # dice = tf.clip_by_value(dice, 0, 1.0-epsilon) # if all empty, dice = 1\n    ## new haodong\n    dice = (2. 
* inse + smooth) / (l + r + smooth)\n    ##\n    dice = tf.reduce_mean(dice)\n    return dice\n\n\ndef dice_loss(y_true, y_pred, from_logits=False):\n    return 1-dice_soft(y_true, y_pred,axis=[1,2], from_logits=from_logits)\n\ndef bce_dice_loss(y_true, y_pred):\n    return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)\n\n\n\n\n\n","sub_path":"my_seg_keras/v3_unet_street/loss_choice.py","file_name":"loss_choice.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"7978437","text":"peso = [1,2,3,4]\nbeneficio = [7,3,9,1]\ncapacidadMochila = 20\n\ndef mochila(peso,beneficio,capacidadMochila):\n    # build a (len(peso)+1) x (capacidadMochila+1) table of zeros; row 0 is the no-item base case\n    matriz = [[0]*(capacidadMochila+1) for _ in range(len(peso)+1)]\n    for i in range(1,len(peso)+1):\n        for k in range(capacidadMochila+1):\n            if k >= peso[i-1]:\n                matriz[i][k]=max(matriz[i-1][k],matriz[i-1][k-peso[i-1]]+beneficio[i-1])\n            else:\n                matriz[i][k]=matriz[i-1][k]\n    return matriz\n","sub_path":"mochila.py","file_name":"mochila.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"402138619","text":"#!/usr/bin/env python3\n\n# USAGE\n# With default parameters\n# python3 recognise.py\n# OR specifying the encodings, screen resolution\n# python3 recognise.py -e encodings.pickle -r 240\n\n# Acknowledgement\n# This code is adapted from:\n# https://www.pyimagesearch.com/2018/06/18/face-recognition-with-opencv-python-and-deep-learning/\n\n# import the necessary packages\nimport argparse\nimport pickle\nimport time\nfrom imutils.video import VideoStream\nimport face_recognition\nimport imutils\nimport cv2\n\n\nclass Recognise:\n    \"\"\"\n    recognise user\n    \"\"\"\n\n    def getuser(self):\n        \"\"\"\n        implementation of facial recognition\n\n        Return:\n            name of user\n        \"\"\"\n\n        # construct the argument parser and parse the arguments\n        ap = argparse.ArgumentParser()\n        ap.add_argument(\n            \"-e\",\n            \"--encodings\",\n            default=\"encodings.pickle\",\n            help=\"path to serialized db of facial encodings\")\n        ap.add_argument(\n            \"-r\",\n            \"--resolution\",\n            type=int,\n            default=240,\n            help=\"Resolution of the video feed\")\n        ap.add_argument(\n            \"-d\",\n            \"--detection-method\",\n            type=str,\n            default=\"hog\",\n            help=\"face detection model to use: either `hog` or `cnn`\")\n        args = vars(ap.parse_args())\n\n        # load the known faces and embeddings\n        print(\"[INFO] loading encodings...\")\n        data = pickle.loads(open(args[\"encodings\"], \"rb\").read())\n\n        # initialize the video stream and then allow the camera sensor to warm\n        # up\n        print(\"[INFO] starting video stream...\")\n        vs = VideoStream(src=0).start()\n        time.sleep(2.0)\n\n        # loop over frames from the video file stream\n        while True:\n            # grab the frame from the threaded video stream\n            frame = vs.read()\n\n            # convert the input frame from BGR to RGB then resize it to have\n            # a smaller width (to speed up processing)\n            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n            rgb = imutils.resize(rgb, width=args[\"resolution\"])\n\n            # detect the (x, y)-coordinates of the bounding boxes\n            # corresponding to each face in the input frame, then compute\n            # the facial embeddings for each face\n            boxes = face_recognition.face_locations(\n                rgb, model=args[\"detection_method\"])\n            encodings = face_recognition.face_encodings(rgb, boxes)\n            names = []\n\n            # loop over the facial embeddings\n            for encoding in encodings:\n                # attempt to match each face in the input image to our known\n                # encodings\n                matches = 
face_recognition.compare_faces(\n                    data[\"encodings\"], encoding)\n                name = \"Unknown\"\n\n                # check to see if we have found a match\n                if True in matches:\n                    # find the indexes of all matched faces then initialize a\n                    # dictionary to count the total number of times each face\n                    # was matched\n                    matchedIdxs = [i for (i, b) in enumerate(matches) if b]\n                    counts = {}\n\n                    # loop over the matched indexes and maintain a count for\n                    # each recognized face\n                    for i in matchedIdxs:\n                        name = data[\"names\"][i]\n                        counts[name] = counts.get(name, 0) + 1\n\n                    # determine the recognized face with the largest number\n                    # of votes (note: in the event of an unlikely tie Python\n                    # will select first entry in the dictionary)\n                    name = max(counts, key=counts.get)\n\n                # update the list of names\n                names.append(name)\n\n            # loop over the recognized faces\n            for name in names:\n                # print to console, identified person\n                print(\"Person found: {}\".format(name))\n                # stop the video stream and return the identified user\n                vs.stop()\n                return name\n","sub_path":"reception-pi/facialrecognition/recognise.py","file_name":"recognise.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"552009284","text":"from typing import List\nfrom heapq import *\n\n\"\"\"\nGiven an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...]\nfind the minimum number of conference rooms required.\n\nEg: [[0, 30], [5, 10], [15, 20]]\nOutput : 2\n\nEg: [[7,10], [2,4]]\nOutput : 1\n\nApproach:\n\nWhen a room is taken, the room can not be used for another meeting until the current meeting is over.\nAs soon as the current meeting is finished, the room can be used for another meeting.\nWe can sort the meetings by start timestamps and sequentially assign each meeting to a room.\nEach time when we assign a room for a meeting, we check if any meeting is finished so that the room can be reused.\nIn order to efficiently track the earliest ending meeting, we can use a min heap.\nWhenever an old meeting ends before a new meeting starts, we reuse the room (i.e., do not add more room).\nOtherwise, we need an extra room (i.e., add a room).\n\"\"\"\n\n\nclass Solution:\n\n    def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n        \"\"\"\n        Time Complexity - O(nlogn)\n        'n' is the number of intervals\n        Space Complexity - O(n)\n        \"\"\"\n        # edge case\n        if not intervals:\n            return 0\n\n        # sort by start time\n        intervals.sort(key=lambda x: x[0])\n\n        # first meeting will always need one room\n        # just add the first end time\n        pq = [intervals[0][1]]\n\n        # start from the second meeting\n        for interval in range(1, len(intervals)):\n            # start and end time of the current meeting\n            start_time, end_time = intervals[interval][0], intervals[interval][1]\n            # start_time of current meeting >= end_time of the earliest ending meeting\n            # this means we can reuse that room for the current meeting\n            if start_time >= pq[0]:\n                heappop(pq)\n            # ending time of the current meeting\n            heappush(pq, end_time)\n        return len(pq)\n\n\nif __name__ == '__main__':\n    print(Solution().minMeetingRooms([[0, 30], [5, 10], [15, 20]]))\n    print(Solution().minMeetingRooms([[2, 15], [36, 45], [9, 29], [16, 23], [4, 9]]))\n    print(Solution().minMeetingRooms([[7, 10], [2, 4]]))\n","sub_path":"253_meeting_rooms_II.py","file_name":"253_meeting_rooms_II.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"566071413","text":"import datetime\nimport logging\nimport urllib.parse\n\nimport ballpark\nimport pandas as pd\nimport requests\nimport json\n\nfrom finapps.stocks.dao import MarketDataDAO\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass DigestError(Exception):\n pass\n\n\nclass StockService:\n \"\"\"\n Encapsulates business logic for the Stocks application\n \"\"\"\n def __init__(self, dao: MarketDataDAO, alpha_vantage_api_key: str):\n self.dao = dao\n self.api_key = alpha_vantage_api_key\n\n def historical_data_load(self, symbol: str):\n \"\"\"\n Perform historical data load for the given symbol\n \"\"\"\n LOGGER.info(f'[{symbol}] downloading historical data')\n df = StockService.alpha_vantage_download(symbol, self.api_key, historical=True)\n LOGGER.info(f'[{symbol}] persisting {len(df)} records')\n self.dao.insert(df)\n LOGGER.info(f'[{symbol}] done')\n\n def incremental_data_load(self, symbol: str):\n \"\"\"\n Perform incremental data load for the given symbol\n \"\"\"\n LOGGER.info(f'[{symbol}] downloading incremental data')\n df = StockService.alpha_vantage_download(symbol, self.api_key, historical=False)\n LOGGER.info(f'[{symbol}] persisting {len(df)} records')\n self.dao.insert(df)\n LOGGER.info(f'[{symbol}] done')\n\n def min_max_dates(self, symbol: str):\n \"\"\"\n Obtain the minimum and maximum persisted market dates for given symbol\n \"\"\"\n return self.dao.min_max_dates(symbol)\n\n def maybe_get_data(self, symbol: str, as_of_date: datetime.date, look_back=5):\n \"\"\"\n Attempt to return persisted market data for the given symbol and date. If no data is\n available for the given date, this function will look-back a maximum number of days to find data.\n \"\"\"\n lo_date = as_of_date - datetime.timedelta(days=look_back)\n df = self.dao.select(symbol, lo_date, as_of_date)\n if df.empty:\n LOGGER.warning(f\"[{symbol}] has no data for {as_of_date.strftime('%Y-%m-%d')}\")\n return df\n else:\n data = df.sort_values(by='MKT_DATE', ascending=False).head(1)\n if data['MKT_DATE'].iloc[0].to_pydatetime() != as_of_date:\n LOGGER.info(f\"[{symbol}] has no data for {as_of_date.strftime('%Y-%m-%d')}, \"\n f\"using closest available date \"\n f\"{data['MKT_DATE'].iloc[0].to_pydatetime().strftime('%Y-%m-%d')}\")\n return data\n\n def digest(self, symbol: str, as_of_date: datetime.date) -> pd.DataFrame:\n \"\"\"\n Create a performance digest for the given symbol on the given date. The digest compares\n the performance on the given date to 1d,1m,3m,6m,1y,3yr prior.\n \"\"\"\n def digest_str(value, compare_value, ballpark_value=False):\n pct = int((compare_value - value) / value * 100)\n if pct > 0:\n indicator = '↑'\n elif pct < 0:\n indicator = '↓'\n else:\n indicator = '→'\n\n if ballpark_value:\n value = ballpark.business(value)\n else:\n value = f'{value:,.0f}'\n\n return f'{value} ({indicator} {pct}%)'\n\n # strict - must have data for the digest mkt data. 
This is known as the zeroth (as-of) date.\n        as_of_data = self.maybe_get_data(symbol, as_of_date, look_back=0)\n        if as_of_data.empty:\n            raise DigestError(f\"Missing market data for as-of-date {as_of_date.strftime('%Y-%m-%d')}\")\n\n        config = {\n            '1d': as_of_date - datetime.timedelta(days=1),\n            '1m': as_of_date - datetime.timedelta(days=30),\n            '3m': as_of_date - datetime.timedelta(days=90),\n            '6m': as_of_date - datetime.timedelta(days=180),\n            '1y': as_of_date - datetime.timedelta(days=365),\n            '3y': as_of_date - datetime.timedelta(days=365 * 3)\n        }\n        data_by_label = {label: self.maybe_get_data(symbol, d) for label, d in config.items()}\n\n        # strict - there must be data for every configured market date\n        no_data = [df for df in data_by_label.values() if df.empty]\n        if len(no_data) > 0:\n            raise DigestError(f'Missing market data. Check market data is available. {json.dumps(config)}')\n\n        # strict - there must be no duplicate market dates\n        dates = [df['MKT_DATE'].values[0] for df in data_by_label.values()]\n        if len(set(dates)) != len(dates):\n            raise DigestError(f'Duplicate market data. Check market data is available. {json.dumps(config)}')\n\n        # stick the label on each data frame and concat them all\n        for label, df in data_by_label.items():\n            df['LABEL'] = label\n\n        digest = pd.concat(data_by_label.values())\n        digest = digest.sort_values(by=['MKT_DATE'], ascending=False)\n\n        # create digest columns, comparing everything to as of date\n        as_of_close = as_of_data.iloc[0]['MKT_CLOSE']\n        as_of_vol = as_of_data.iloc[0]['MKT_VOLUME']\n        digest['CLOSE'] = digest['MKT_CLOSE'].apply(lambda x: digest_str(x, as_of_close))\n        digest['VOLUME'] = digest['MKT_VOLUME'].apply(lambda x: digest_str(x, as_of_vol, ballpark_value=True))\n\n        # reduce columns\n        digest = digest[['LABEL', 'CLOSE', 'VOLUME']]\n\n        #\n        # create the digest - this is a 'tall' to 'wide' transform.\n        # Example Input:\n        #   DATE        PRICE VOLUME\n        #   2019-12-02  100   5000\n        #   2019-12-03  200   6000\n        #\n        # Example Output:\n        #   MEASURE  2019-12-02  2019-12-03\n        #   PRICE    100         200\n        #   VOLUME   5000        6000\n        #\n        metrics = ['CLOSE', 'VOLUME']\n        digest = digest.melt(\n            id_vars='LABEL',\n            value_vars=metrics,\n            var_name='METRIC'\n        )\n        digest = digest.pivot(index='METRIC', columns='LABEL', values='value')\n\n        # prefix the as-of-date values\n        as_of_label = datetime.datetime.strftime(as_of_date, '%a %d %b %y')\n        as_of_close = f'{as_of_close:,.0f}'\n        as_of_vol = ballpark.business(as_of_vol)\n        digest.insert(loc=0, column=as_of_label, value=[as_of_close, as_of_vol])\n\n        # prefix the symbol\n        digest.insert(loc=0, column='SYMBOL', value=[symbol]*len(metrics))\n\n        digest = digest.reset_index()\n        return digest\n\n    @staticmethod\n    def alpha_vantage_download(symbol: str, api_key: str, historical=False):\n        def str_to_date(d: str):\n            if len(d) != 10:\n                raise ValueError('Expected string of length 10 (yyyy-mm-dd)')\n            return datetime.date(int(d[0:4]), int(d[5:7]), int(d[8:10]))\n\n        def to_pandas(vantage_data, ts_name):\n            pandas_data = []\n            for date, item in vantage_data[ts_name].items():\n                pandas_data.append({\n                    'MKT_SYMBOL': symbol,\n                    'MKT_DATE': str_to_date(date),\n                    'MKT_OPEN': float(item['1. open']),\n                    'MKT_HIGH': float(item['2. high']),\n                    'MKT_LOW': float(item['3. low']),\n                    'MKT_CLOSE': float(item['4. close']),\n                    'MKT_VOLUME': float(item['5. 
volume'])\n                })\n            return pd.DataFrame(pandas_data)\n\n        def get_vantage_data(vantage_function):\n            url = \"https://www.alphavantage.co/query\"\n            params = urllib.parse.urlencode({\n                'function': vantage_function,\n                'outputsize': 'full' if historical else 'compact',\n                'symbol': symbol,\n                'apikey': api_key})\n            response = requests.get(url, params)\n            if not response.ok:\n                raise RuntimeError(f'Alpha Vantage request failed with status ({response.status_code}). '\n                                   f'URL was {url} and params were {params}')\n            doc = response.json()\n            return doc\n\n        return to_pandas(get_vantage_data('TIME_SERIES_DAILY'), 'Time Series (Daily)')\n","sub_path":"finapps/stocks/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":8110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"21462610","text":"\"\"\"\nApply gradient descent to the weight matrix\n\"\"\"\nimport numpy as np\nfrom ch03.ex11 import softmax\nfrom ch04.ex03 import cross_entropy\nfrom ch04.ex05 import numerical_gradient\n\n\nclass SimpleNetwork:\n    def __init__(self):\n        np.random.seed(1230)\n        self.W = np.random.randn(2,3) # randn : generates random numbers following a normal distribution\n        # -> set the initial values of the weight matrix randomly\n\n    def predict(self, x):\n        z = x.dot(self.W)\n        y = softmax(z)\n        return y\n\n    def loss(self, x, y_true):\n        \"\"\"\n        Loss function - classification problem (cross entropy)\n        \"\"\"\n        y_pred = self.predict(x)\n        ce = cross_entropy(y_pred, y_true)\n        return ce\n\n    def gradient(self, x, t):\n        \"\"\" Gradient of the loss function with respect to the W matrix \"\"\"\n        fn = lambda W: self.loss(x, t)\n        return numerical_gradient(fn, self.W)\n\n\nif __name__ == '__main__':\n    network = SimpleNetwork()\n    print('W:', network.W)\n\n    # assume y_true = [0, 0, 1] when x = [0.6, 0.9]\n    x = np.array([0.6, 0.9])\n    y_true = np.array([0.0, 0.0, 1.0])\n    print('x :', x)\n    print('y_true :', y_true)\n\n    # y_pred = network.predict(x)\n    # print('y_pred :', y_pred)\n    #\n    # ce = network.loss(x, y_true)\n    # print('cross entropy :', ce)\n    #\n    # # we need to find the W matrix values that make y_pred approximate y_true\n    # # -> compute the gradient that reduces the cross entropy between y_pred and y_true\n    #\n    # g1 = network.gradient(x, y_true)\n    # print('g1 :', g1) # result: (2,3) matrix, rate of change of each W value\n    # print()\n    #\n    # lr = 0.1\n    # network.W -= lr * g1\n    # print('W :', network.W)\n    # print('y_pred:', network.predict(x))\n    # print('ce =', network.loss(x, y_true))\n\n    # repeat lines 55 ~ 63 above 100 times inside a for loop\n    lr = 0.6\n    step = 100\n    for i in range(step):\n        gradient = network.gradient(x, y_true)\n        network.W -= lr * gradient\n        print(f'\\n>>> trial {i+1}')\n        print('gradient :\\n', gradient)\n        print('W :\\n', network.W)\n        print('y_pred :', network.predict(x))\n        print('cross entropy :', network.loss(x, y_true))\n        print(f'max probability : class {network.predict(x).argmax()+1}')\n","sub_path":"ch04/ex08.py","file_name":"ex08.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"204116557","text":"# NESTED LOOP\n\nlistku = [\n    ['a', 'b', 'c'],\n    ['d', 'e', 'f'],\n    ['g', 'h', 'i']\n]\n\n# accessing a list inside a list\n\nfor i in listku: # access the three rows in the list\n    for y in i: # access the three elements in the row\n        print(y)\n\n# a list inside a list inside a list\ndata = [\n    [\n        ['Andi', 'Budi', 'Caca'],\n        ['Deni', 'Euis', 'Fafa'],\n        ['Gigi', 'Hani', 'Inne']\n    ],\n    [\n        ['Janu', 'Koko', 'Lani'],\n        ['Momo', 'Nina', 'Opik'],\n        ['Peni', 'Qiqi', 'Rogi']\n    ],\n\n]\n\nfor listku in data:\n    for baris in listku:\n        for elemen in baris:\n            
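# innermost level: elemen is a single name string\n            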
print(elemen)\n\n\n\nprint()","sub_path":"Purwadhika 05-jumat/0.py","file_name":"0.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"49277667","text":"'''\nAuthor: Vikram Iyer\nGithub: https://github.com/vikramriyer\nLinkedIn: https://linkedin.com/in/vikramriyer\nMedium: https://medium.com/@TheLariat\n\nProblem Statement:\nGiven 2 arrays of integers, A and B, which hold the x and y coordinates of points in a grid,\nfind the minimum time it takes to go from (A[0], B[0]) to (A[-1], B[-1]), visiting the\npoints in order.\n\nConstraint: from a position x, y there are 8 moves possible which take 1 unit time\nx, y+1\nx, y-1\nx+1, y\nx-1, y\nx+1, y-1\nx-1, y+1\nx+1, y+1\nx-1, y-1\n\nExamples:\nInput:\nA = [0, 1, 1]\nB = [0, 1, 2]\nOutput: 2\n\nInput:\nA = [0, 1, 2]\nB = [0, 2, 2]\nOutput: 3\n\nSolution Approach: 1\n\n'''\nimport unittest\n\ndef sol_n2(A, B):\n    pass\n    '''\n    1. for each coordinate,\n        check if the next point is_one_unit_away(),\n        if yes:\n            then increment count and continue\n        else:\n            while loop till is_one_unit_away():\n                find_a_point_closest_to_next_coordinate()\n                increment counter\n    '''\n\ndef sol_n(A, B):\n    '''\n    Pseudo Code::\n    for each of the coordinate:\n        find the individual distance from the current point to next point\n        then take the max of the above individual distances and add to final result\n    '''\n    n = len(A)\n    result = 0\n    for i in range(0,n-1):\n        min_steps_in_A = abs(A[i]-A[i+1])\n        min_steps_in_B = abs(B[i]-B[i+1])\n        result += max(min_steps_in_A, min_steps_in_B)\n    return result\n\nclass MinStepsTest(unittest.TestCase):\n    def test_1(self):\n        result = sol_n([0,1,1],[0,1,2])\n        expected = 2\n        self.assertEqual(result, expected)\n        result = sol_n([0,2,2],[0,2,3])\n        expected = 3\n        self.assertEqual(result, expected)\n        result = sol_n([0,8,9],[0,5,10])\n        expected = 13\n        self.assertEqual(result, expected)\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"a100DaysOfCode/day6_code/min_steps_in_infinite_grid.py","file_name":"min_steps_in_infinite_grid.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"271227000","text":"#!/usr/bin/env python\n\nimport logging\nfrom pynput import keyboard\n\n\nimport numpy as np\nfrom pycrazyswarm import *\nimport sys\nimport signal\n\nimport rospy\nfrom std_msgs.msg import String\n\n\n\nZ = 0.3\nsleepRate = 30\n\ndef signal_handler(signal, frame):\n\tsys.exit(0)\n\nclass GestureDrone:\n\n    def __init__(self, cf):\n        self.cf = cf\n        self.velocity = 0.75\n        self.ang_velocity = 120\n        self.takeoff_height = 0.3\n\n        self.sleeptime = 0.5\n        self.msg= ''\n        #self.max_hight = 0.8\n        #self.hight = 0.0\n        print ('Press u to take off!')\n        self.listener()\n\n\n\n\n    def signal_callback(self, msg):\n\n        # if signal == 'm': #fix_position\n        #     self.cf.goTo(self.cf.position(), yaw=0, duration=0.5)\n\n        # if signal == 'w': #start_forward\n        #     self.cf.cmdVelocityWorld(np.array([self.velocity, 0, 0]), yawRate=0)\n        if msg.data == 'THREE' :#start_back\n            self.cf.cmdVelocityWorld(np.array([-self.velocity, 0, 0]), yawRate=0)\n        \n        if msg.data == 'TWO': #start_forward\n            self.cf.cmdVelocityWorld(np.array([self.velocity, 0, 0]), yawRate=0)\n        \n        #if msg.data == 'FIVE' :#start_up\n        #    self.cf.cmdVelocityWorld(np.array([0, 0, self.velocity]), yawRate=0)\n\n        if msg.data == 'FOUR': #start_down\n            self.cf.cmdVelocityWorld(np.array([0, 0, -self.velocity]), yawRate=0)\n\n        # if signal == 'd': #start_right\n        # 
self.cf.cmdVelocityWorld(np.array([0, -self.velocity, 0]), yawRate=0)\n # if signal == 'c': #start_down\n # self.cf.cmdVelocityWorld(np.array([0, 0, -self.velocity]), yawRate=0)\n # if signal == 'z': #start_up\n # self.cf.cmdVelocityWorld(np.array([0, 0, self.velocity]), yawRate=0)\n\n\n if msg.data == '':\n print(\"fixed\")\n #print('Kill engines')\n\t #cf.cmdStop()\n self.cf.cmdVelocityWorld(np.array([0, 0, 0]), yawRate=0)\n #return False\n\n #if key.char == 'q':\n # self.cf.start_turn_left(self.ang_velocity)\n #if key.char == 'e':\n # self.cf.start_turn_right(self.ang_velocity)\n\n # def on_release (self, key):\n # self.cf.cmdVelocityWorld(np.array([0, 0, 0]), yawRate=0)\n\n def slide_callback(self, msg):\n\n if msg.data == 'SLIDE RIGHT' :#start_right\n self.cf.cmdVelocityWorld(np.array([0, -self.velocity, 0]), yawRate=0)\n\n if msg.data == 'SLIDE LEFT' :#start_left\n self.cf.cmdVelocityWorld(np.array([0, self.velocity, 0]), yawRate=0)\n \n if msg.data == 'SLIDE UP': #take_off\n print (\"takeoff\")\n self.cf.takeoff(targetHeight=self.takeoff_height, duration=1.0)\n\n if msg.data == 'SLIDE DOWN': #land\n print (\"land\")\n self.cf.land(0.05, duration=1.0)\n\n if msg.data == '':\n print(\"fixed\")\n self.cf.cmdVelocityWorld(np.array([0, 0, 0]), yawRate=0)\n \n\n def listener(self):\n #rospy.init_node('drone_RTcommands', anonymous=True)\n handsignal_subscriber = rospy.Subscriber('/hand/signal', String, self.signal_callback)\n handslide_subscriber = rospy.Subscriber('/hand/direction', String, self.slide_callback)\n #cf.cmdVelocityWorld(np.array([self.velocity, 0, 0]), yawRate=0)\n rospy.spin()\n\n\nsignal.signal(signal.SIGINT, signal_handler)\n\nif __name__ == '__main__':\n\n swarm = Crazyswarm()\n timeHelper = swarm.timeHelper\n allcfs = swarm.allcfs\n\n #drone = KeyboardDrone(allcfs.crazyflies[0])\n #with keyboard.Listener(on_press=drone.on_press, on_release=drone.on_release) as listener:\n # listener.join()\n drone = GestureDrone(allcfs.crazyflies[0])\n\n #try:\n #Testing our function\n #rospy.init_node('drone_RTcommands', anonymous=True)\n #handsignal_subscriber = rospy.Subscriber('/hand/signal', String, signal_callback())\n #handslide_subscriber = rospy.Publisher('/hand/direction', String, queue_size=10)\n #handforward_publisher = rospy.Publisher('/hand/forward', String, queue_size=10)\n\n # execute()\n\n #except rospy.ROSInterruptException: pass\n","sub_path":"ros_ws/src/crazyswarm/scripts/backup/joyFromGesture.py","file_name":"joyFromGesture.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"204562598","text":"import PyQt5\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport pybox2d\nimport logging\nfrom pybox2d import JointType\n\nimport pyqtgraph.parametertree.parameterTypes as pTypes\nfrom pyqtgraph.parametertree import Parameter, ParameterTree, ParameterItem, registerParameterType\n\nfrom . tools import *\nfrom . 
recording_renderer import *\ndef isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n\ndef same_point(a,b):\n a = pg.Point(a)\n b = pg.Point(b)\n return isclose(a.x(), b.x()) and isclose(a.y(), b.y())\n \n\nclass PgDebugDraw(pybox2d.DebugDraw):\n def __init__(self, canvas):\n super(PgDebugDraw,self).__init__(float_colors=False)\n\n self.canvas = canvas\n self.framework_settings = self.canvas.framework_settings\n self.painter = None\n self.option = None\n self.widget = None\n\n #self.ppm = ppm\n #self.ippm = 1.0 / self.ppm\n self.outline_width = 0.01\n self.segment_width = 0.01\n self._bounding_box = [ [0,0],[0,0]]\n\n self.joint_colors = {\n JointType.unknown_joint : (230, 25, 75),\n JointType.revolute_joint : (255,0, 0),\n JointType.prismatic_joint : (255, 225, 25),\n JointType.distance_joint : (0, 130, 200),\n JointType.pulley_joint : (245, 130, 48),\n JointType.mouse_joint : (145, 30, 180),\n JointType.gear_joint : (70, 240, 240),\n JointType.wheel_joint : (240, 50, 230),\n JointType.weld_joint : (210, 245, 60),\n JointType.friction_joint : (250, 190, 190),\n JointType.rope_joint : (0, 128, 128),\n JointType.motor_joint : (230, 190, 255),\n }\n\n def set_painter(self, painter, option, widget):\n self.painter = painter\n self.option = option\n self.widget = widget\n\n def reset_bounding_box(self):\n self._bounding_box = [ [0,0],[0,0]]\n\n def _update_bounding_box(self, p):\n for c in range(2):\n if p[c] < self._bounding_box[0][c]:\n self._bounding_box[0][c] = p[c]\n\n if p[c] > self._bounding_box[1][c]:\n self._bounding_box[1][c] = p[c]\n\n def draw_solid_circle(self, center, radius, axis, color):\n\n\n painter = self.painter\n with SaveRestore(painter):\n \n p = QtCore.QPointF(center[0],center[1])\n\n\n \n with SaveRestore(painter):\n brush_color = QtGui.QColor(*color)\n pen = QtGui.QPen(brush_color, self.outline_width, QtCore.Qt.SolidLine)\n painter.setBrush(QtGui.QBrush(brush_color))\n painter.setPen(pen)\n painter.drawEllipse(p, radius, radius)\n\n # with SaveRestore(painter):\n # axis_color = QtGui.QColor(*[30.0 * c for c in color])\n # pen = QtGui.QPen(axis_color, self.outline_width, QtCore.Qt.SolidLine)\n # painter.setPen(pen)\n # raxis = [float(radius)*float(a) for a in axis]\n # painter.drawLine( pg.Point(center), \n # pg.Point(center[0] + raxis[0], center[1] +raxis[1]))\n \n def draw_circle(self, center, radius, color):\n\n\n color = QtGui.QColor(*color)\n\n p = QtCore.QPointF(*center)\n pen = QtGui.QPen(color, self.outline_width, QtCore.Qt.SolidLine)\n with SaveRestore(self.painter):\n self.painter.setPen(pen)\n self.painter.drawEllipse(p, radius, radius)\n\n def draw_segment(self,v1, v2, color):\n\n\n color = QtGui.QColor(*color)\n pen = QtGui.QPen(color, self.outline_width)\n if True:#not same_point(v1,v2):\n with SaveRestore(self.painter):\n self.painter.setPen(pen)\n qline = QtCore.QLineF(pg.Point(v1), pg.Point(v2))\n #print(v1, v2, qline)\n self.painter.drawLine(qline)\n \n def draw_polygon(self,vertices, color):\n\n\n color = QtGui.QColor(*color)\n pen = QtGui.QPen(color, self.outline_width)#, QtCore.Qt.SolidLine)\n with SaveRestore(self.painter):\n self.painter.setPen(pen)\n self.painter.drawConvexPolygon(numpy_to_qpoly(vertices))\n\n def draw_solid_polygon(self,vertices, color):\n\n brush_color = QtGui.QColor(*color)\n pen_color = brush_color\n\n pen = QtGui.QPen(pen_color, self.outline_width)#, QtCore.Qt.SolidLine)\n brush = QtGui.QBrush(brush_color)\n with SaveRestore(self.painter):\n 
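# set brush and pen inside the saved painter state so they do not leak into later draws\n            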
self.painter.setBrush(brush)\n            self.painter.setPen(pen)\n            self.painter.drawConvexPolygon(numpy_to_qpoly(vertices))\n\n    def draw_particles(self, centers, radius, colors=None):\n        #print(\"draw\")\n        painter = self.painter\n        if True:#colors is None or not self.framework_settings.draw_colored_particles:\n            color = (1,0,0)\n            brush_color = QtGui.QColor(*[255.0 * c for c in color])\n            pen_color = brush_color\n            pen = QtGui.QPen(pen_color, radius, QtCore.Qt.SolidLine)\n            brush = QtGui.QBrush(brush_color)\n\n\n            with SaveRestore(painter):\n                #pen.setCapStyle(QtCore.Qt.RoundCap);\n                painter.setBrush(QtGui.QBrush(brush_color))\n                painter.setPen(pen)\n                #print(centers)\n                painter.drawPoints(numpy_to_qpoly(centers))\n        else:\n            with SaveRestore(painter):\n                for center,color in zip(centers,colors):\n\n                    pen_color = QtGui.QColor(color)\n                    pen = QtGui.QPen(pen_color, radius, QtCore.Qt.SolidLine)\n                    painter.setPen(pen)\n                \n                painter.drawPoints(numpy_to_qpoly(centers))\n\n    def draw_joint(self, joint):\n        \n        anchor_a = joint.anchor_a\n        anchor_b = joint.anchor_b\n        joint_color = self.joint_colors[joint.type]\n\n        color = QtGui.QColor(*joint_color)\n        pen = QtGui.QPen(color, self.outline_width*3, QtCore.Qt.SolidLine)\n        with SaveRestore(self.painter):\n            self.painter.setPen(pen)\n            if not same_point(anchor_a, anchor_b):\n                self.painter.drawLine(pg.Point(anchor_a), pg.Point(anchor_b))\n\n        anchor_rad = self.outline_width * 5\n        pen = QtGui.QPen(color, self.outline_width, QtCore.Qt.SolidLine)\n\n        self._update_bounding_box([c + anchor_rad for c in anchor_a])\n        self._update_bounding_box([c - anchor_rad for c in anchor_b])\n\n        with SaveRestore(self.painter):\n            self.painter.setPen(pen)\n            self.painter.drawEllipse(pg.Point(anchor_a), anchor_rad, anchor_rad)\n            self.painter.drawEllipse(pg.Point(anchor_b), anchor_rad, anchor_rad)\n\nclass PgBatchDebugDraw(pybox2d.BatchDebugDraw):\n    def __init__(self, debug_draw_graphics_object):\n        super(PgBatchDebugDraw, self).__init__()\n\n        self.debug_draw_graphics_object = debug_draw_graphics_object\n        self.painter = None\n        self.paint_option = None\n        self.widget = None\n\n        #self.ppm = ppm\n        #self.ippm = 1.0 / self.ppm\n        self.outline_width = 0.01\n        self.segment_width = 0.01 \n\n    def set_painter(self, painter, option, widget):\n        self.painter = painter\n        self.paint_option = option\n        self.widget = widget\n\n    def drawing_aabb(self, aabb):\n        #print(\"aabb\",aabb)\n        lower_bound = aabb.lower_bound\n        upper_bound = aabb.upper_bound\n        shape = upper_bound - lower_bound \n        \n        # enlarge the bb by half its extent on each side\n        extended_lower_bound = lower_bound - shape/2.0\n        extended_upper_bound = upper_bound + shape/2.0\n        extended_shape = extended_upper_bound - extended_lower_bound \n        rect = QtCore.QRectF(pg.Point(extended_lower_bound), pg.Point(extended_upper_bound))\n        self.debug_draw_graphics_object._bounding_rect = rect\n\n    def draw_solid_polygons(self, points, connect, color):\n        if points.size > 0:\n            path = QtGui.QPainterPath()\n            path.moveTo(pg.Point(points[0,:]))\n            path2 = pg.arrayToQPath(x=points[:,0], y=points[:,1], connect=connect)\n            path.addPath(path2)\n            painter = self.painter\n            with SaveRestore(painter):\n                path.setFillRule(QtCore.Qt.WindingFill)\n                brush_color = QtGui.QColor(*[255.0 * c for c in color])\n                pen_color = QtGui.QColor(*[100.0 * c for c in color])\n                pen = QtGui.QPen(pen_color, self.outline_width)#, QtCore.Qt.SolidLine)\n                brush = QtGui.QBrush(brush_color)\n                painter.setPen(pen)\n                painter.setBrush(brush)\n                path.moveTo(0,0)\n                painter.drawPath(path)\n\n    def draw_polygons(self, points, connect, color):\n        path = pg.arrayToQPath(x=points[:,0], 
y=points[:,1], connect=connect)\n painter = self.painter\n with SaveRestore(painter):\n pen_color = QtGui.QColor(*[255.0 * c for c in color])\n pen = QtGui.QPen(pen_color, self.outline_width)#, QtCore.Qt.SolidLine)\n painter.setPen(pen)\n path.moveTo(0,0)\n #painter.setBrush(QtCore.Qt.NoBrush)\n painter.drawPath(path)\n\n\n def draw_segments(self, points, connect, color):\n path = pg.arrayToQPath(x=points[:,0], y=points[:,1], connect=connect)\n painter = self.painter\n with SaveRestore(painter):\n pen_color = QtGui.QColor(*[255.0 * c for c in color])\n pen = QtGui.QPen(pen_color, self.outline_width)#, QtCore.Qt.SolidLine)\n painter.setPen(pen)\n #painter.setBrush(pen_color)\n painter.drawPath(path)\n\n def draw_particles(self, centers, radius, colors=None):\n #print(\"draw parts\")\n painter = self.painter\n if True:#colors is None or not self.framework_settings.draw_colored_particles:\n color = (1,0,0)\n brush_color = QtGui.QColor(*[255.0 * c for c in color])\n pen_color = brush_color\n pen = QtGui.QPen(pen_color, radius, QtCore.Qt.SolidLine)\n brush = QtGui.QBrush(brush_color)\n\n\n with SaveRestore(painter):\n #pen.setCapStyle(QtCore.Qt.RoundCap);\n painter.setBrush(QtGui.QBrush(brush_color))\n painter.setPen(pen)\n #print(centers)\n painter.drawPoints(numpy_to_qpoly(centers))\n else:\n with SaveRestore(painter):\n for center,color in zip(centers,colors):\n\n pen_color = QtGui.QColor(color)\n pen = QtGui.QPen(pen_color, radius, QtCore.Qt.SolidLine)\n painter.setPen(pen)\n \n painter.drawPoints(numpy_to_qpoly(centers))\n\n def draw_circles(self, centers, radii, color):\n pass\n\nclass DebugDrawGraphicsObject(pg.GraphicsObject):\n def __init__(self, parent = None):\n pg.GraphicsObject.__init__(self,parent)\n self.framework = None\n self.world = None\n self.framework_settings = None\n \n # debug draw\n self.debug_draw = PgDebugDraw(self)\n \n\n # batch debug draw\n self.batch_debug_draw = PgBatchDebugDraw(self)\n\n\n # RECORDING RENDERER\n self.recording_renderer = RecordingRenderer()\n\n self._build_param()\n \n self.not_run = True\n self._bounding_rect = None\n self._supress_events = False\n\n def set_example(self, example):\n\n self.framework = example\n self.world = self.framework.world\n self.framework_settings = self.framework.framework_settings\n\n self.world.set_batch_debug_draw(self.batch_debug_draw)\n #self.world.set_debug_draw(self.debug_draw)\n\n # init shape bits\n self._supress_events = True\n self._init_param_values()\n self._supress_events = False\n self.on_debug_draw_bits_changed()\n\n self.not_run = True\n self._bounding_rect = None\n\n flags = ['particle']\n self.debug_draw.append_flags(flags)\n\n\n def _init_param_values(self):\n\n assert self.framework is not None, \"framework is None (maybe missing set_example?)\"\n fms = self.framework_settings\n batch_draw_debug_data_opts = self.batch_debug_draw.options\n draw_bit_param = self.parameter.param('draw_bits')\n draw_bit_param.param('draw shapes').setValue(fms.draw_shapes)\n draw_bit_param.param('draw joints').setValue( fms.draw_joints)\n draw_bit_param.param('draw aabb').setValue(fms.draw_aabbs)\n draw_bit_param.param('draw pairs').setValue(fms.draw_pairs)\n draw_bit_param.param('draw center of mass').setValue(fms.draw_coms)\n draw_bit_param.param('draw particle').setValue(fms.draw_particles)\n\n batch_draw_debug_data_opts.draw_shapes = fms.draw_shapes\n batch_draw_debug_data_opts.draw_joints = fms.draw_joints\n batch_draw_debug_data_opts.draw_aabbs = fms.draw_aabbs\n batch_draw_debug_data_opts.draw_coms = 
fms.draw_coms\n batch_draw_debug_data_opts.draw_particles = fms.draw_particles\n\n def _build_param(self):\n #fms = self.framework_settings\n #batch_draw_debug_data_opts = self.batch_debug_draw.options\n params = [\n {'name': 'draw_bits', 'type': 'group', 'children': \n [\n {'name': 'draw shapes', 'type': 'bool', 'value': False},\n {'name': 'draw joints', 'type': 'bool', 'value': False},\n {'name': 'draw aabb', 'type': 'bool', 'value': False},\n {'name': 'draw pairs', 'type': 'bool', 'value': False},\n {'name': 'draw center of mass', 'type': 'bool', 'value': False},\n {'name': 'draw particle', 'type': 'bool', 'value': False},\n ]\n },\n {'name': 'particles', 'type': 'group', 'children': \n [\n {'name': 'colored', 'type': 'bool', 'value': False},\n ]\n }\n ]\n\n # batch_draw_debug_data_opts.draw_shapes = fms.draw_shapes\n # batch_draw_debug_data_opts.draw_joints = fms.draw_joints\n # batch_draw_debug_data_opts.draw_aabbs = fms.draw_aabbs\n # batch_draw_debug_data_opts.draw_coms = fms.draw_coms\n\n self.parameter = Parameter.create(name='Debug Draw', type='group', children=params)\n draw_bit_param = self.parameter.param('draw_bits')\n for child in draw_bit_param.children():\n child.sigValueChanged.connect(self.on_debug_draw_bits_changed)\n\n def on_debug_draw_bits_changed(self):\n if not self._supress_events:\n fms = self.framework_settings\n batch_draw_debug_data_opts = self.batch_debug_draw.options\n draw_bit_param = self.parameter.param('draw_bits')\n self.debug_draw.clear_flags(['shape','joint','aabb','pair','center_of_mass','particle'])\n\n flags = []\n\n \n fms.draw_shapes = draw_bit_param.param('draw shapes').value()\n fms.draw_joints = draw_bit_param.param('draw joints').value()\n fms.draw_aabbs = draw_bit_param.param('draw aabb').value()\n fms.draw_pairs = draw_bit_param.param('draw pairs').value()\n fms.draw_coms = draw_bit_param.param('draw center of mass').value()\n fms.draw_particles = draw_bit_param.param('draw particle').value()\n\n\n batch_draw_debug_data_opts.draw_shapes = fms.draw_shapes\n batch_draw_debug_data_opts.draw_joints = fms.draw_joints\n batch_draw_debug_data_opts.draw_aabbs = fms.draw_aabbs\n batch_draw_debug_data_opts.draw_coms = fms.draw_coms\n batch_draw_debug_data_opts.draw_particles = fms.draw_particles\n print(batch_draw_debug_data_opts.draw_particles)\n #if(draw_bit_param.param('draw shapes').value()):\n # flags.append('shape')\n #if(draw_bit_param.param('draw joints').value()):\n # flags.append('joint')\n #if(draw_bit_param.param('draw aabb').value()):\n # flags.append('aabb')\n #if(draw_bit_param.param('draw pairs').value()):\n # flags.append('pair')\n #if(draw_bit_param.param('draw center of mass').value()):\n # flags.append('center_of_mass')\n if(draw_bit_param.param('draw particle').value()):\n flags.append('particle')\n\n self.debug_draw.append_flags(flags)\n\n def physics_to_canvas(self, p):\n return (p[0], p[1])\n def canvas_to_physics(self, p):\n return (p[0], p[1])\n\n def paint_from_recording_renderer(self, painter, option, widget):\n rec = self.recording_renderer.recordings\n for center, radius, axis, c in rec['solid_circle']:\n self.debug_draw.draw_segment(center, radius, axis, c)\n for center, radius, c in rec['circle']:\n self.debug_draw.draw_segment(center, radius, c)\n for v1, v2, c in rec['segment']:\n self.debug_draw.draw_segment(v1, v2, c)\n\n self.recording_renderer.reset_recordings()\n\n\n\n\n\n def paint(self, painter, option, widget):\n current_pixel_size = self.pixelSize()\n p = 0.5 * current_pixel_size[0] + 0.5 * 
current_pixel_size[1]\n\n self.debug_draw.outline_width = (p) * 1.0\n self.debug_draw.segment_width = (p) * 1.0\n self.batch_debug_draw.outline_width = (p) * 1.0\n self.batch_debug_draw.segment_width = (p) * 1.0\n\n\n with SaveRestore(painter):\n self.debug_draw.set_painter(painter, option, widget)\n self.batch_debug_draw.set_painter(painter, option, widget)\n\n self.debug_draw.reset_bounding_box()\n self.world.draw_debug_data()\n self.world.batch_draw_debug_data()\n self.not_run = False\n\n self.paint_from_recording_renderer(painter, option, widget)\n\n def boundingRect(self):\n if self._bounding_rect is None:\n return QtCore.QRectF()\n else:\n return self._bounding_rect\n","sub_path":"liquidfun/Box2D/pybox2d/testbed/framework/framework/pg_gui/debug_draw.py","file_name":"debug_draw.py","file_ext":"py","file_size_in_byte":18204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"416503860","text":"import sys\n\n# function used to tidy up the display of values\ndef tidy_output(val):\n # if value has a trailing zero, remove it\n if int(val) - val == 0:\n return int(val)\n \n # else return to 2.d.p\n return round(val,2)\n\n# function to calculate the lbtt \n# this could be done recursively \ndef calculate_basic_lbtt(price):\n # first do error checking on type and value for cases where prior validation has not occurred\n if type(price) not in [float, int]:\n raise TypeError('The price must be either an integer or a float, no strings are permitted')\n \n if price < 0:\n raise ValueError('The price cannot be negative')\n \n # define the tax bounds and the tax associated with each bound\n tax_bounds = {250000:0.05, 325000:0.1, 750000:0.12}\n # extract just the bound values and reverse their order as shown in the dictionary above\n bound_vals = list(tax_bounds.keys())\n bound_vals.reverse()\n \n # check the price against the first bound value\n if price <= bound_vals[len(bound_vals)-1]:\n return 0 # base case, don't iterate if this is the case, negative values are also caught here if not validated\n \n # base case not satisfied so now we iterate down the tax bounds from greatest to smallest\n total_tax = 0\n for idx in range(0, len(bound_vals)):\n # if the price is greater than the bound, \n # tax all of the value above that bound to get the difference in tax for that bound\n # then add the result to the running tax\n # update the new price to be the value of the current bound, so that for all lower bounds, we tax the full amount\n # for each bound until we reach the last one, updating the total tax as we go\n if price > bound_vals[idx]:\n difference = price - bound_vals[idx]\n price = bound_vals[idx]\n total_tax += difference*tax_bounds[bound_vals[idx]] \n \n # return the final tax to 2.d.p\n return round(total_tax, 2)\n\n# function used to validate the price\ndef validate_price(price):\n try:\n # try converting the price to a float and check the value is positive\n purchase_amount = float(price)\n if purchase_amount > 0:\n # valid price\n return True\n else:\n # not greater than 0\n return False\n except ValueError:\n # not a float / int\n return False \n\n# used to recieve input from the user if it is not already provided\ndef recieve_input():\n valid = False\n purchase_amount = 0\n # validate the input to ensure it is a number\n # keep asking until a valid value is given\n while not valid:\n purchase_amount = input(\"\\nPlease enter the purchase price of the house: \")\n # validate the price, ask again is invalid\n if 
validate_price(purchase_amount):\n purchase_amount = float(purchase_amount)\n valid = True\n else:\n print(\"The price entered was not valid, please enter a positive number.\") \n \n purchase_amount = round(purchase_amount, 2) # ensure it is to 2.d.p for pennies\n return purchase_amount\n\n# enter the program logic here\nif __name__ == \"__main__\":\n # get the input price, first test if a price is provided in the program arguments\n price = 0\n if len(sys.argv) == 2:\n # there is a program argument\n price = sys.argv[1]\n if validate_price(price):\n # if the provided price is valid, use it and ensure it is a float\n price = float(price)\n else:\n # it is not valid so get the user to enter is manually\n print(\"The provided argument was not valid.\")\n price = recieve_input()\n else:\n # no provided argument so get user input\n price = recieve_input()\n \n # with a valid house purchase price, calculate the tax\n lbtt = calculate_basic_lbtt(price)\n # display the tax and tidy the output to remove trailing decimal zeros / round to 2.d.p\n print(\"The tax on the property valued at £{} comes to £{}\".format(tidy_output(price), tidy_output(lbtt)))\n \n # as a small extra, calculate the effective tax rate and then display it in a tidy manner\n effective_tax_rate = (lbtt/price)*100\n to_print = None\n if effective_tax_rate > 1:\n # print tax rate to 2.d.p\n to_print = round(effective_tax_rate, 2) \n else:\n # only print the first significant digit if tax rate below zero\n to_print = eval(\"%.0e\" % (effective_tax_rate))\n \n print(\"The effective tax rate is {}%\".format(to_print))","sub_path":"lbtt_calc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"167545818","text":"import numpy as np\nfrom nilearn.image import load_img\nfrom brainiak.searchlight.searchlight import Searchlight\nfrom scipy import stats\nimport nibabel as nib\n\n# Take subject ID as input\nsubjs = ['MES_022817_0','MES_030217_0','MES_032117_1','MES_040217_0','MES_041117_0','MES_041217_0','MES_041317_0','MES_041417_0','MES_041517_0','MES_042017_0','MES_042317_0','MES_042717_0','MES_050317_0','MES_051317_0','MES_051917_0','MES_052017_0','MES_052017_1','MES_052317_0','MES_052517_0','MES_052617_0','MES_052817_0','MES_052817_1','MES_053117_0','MES_060117_0','MES_060117_1']\n\ndatadir = '/jukebox/norman/jamalw/MES/'\nmask_img = load_img(datadir + 'data/MNI152_T1_2mm_brain_mask.nii')\nmask_img = mask_img.get_data()\nglobal_outputs_all = np.zeros((91,109,91,len(subjs)))\n\n\n# Definte function that takes the difference between within vs. 
between genre comparisons\ndef corr2_coeff(AB,msk,myrad,bcast_var):\n if not np.all(msk):\n return None\n A,B = (AB[0], AB[1])\n A = A.reshape((-1,A.shape[-1]))\n B = B.reshape((-1,B.shape[-1]))\n corr_eye = np.identity(8)\n corrAB = np.corrcoef(A.T,B.T)[16:,:16]\n classical_within = corrAB[0:8,0:8]\n ClassicalWithinAvgOn = np.mean(classical_within[corr_eye == 1])\n ClassicalBtwnAvgOff = np.mean(classical_within[corr_eye == 0])\n diff = ClassicalWithinAvgOn - ClassicalBtwnAvgOff\n\n # compute difference score for permuted matrices \n np.random.seed(0)\n diff_perm_holder = []\n for i in range(100):\n A_perm = np.random.permutation(A.T)\n B_perm = np.random.permutation(B.T)\n corr_eye = np.identity(8)\n corrAB_perm = np.corrcoef(A_perm,B_perm)[16:,:16]\n classical_within_perm = corrAB_perm[0:8,0:8]\n ClassicalWithinAvgOn_perm = np.mean(classical_within_perm[corr_eye == 1])\n ClassicalBtwnAvgOff_perm = np.mean(classical_within_perm[corr_eye == 0])\n diff_perm = ClassicalWithinAvgOn_perm - ClassicalBtwnAvgOff_perm\n diff_perm_holder.append(diff_perm)\n\n z = (diff - np.mean(diff_perm_holder))/np.std(diff_perm_holder)\n return z\n\nfor i in range(len(subjs)):\n # Load functional data and mask data\n data1 = load_img(datadir + 'subjects/' + subjs[i] + '/data/avg_reorder1.nii')\n data2 = load_img(datadir + 'subjects/' + subjs[i] + '/data/avg_reorder2.nii')\n data1 = data1.get_data()\n data2 = data2.get_data()\n\n np.seterr(divide='ignore',invalid='ignore')\n\n # Create and run searchlight\n sl = Searchlight(sl_rad=1,max_blk_edge=5)\n sl.distribute([data1,data2],mask_img)\n sl.broadcast(None)\n print('Running Searchlight...')\n global_outputs = sl.run_searchlight(corr2_coeff)\n global_outputs_all[:,:,:,i] = global_outputs \n \n# Plot and save searchlight results\nglobal_outputs_avg = np.mean(global_outputs_all,3)\nmaxval = np.max(global_outputs_avg[np.not_equal(global_outputs_avg,None)])\nminval = np.min(global_outputs_avg[np.not_equal(global_outputs_avg,None)])\nglobal_outputs = np.array(global_outputs_avg, dtype=np.float)\nglobal_nonans = global_outputs[np.not_equal(global_outputs,None)]\nglobal_nonans = np.reshape(global_nonans,(91,109,91))\nmin1 = np.min(global_nonans[~np.isnan(global_nonans)])\nmax1 = np.max(global_nonans[~np.isnan(global_nonans)])\nimg = nib.Nifti1Image(global_nonans,np.eye(4))\nimg.header['cal_min'] = min1\nimg.header['cal_max'] = max1\nnib.save(img,'classical_within_permuted.nii.gz')\nnp.save('classical_within_permuted',global_nonans)\n\n\n#import matplotlib.pyplot as plt\n#for (cnt, img) in enumerate(global_outputs):\n #plt.imshow(img,vmin=minval,vmax=maxval)\n #plt.colorbar()\n #plt.savefig(datadir + 'searchlight_images/' + 'img' + str(cnt) + '.png')\n #plt.clf()\n\n\n","sub_path":"ClassicalSearchlight.py","file_name":"ClassicalSearchlight.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"304496230","text":"#!/usr/bin/python\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler\n\ndef plot_data(data):\n plt.figure(1)\n plt.subplot(211)\n plt.plot(data[2], data[0], 'ro')\n plt.subplot(212)\n plt.plot(data[2], data[1], 'ro')\n plt.show()\n\ndef ex1(file_path, alpha, epoch_range=100000, step=10000):\n raw_data = np.loadtxt(file_path, delimiter=',', dtype=int, unpack=True)\n m = len(raw_data[0])\n\n #plot_data(raw_data)\n\n #scale data\n data = np.column_stack((raw_data[0], 
raw_data[1]))\n scaler = MinMaxScaler(copy=False, feature_range=(0, 1))\n data = scaler.fit_transform(data)\n #data = tf.keras.utils.normalize(raw_data)\n\n # reshape\n train_y = np.reshape(raw_data[2], [m,1])\n train_x = np.reshape(np.column_stack((np.ones(m), data)), [m,3])\n\n # build tf model\n with tf.device('/cpu:0'):\n X = tf.placeholder(dtype=tf.float32, shape=[m, 3])\n Y = tf.placeholder(dtype=tf.float32, shape=[m, 1])\n W = tf.get_variable(\"theta\", shape=[3, 1], dtype=tf.float32)\n b = tf.Variable(m, dtype=tf.float32)\n\n pred = tf.matmul(X, W)\n J = tf.reduce_mean(tf.square(tf.subtract(pred,Y)))\n optimizer = tf.train.GradientDescentOptimizer(alpha).minimize(J)\n acc, acc_up = tf.metrics.accuracy(data['y'], pred)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(epoch_range):\n _, c, _ = sess.run([optimizer, J, acc_up], feed_dict={X: train_x, Y: train_y})\n if (epoch % step == 0):\n print(\"epoch: {}, cost={}, W={}, acc={}\".format(epoch, c, sess.run(W), sess.run(acc)))\n\n # predict price on original data(don't try this at home ;)\n for i in range(m):\n x1 = [[raw_data[0][i], raw_data[1][i]]]\n y = [raw_data[2][i]]\n x = scaler.transform(x1)\n x = [[1.0, x[0][0], x[0][1]]]\n print(\"predict:{} = {}/{}\".format(x1, sess.run(tf.matmul(x, W)), y))\n \nif __name__ == '__main__':\n ex1('ex1data2.txt', 0.005)\n","sub_path":"linear_regression/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"635612288","text":"class Mot(object):\n \"\"\"Classe représentant un mot sous forme d'ARN\"\"\"\n def __init__(self, chaine):\n super(Mot, self).__init__()\n self.chaine = chaine\n\n def __str__(self) :\n return self.chaine\n\n def __eq__(self, other) :\n return self.chaine == other.chaine\n\n def __hash__(self) :\n return hash(self.chaine)\n\n\n def reverse(self) :\n \"\"\"Retourne le reverse du Mot\"\"\"\n return Mot(self.chaine[::-1])\n\n def complem(self) :\n \"\"\"Retourne le complémentaire du Mot\"\"\"\n res = \"\"\n for lettre in self.chaine :\n if lettre == 'A' :\n res+='U'\n elif lettre == 'U' :\n res+='A'\n elif lettre == 'G' :\n res+='C'\n else :\n res+='G'\n return Mot(res)\n\n def revCompl(self) :\n \"\"\"Retourne le reverse complémentaire du Mot\"\"\"\n return self.complem().reverse()\n\n def algoNaif(self,mot,normal=True,reverse=False,complem=False,revCompl=False) :\n \"\"\"\n Renvoie l'indice des occurences du Mot donné dans le Mot courant.\n Utilise un algorithme de recherche naïf\n \"\"\"\n lesMots = []\n res = []\n\n #initialisation de la liste de mots à rechercher. (permet de prendre en compte les reverse et/ou complémentaires )\n if normal :\n lesMots.append(mot)\n if reverse :\n lesMots.append(mot.reverse())\n if complem :\n lesMots.append(mot.complem())\n if revCompl :\n lesMots.append(mot.revCompl())\n\n #algorithme naïf\n n = len(self.chaine)\n m = len(mot.chaine)\n\n j = 0\n for motTest in lesMots :\n for i in range(n-m+1) :\n trouve = True\n for j in range(m) :\n if not motTest.chaine[j] == self.chaine[i+j] :\n trouve = False\n break\n if trouve :\n res.append(i)\n\n return res\n\n def algoKMP(self,mot,normal=True,reverse=False,complem=False,revCompl=False) :\n \"\"\"\n Renvoie l'indice des occurences du Mot donné dans le Mot courant.\n Utilise un algorithme de recherche Knuth-Morris-Pratt\n \"\"\"\n lesMots = []\n res = []\n\n #initialisation de la liste de mots à rechercher. 
(permet de prendre en compte les reverse et/ou complémentaires )\n if normal :\n lesMots.append(mot)\n if reverse :\n lesMots.append(mot.reverse())\n if complem :\n lesMots.append(mot.complem())\n if revCompl :\n lesMots.append(mot.revCompl())\n\n #algorithme Knuth-Morris-Pratt\n n = len(self.chaine)\n m = len(mot.chaine)\n\n for motTest in lesMots :\n\n longestPrefixSuffixArray = self.getLongestPrefixSuffixArray(motTest)\n i = 0\n j = 0\n\n while i < n:\n if motTest.chaine[j] == self.chaine[i] :\n i += 1\n j += 1\n\n if j == m :\n res.append(i - j)\n j = longestPrefixSuffixArray[j - 1]\n\n elif i < n and motTest.chaine[j] != self.chaine[i] :\n if j != 0 :\n j = longestPrefixSuffixArray[j - 1]\n else :\n i += 1\n return res\n\n def getLongestPrefixSuffixArray(self, mot):\n \"\"\"\n Préparation du tableau utilisé par algoKMP\n \"\"\"\n n = len(self.chaine)\n m = len(mot.chaine)\n longestPrefixSuffixArray = [0] * m\n oldPrefixSuffixLength = 0\n\n i = 1\n\n while i < m :\n if mot.chaine[i] == mot.chaine[oldPrefixSuffixLength] :\n oldPrefixSuffixLength += 1\n longestPrefixSuffixArray[i] = oldPrefixSuffixLength\n i+=1\n else :\n if oldPrefixSuffixLength != 0 :\n oldPrefixSuffixLength = longestPrefixSuffixArray[oldPrefixSuffixLength - 1]\n else :\n longestPrefixSuffixArray[i] = 0\n i += 1\n\n return longestPrefixSuffixArray\n\n def occurencesMotsTailleN(self, N) :\n \"\"\"\n Retourne les occurences de tous les sous-mots de taille N présents dans le Mot courant\n Utilise un algorithme de recherche naïf\n \"\"\"\n n = len(self.chaine)\n mapMot = {}\n for i in range(n-N+1) :\n mot = Mot(self.chaine[i:i+N])\n if(not mot in mapMot ):\n mapMot[mot] = [-1]\n for mot in mapMot :\n mapMot[mot] = self.algoNaif(mot, normal=True, revCompl=True)\n return mapMot\n\n def occurencesMotsTailleNKMP(self, N) :\n \"\"\"\n Retourne les occurences de tous les sous-mots de taille N présents dans le Mot courant\n Utilise l'algorithme de recherche Knuth-Morris-Pratt\n \"\"\"\n n = len(self.chaine)\n mapMot = {}\n for i in range(n-N+1) :\n mot = Mot(self.chaine[i:i+N])\n if(not mot in mapMot ):\n mapMot[mot] = [-1]\n for mot in mapMot :\n mapMot[mot] = self.algoKMP(mot, normal=True, revCompl=True)\n return mapMot\n\n\ndef printMapMot(mapMot):\n \"\"\"Imprime une map contenant un mot et le nombre de ses occurences\"\"\"\n for key, value in mapMot.items() :\n print('key = ' + str(key) + '### value = ' + str(value) )\n\ndef printPlot(mapMot,pathname) :\n \"\"\"\n Écrit les coordonnées des occurences données dans le fichier \"pathname\"\n Permet de dessiner un dotplot\n \"\"\"\n f = open(pathname,'w')\n for mot,liste in mapMot.items() :\n for i in liste :\n for j in liste :\n f.write(str(i)+'\\t'+str(j)+'\\n')\n f.close()\n\ndef lecture(pathname) :\n \"\"\"Crée un Mot contenu dans le fichier pathname\"\"\"\n f = open(pathname,'r')\n #premiere ligne osef\n f.readline()\n chaine = \"\"\n for ligne in f :\n chaine += ligne.rstrip()\n f.close()\n # print(chaine)\n return Mot(chaine)\n\n\n\n\n# Tests\ndef testModifsMots() :\n \"\"\"Teste si les modificateurs du mot (reverse, complem, revCompl) agissent comme prévu\"\"\"\n print(\"Test modificateurs\")\n mot = Mot(\"ACAUAG\")\n print(\"chaine : ACAUAG\")\n print(\"reverse : \", mot.reverse())\n print(\"complem : \", mot.complem())\n print(\"revCompl : \", mot.revCompl())\n\ndef testAlgoNaif() :\n \"\"\"Teste l'algorithme de recherche naïf\"\"\"\n print(\"Test algoNaif\")\n mot = Mot(\"AA\")\n text = Mot(\"AACGUAACGGAA\")\n print(\"mot : \",mot)\n print(\"text : \", text)\n print 
(\"occurences : \",text.algoNaif(mot))\n\ndef testAlgoKMP() :\n \"\"\"Teste l'algorithme Knuth-Morris-Pratt\"\"\"\n print(\"Test algoKMP\")\n mot = Mot(\"AA\")\n text = Mot(\"AACGUAACGGAA\")\n print(\"mot : \",mot)\n print(\"text : \", text)\n print (\"occurences : \",text.algoKMP(mot))\n\ndef testLecture() :\n \"\"\"Teste si la lecture d'un fichier fonctionne\"\"\"\n print(\"Test Lecture\")\n mot = lecture(\"donneeTest.fasta\")\n print(mot)\n\ndef createPlotNaif(fileIn, fileOut, N) :\n \"\"\"\n Crée un dotplot fileOut contenant les occurences des sous-mots de taille N dans le fichier fileIn\n Utilise l'algorithme Naïf\n \"\"\"\n print(\"Création de DotPlot -- Naïf\", fileIn)\n print(\"Lecture du fichier\", fileIn)\n mot = lecture(fileIn)\n print(\"Recherche (Naïf) des sous mots de taille\", N)\n mapMot = mot.occurencesMotsTailleN(N)\n print(\"Écriture des résultats dans\", fileOut)\n printPlot(mapMot, fileOut)\n\ndef createPlotKMP(fileIn, fileOut, N) :\n \"\"\"\n Crée un dotplot fileOut contenant les occurences des sous-mots de taille N dans le fichier fileIn\n Utilise l'algorithme Knuth-Morris-Pratt\n \"\"\"\n print(\"Création de DotPlot -- KMP\", fileIn)\n print(\"Lecture du fichier\", fileIn)\n mot = lecture(fileIn)\n print(\"Recherche (KMP) des sous mots de taille\", N)\n mapMotKMP = mot.occurencesMotsTailleNKMP(N)\n print(\"Écriture des résultats dans\", fileOut)\n printPlot(mapMotKMP, fileOut)\n\n# # Décommenter ces lignes pour lancer les tests\n# testModifsMots()\n# testAlgoNaif()\n# testAlgoKMP()\n# testLecture()\n\n\n#createPlotNaif(\"data-mirna/ARNmessager-1.fasta\", \"occurencesARN.dat\", 6)\n#createPlotKMP(\"data-mirna/ARNmessager-1.fasta\", \"occurencesARNKMP.dat\", 6)\n\n#createPlotKMP(\"donneeTest.fasta\", \"occurencesARNKMP.dat\", 2)\n","sub_path":"Mot.py","file_name":"Mot.py","file_ext":"py","file_size_in_byte":8457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"385150686","text":"from __future__ import absolute_import\n\nimport maya.cmds as cmds\n\nfrom rigging.library.base.face import blendshape as rlbf_blendshape\nfrom rigging.library.module import bipedModule as rlm_bipedModule, faceModule as rlm_faceModule\nfrom rigging.library.module.body import pushJointModule as rlmb_pushJointModule\nfrom rigging.library.utils import core as rlu_core\nfrom rigging.tools import utils as rt_utils\n\n# load Plug-ins\nrlu_core.load_matrix_quad_plugin()\n\n# build Spine\nprefix_spine = 'spine'\nprefix_spine_setup = 'spineSetup'\n\n# build Clavicle\nprefix_clav = 'clavicle'\n\n# build Arm\nprefix_arm = 'arm'\nprefix_upperArm = 'upperArm'\nprefix_forearm = 'forearm'\nprefix_wrist = 'wrist'\nprefix_elbow = 'elbow'\nprefix_hand = 'hand'\nprefix_arm_setup = 'armSetup'\nprefix_finger_setup = 'fingerSetup'\nprefix_palm = 'palm'\n\n# build Leg`\nprefix_leg = 'leg'\nprefix_upperLeg = 'upperLeg'\nprefix_lowerLeg = 'lowerLeg'\nprefix_ankle = 'ankle'\nprefix_knee = 'knee'\nprefix_ball = 'ball'\nprefix_toe = 'toe'\nprefix_leg_setup = 'legSetup'\nprefix_foot = 'foot'\n\n# build arm finger\nprefix_thumb = 'thumb'\nprefix_index = 'index'\nprefix_middle = 'middle'\nprefix_ring = 'ring'\nprefix_pinky = 'pinky'\n\nsj_prefix_value = ''\nss_prefix_value = 'Scale'\nsFk_prefix_value = 'Fk'\nsIk_prefix_value = 'Ik'\nsAdd_prefix_value = 'Expand'\n\nfk = 'Fk'\nik = 'Ik'\ndtl = 'Dtl'\nsuffix_controller = 'ctrl'\nsuffix_joint = 'jnt'\n\n# FACE\nprefix_jaw_jnt = 'jaw'\nprefix_head_low_jnt = 'headLow'\nprefix_mouth_jnt = 'mouth'\n\ncheek_low_prefix = 
'cheekLow'\ncheek_mid_prefix = 'cheekMid'\ncheek_up_prefix = 'cheekUp'\ncheek_in_up_prefix = 'cheekInUp'\ncheek_in_low_prefix = 'cheekInLow'\ncheek_out_up_prefix = 'cheekOutUp'\ncheek_out_low_prefix = 'cheekOutLow'\n\njaw_prefix = 'jaw'\njaw_tip_prefix = 'jawTip'\n\nhead_prefix = 'head'\nneck_prefix = 'neck'\nneck_inbetween_prefix = 'neckInBtw'\nhead_up_prefix = 'headUp'\nhead_low_prefix = 'headLow'\n\ncolumella_prefix = 'columella'\neye_prefix = 'eye'\neye_aim_prefix = 'eyeAim'\npupil_prefix = 'pupil'\niris_prefix = 'iris'\n\nmentolabial_prefix = 'mentolabial'\nchin_prefix = 'chin'\near_prefix = 'ear'\n\nbrow_tw_prefix = 'browTw'\nbrow_in_prefix = 'browIn'\nbrow_mid_prefix = 'browMid'\nbrow_out_prefix = 'browOut'\nbrows_prefix = 'brow'\nbrow_tip_prefix = 'browTip'\nbrow_center_prefix = 'browCenter'\n\n\n# ======================================================================================================================\n# BIPED BODY CMD\n# ======================================================================================================================\n\ndef biped(clavicle_left=True,\n clavicle_right=True,\n arm_left=True,\n arm_right=True,\n leg_left=True,\n leg_right=True,\n foot_left=True,\n foot_right=True,\n detail_spine_deformer=True,\n detail_arm_deformer=True,\n detail_leg_deformer=True,\n number_arm_detail_ctrl=5,\n number_leg_detail_ctrl=5,\n left_side='LFT',\n right_side='RGT',\n thumb_left=True,\n index_left=True,\n middle_left=True,\n ring_left=True,\n pinky_left=True,\n thumb_right=True,\n index_right=True,\n middle_right=True,\n ring_right=True,\n pinky_right=True,\n game_bind_joint=False,\n size=1.0):\n ## CHECK IF THE RIG EXIST IN THE SCENE\n if cmds.objExists('anim_ctrl'):\n cmds.error('Please rid off the body or facial rig and including the nodes before run the script!')\n\n print('------------------------------')\n print('Biped base script is running...')\n print('------------------------------')\n\n # RUN THE RIG BUILD\n rlm_bipedModule.build_rig(clavicle_left=clavicle_left, clavicle_right=clavicle_right, arm_left=arm_left,\n arm_right=arm_right,\n prefix_spine=prefix_spine,\n prefix_spine_setup=prefix_spine_setup,\n prefix_upperArm=prefix_upperArm, prefix_forearm=prefix_forearm, prefix_clav=prefix_clav,\n prefix_arm=prefix_arm,\n prefix_wrist=prefix_wrist, prefix_elbow=prefix_elbow, prefix_hand=prefix_hand,\n prefix_arm_setup=prefix_arm_setup,\n leg_left=leg_left, leg_right=leg_right, foot_right=foot_right, foot_left=foot_left,\n prefix_upperLeg=prefix_upperLeg,\n prefix_lowerLeg=prefix_lowerLeg, prefix_leg=prefix_leg, prefix_ankle=prefix_ankle,\n prefix_knee=prefix_knee,\n prefix_ball=prefix_ball, prefix_toe=prefix_toe, prefix_foot=prefix_foot,\n prefix_leg_setup=prefix_leg_setup,\n detail_leg_def=detail_leg_deformer, num_leg_dtl_ctrl=number_leg_detail_ctrl - 2,\n prefix_thumb=prefix_thumb, prefix_index=prefix_index, prefix_middle=prefix_middle,\n prefix_ring=prefix_ring,\n prefix_finger_setup=prefix_finger_setup, prefix_pinky=prefix_pinky,\n prefix_palm=prefix_palm,\n detail_spine_deformer=detail_spine_deformer, detail_arm_deformer=detail_arm_deformer,\n number_arm_detail_ctrl=number_arm_detail_ctrl - 2,\n thumb_arm_LFT=thumb_left, index_arm_LFT=index_left, middle_arm_LFT=middle_left,\n ring_arm_LFT=ring_left,\n pinky_arm_LFT=pinky_left,\n thumb_arm_RGT=thumb_right, index_arm_RGT=index_right, middle_arm_RGT=middle_right,\n ring_arm_RGT=ring_right,\n pinky_arm_RGT=pinky_right,\n scale=size, side_LFT=left_side, side_RGT=right_side, 
sj_prefix_value=sj_prefix_value,\n ss_prefix_value=ss_prefix_value,\n sFk_prefix_value=sFk_prefix_value, sIk_prefix_value=sIk_prefix_value,\n sAdd_prefix_value=sAdd_prefix_value,\n fk=fk, ik=ik, detail=dtl, suffix_joint=suffix_joint,\n game_bind_joint=game_bind_joint\n )\n print('------------------------------')\n print('Biped base is done!')\n\n\n# ======================================================================================================================\n# HEAD AND FACE CMD\n# ======================================================================================================================\n\ndef face_biped( # LIP\n crv_up_lip_template='lipUpTmp_crv',\n crv_low_lip_template='lipLowTmp_crv',\n crv_up_lip_roll_template='lipUpRollTmp_crv',\n crv_low_lip_roll_template='lipLowRollTmp_crv',\n offset_jnt02_bind_lip_pos_to_cheek=1,\n lip_0105_direction=20,\n lip_0204_direction=15,\n\n # NOSE\n crv_nose_template='nose_crv',\n nose_expression_follow_mouth_value=1,\n offset_jnt02_bind_nose_pos=1,\n offset_jnt04_bind_nose_pos=1,\n\n # LID SETUP\n curve_up_lid_template_LFT='lidUpTmpLFT_crv',\n curve_low_lid_template_LFT='lidLowTmpLFT_crv',\n curve_up_lid_template_RGT='lidUpTmpRGT_crv',\n curve_low_lid_template_RGT='lidLowTmpRGT_crv',\n lid01_direction=0,\n lid02_direction=0,\n lid03_direction=30,\n lid04_direction=50,\n lid05_direction=60,\n offset_jnt02_lid_position=1,\n offset_jnt04_lid_position=1,\n\n # LID FOLLOWING THE EYE AIM\n low_lid_following_to_down=30,\n up_lid_following_to_down_low_lid_following_to_up=20,\n up_lid_and_low_lid_to_left_right=40,\n up_lid_following_to_up=20,\n\n # EYEBALL, MOUTH AND EYE AIM\n eyeball_ctrl_direction=29,\n mouth_ctrl_position=1,\n eye_aim_ctrl_position=2,\n\n # LID OUT\n curve_up_lid_out_template_LFT='lidOutUpTmpLFT_crv',\n curve_low_lid_out_template_LFT='lidOutLowTmpLFT_crv',\n curve_up_lid_out_template_RGT='lidOutUpTmpRGT_crv',\n curve_low_lid_out_template_RGT='lidOutLowTmpRGT_crv',\n lid01_out_ctrl_direction=0,\n lid02_out_ctrl_direction=0,\n lid03_out_ctrl_direction=30,\n lid04_out_ctrl_direction=40,\n lid05_out_ctrl_direction=50,\n offset_jnt02_bind_lid_out_position=1,\n offset_jnt04_bind_lid_out_position=1,\n\n # BROW\n brow_in_rotate_grp_offset=0,\n brow_mid_rotate_grp_offset=30,\n brow_out_rotate_grp_offset=45,\n brow_tip_rotate_grp_offset=65,\n\n # BULGE DEFORMER\n bulge=True,\n bulge_mesh='headBulge_geo',\n add_set_bulge=['bodyPartGeoBulge_grp'],\n follicle_mesh='bodyBulgeFol_geo',\n\n # UTILS\n side_LFT='LFT',\n side_RGT='RGT',\n scale=1.0,\n game_bind_joint=False):\n print('----------------------------------------')\n print('head and face base script is running...')\n print('----------------------------------------')\n if cmds.objExists('neck01_jnt'):\n cmds.error('%s%s%s' % ('Please remove the', ' neck01_jnt ', 'joint first!'))\n\n # RUN THE RIG BUILD\n faceRig = rlm_faceModule.build_rig(curve_up_template_lip=crv_up_lip_template,\n curve_low_template_lip=crv_low_lip_template,\n curve_up_template_lip_roll=crv_up_lip_roll_template,\n curve_low_template_lip_roll=crv_low_lip_roll_template,\n curve_template_nose=crv_nose_template,\n position_mouth_ctrl=mouth_ctrl_position,\n\n offset_jnt02_bind_lip_cheek_position=offset_jnt02_bind_lip_pos_to_cheek,\n offset_jnt02_bind_lip_nose_position=offset_jnt02_bind_nose_pos,\n offset_jnt04_bind_lip_nose_position=offset_jnt04_bind_nose_pos,\n scale=scale,\n lip01_cheek_direction=lip_0105_direction,\n lip02_cheek_direction=lip_0204_direction,\n side_LFT=side_LFT,\n side_RGT=side_RGT,\n 
suffix_controller=suffix_controller,\n\n cheek_low_prefix=cheek_low_prefix,\n cheek_mid_prefix=cheek_mid_prefix,\n cheek_up_prefix=cheek_up_prefix,\n cheek_in_up_prefix=cheek_in_up_prefix,\n cheek_in_low_prefix=cheek_in_low_prefix,\n cheek_out_up_prefix=cheek_out_up_prefix,\n cheek_out_low_prefix=cheek_out_low_prefix,\n jaw_prefix=jaw_prefix,\n jaw_tip_prefix=jaw_tip_prefix,\n head_prefix=head_prefix,\n neck_prefix=neck_prefix,\n neck_in_btw_prefix=neck_inbetween_prefix,\n head_up_prefix=head_up_prefix,\n head_low_prefix=head_low_prefix,\n\n columella_prefix=columella_prefix,\n\n lid02_position_offset=offset_jnt02_lid_position,\n lid04_position_offset=offset_jnt04_lid_position,\n curve_up_lid_template_LFT=curve_up_lid_template_LFT,\n curve_low_lid_template_LFT=curve_low_lid_template_LFT,\n curve_up_lid_template_RGT=curve_up_lid_template_RGT,\n curve_low_lid_template_RGT=curve_low_lid_template_RGT,\n low_lid_following_down=low_lid_following_to_down,\n up_lid_following_down_low_lid_following_up=up_lid_following_to_down_low_lid_following_to_up,\n up_lid_LR_low_lid_LR=up_lid_and_low_lid_to_left_right,\n up_lid_following_up=up_lid_following_to_up,\n\n eye_prefix=eye_prefix,\n pupil_prefix=pupil_prefix,\n iris_prefix=iris_prefix,\n lid01_direction=lid01_direction,\n lid02_direction=lid02_direction,\n lid03_direction=lid03_direction,\n lid04_direction=lid04_direction,\n lid05_direction=lid05_direction,\n position_eye_aim_ctrl=eye_aim_ctrl_position,\n eye_ctrl_direction=eyeball_ctrl_direction,\n eye_aim_prefix=eye_aim_prefix,\n nose_follow_mouth_value=nose_expression_follow_mouth_value,\n\n curve_up_lid_out_LFT=curve_up_lid_out_template_LFT,\n curve_low_lid_out_LFT=curve_low_lid_out_template_LFT,\n jnt02_bind_lip_lid_out_position_offset=offset_jnt02_bind_lid_out_position,\n jnt04_bind_lip_lid_out_position_offset=offset_jnt04_bind_lid_out_position,\n curve_up_lid_out_RGT=curve_up_lid_out_template_RGT,\n curve_low_lid_out_RGT=curve_low_lid_out_template_RGT,\n lid01_out_ctrl_direction=lid01_out_ctrl_direction,\n lid02_out_ctrl_direction=lid02_out_ctrl_direction,\n lid03_out_ctrl_direction=lid03_out_ctrl_direction,\n lid04_out_ctrl_direction=lid04_out_ctrl_direction,\n lid05_out_ctrl_direction=lid05_out_ctrl_direction,\n\n mentolabial_prefix=mentolabial_prefix,\n chin_prefix=chin_prefix,\n ear_prefix=ear_prefix,\n\n brow_tweak_prefix=brow_tw_prefix,\n brow_in_prefix=brow_in_prefix,\n brow_mid_prefix=brow_mid_prefix,\n brow_out_prefix=brow_out_prefix,\n brows_prefix=brows_prefix,\n brow_tip_prefix=brow_tip_prefix,\n brow_center_prefix=brow_center_prefix,\n\n brow_in_rotation_grp_offset=brow_in_rotate_grp_offset,\n brow_mid_rotation_grp_offset=brow_mid_rotate_grp_offset,\n brow_out_rotation_grp_offset=brow_out_rotate_grp_offset,\n brow_tip_rotation_grp_offset=brow_tip_rotate_grp_offset,\n\n bulge=bulge,\n bulge_mesh=bulge_mesh,\n add_set_bulge=add_set_bulge,\n follicle_mesh=follicle_mesh,\n game_bind_joint=game_bind_joint\n )\n\n print('------------------------------')\n print('Facial base is done!')\n\n\n# ======================================================================================================================\n# FACIAL BLENDSHAPE CMD\n# ======================================================================================================================\ndef blendshape(face_blendshape_node_name='face_bsn',\n squash_stretch_bsh_prefix='head',\n roll_up_lip_bsh_prefix='rollLipUp',\n roll_low_lip_bsh_prefix='rollLipLow',\n cheek_out_prefix='cheekOut',\n side_LFT='LFT',\n side_RGT='RGT',\n 
blendshape_suffix='grp'):\n if face_blendshape_node_name:\n print('------------------------------')\n print('Add facial blendshape..............')\n\n rt_utils.add_attribute(objects=['%s_%s' % (prefix_mouth_jnt, suffix_controller)], long_name=['bshSetup'],\n nice_name=[' '], at=\"enum\",\n en='Bsh Setup', channel_box=True)\n\n controller_up_roll_bsh = rt_utils.add_attribute(objects=['%s_%s' % (prefix_mouth_jnt, suffix_controller)],\n long_name=['rollLipUpBsh'],\n attributeType=\"float\", dv=0, keyable=True)\n\n controller_low_roll_bsh = rt_utils.add_attribute(objects=['%s_%s' % (prefix_mouth_jnt, suffix_controller)],\n long_name=['rollLipLowBsh'],\n attributeType=\"float\", dv=0, keyable=True)\n\n head_low_squash_stretch = rt_utils.add_attribute(objects=['%s_%s' % (prefix_mouth_jnt, suffix_controller)],\n long_name=['squashStretchBsh'],\n attributeType=\"float\", dv=0, keyable=True)\n\n cheek_out_LFT = rt_utils.add_attribute(objects=['%s_%s' % (prefix_mouth_jnt, suffix_controller)],\n long_name=['cheekOutLFT' + '_' + 'Bsh'],\n attributeType=\"float\", dv=0, min=0, keyable=True)\n cheek_out_RGT = rt_utils.add_attribute(objects=['%s_%s' % (prefix_mouth_jnt, suffix_controller)],\n long_name=['cheekOutRGT' + '_' + 'Bsh'],\n attributeType=\"float\", dv=0, min=0, keyable=True)\n rlbf_blendshape.BuildTwoSide(blendshape_node_name=face_blendshape_node_name,\n squash_stretch_prefix=squash_stretch_bsh_prefix,\n roll_low_prefix=roll_low_lip_bsh_prefix,\n roll_up_prefix=roll_up_lip_bsh_prefix,\n cheek_out_prefix=cheek_out_prefix,\n blendshape_suffix=blendshape_suffix,\n mouth_ctrl='%s_%s' % (prefix_mouth_jnt, suffix_controller),\n controller_roll_up_bsh_attr=controller_up_roll_bsh,\n controller_roll_low_bsh_attr=controller_low_roll_bsh,\n squash_stretch_attr=head_low_squash_stretch,\n cheek_out_attr_LFT=cheek_out_LFT,\n cheek_out_attr_RGT=cheek_out_RGT,\n side_LFT=side_LFT,\n side_RGT=side_RGT\n )\n print('Facial blendshape is done!')\n\n\n# ======================================================================================================================\n# EXPAND BODY JOINT CMD\n# ======================================================================================================================\ndef add_expand_joint(number_arm_detail_ctrl=5,\n number_leg_detail_ctrl=5,\n spine_expand_joint=True,\n neck_expand_joint=False,\n clavicle_expand_joint=False,\n ball_expand_joint=True,\n upperArm_expand_joint=True,\n upperLeg_expand_joint=True,\n elbow_expand_joint=True,\n knee_expand_joint=True,\n wrist_expand_joint=True,\n ankle_expand_joint=True,\n thumb_expand_joint=True,\n index_expand_joint=True,\n middle_expand_joint=True,\n ring_expand_joint=True,\n pinky_expand_joint=True,\n left_side='LFT',\n right_side='RGT',\n size=1.0):\n print('------------------------------')\n print('Adding joint deform...........')\n\n rlmb_pushJointModule.PushJoint(number_arm_detail_ctrl=number_arm_detail_ctrl,\n number_leg_detail_ctrl=number_leg_detail_ctrl,\n spine_expand_joint=spine_expand_joint,\n neck_expand_joint=neck_expand_joint,\n clavicle_expand_joint=clavicle_expand_joint,\n ball_expand_joint=ball_expand_joint,\n upperArm_expand_joint=upperArm_expand_joint,\n upperLeg_expand_joint=upperLeg_expand_joint,\n elbow_expand_joint=elbow_expand_joint,\n knee_expand_joint=knee_expand_joint,\n wrist_expand_joint=wrist_expand_joint,\n ankle_expand_joint=ankle_expand_joint,\n left_side=left_side,\n right_side=right_side,\n\n prefix_spine=prefix_spine,\n prefix_arm_setup=prefix_arm_setup,\n prefix_clav=prefix_clav,\n 
prefix_upperArm=prefix_upperArm,\n prefix_elbow=prefix_elbow,\n prefix_wrist=prefix_wrist,\n prefix_leg_setup=prefix_leg_setup,\n prefix_ball=prefix_ball,\n prefix_upperLeg=prefix_upperLeg,\n prefix_knee=prefix_knee,\n prefix_ankle=prefix_ankle,\n sAdd_prefix_value=sAdd_prefix_value,\n dtl=dtl,\n sj_prefix_value=sj_prefix_value,\n prefix_forearm=prefix_forearm,\n fk=fk,\n prefix_lower_leg=prefix_lowerLeg,\n prefix_FkIk_spine_setup=prefix_spine_setup,\n neck_prefix=neck_prefix,\n\n upArm_joint_LFT_grp=prefix_arm + 'Joint' + left_side + '_grp',\n elbow_joint_LFT_grp=prefix_arm + 'Joint' + left_side + '_grp',\n wrist_joint_LFT_grp=prefix_arm + 'Joint' + left_side + '_grp',\n upArm_joint_RGT_grp=prefix_arm + 'Joint' + right_side + '_grp',\n elbow_joint_RGT_grp=prefix_arm + 'Joint' + right_side + '_grp',\n wrist_joint_RGT_grp=prefix_arm + 'Joint' + right_side + '_grp',\n upLeg_joint_LFT_grp=prefix_leg + 'Joint' + left_side + '_grp',\n knee_joint_LFT_grp=prefix_leg + 'Joint' + left_side + '_grp',\n ankle_joint_LFT_grp=prefix_leg + 'Joint' + left_side + '_grp',\n ball_joint_LFT_grp=prefix_leg + 'Joint' + left_side + '_grp',\n upLeg_joint_RGT_grp=prefix_leg + 'Joint' + right_side + '_grp',\n knee_joint_RGT_grp=prefix_leg + 'Joint' + right_side + '_grp',\n ankle_joint_RGT_grp=prefix_leg + 'Joint' + right_side + '_grp',\n ball_joint_RGT_grp=prefix_leg + 'Joint' + right_side + '_grp',\n neck_joint_grp=prefix_spine + 'Joint_grp',\n spine_joint_grp=prefix_spine + 'Joint_grp',\n\n prefix_thumb=prefix_thumb,\n prefix_index=prefix_index,\n prefix_middle=prefix_middle,\n prefix_ring=prefix_ring,\n prefix_pinky=prefix_pinky,\n thumb_joint_LFT_grp=prefix_arm + 'Joint' + left_side + '_grp',\n index_joint_LFT_grp=prefix_arm + 'Joint' + left_side + '_grp',\n middle_joint_LFT_grp=prefix_arm + 'Joint' + left_side + '_grp',\n ring_joint_LFT_grp=prefix_arm + 'Joint' + left_side + '_grp',\n pinky_joint_LFT_grp=prefix_arm + 'Joint' + left_side + '_grp',\n thumb_joint_RGT_grp=prefix_arm + 'Joint' + right_side + '_grp',\n index_joint_RGT_grp=prefix_arm + 'Joint' + right_side + '_grp',\n middle_joint_RGT_grp=prefix_arm + 'Joint' + right_side + '_grp',\n ring_joint_RGT_grp=prefix_arm + 'Joint' + right_side + '_grp',\n pinky_joint_RGT_grp=prefix_arm + 'Joint' + right_side + '_grp',\n thumb_expand_joint=thumb_expand_joint,\n index_expand_joint=index_expand_joint,\n middle_expand_joint=middle_expand_joint,\n ring_expand_joint=ring_expand_joint,\n pinky_expand_joint=pinky_expand_joint,\n suffix_parent_joint='skn',\n suffix_duplicate_expand_joint='jnt',\n size=size\n )\n\n print('------------------------------')\n print('Adding joint deform is done!')\n","sub_path":"rigging/procedural/CMDRig.py","file_name":"CMDRig.py","file_ext":"py","file_size_in_byte":26959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"312976635","text":"import pickle\nimport threading\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import (\n QWidget, QToolTip, QPushButton, QApplication, QMessageBox, )\n\nfrom .RocAnyl import RocAnyl\n\n\nclass WrappedRocAnyl(RocAnyl, QtWidgets.QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.checkBoxAccur.setChecked(True)\n self.checkBoxConf.setChecked(True)\n self.checkBoxF.setChecked(True)\n self.checkBoxPrecision.setChecked(True)\n self.checkBoxRecall.setChecked(True)\n self.checkBoxTrashhold.setChecked(True)\n self.setWindowTitle('Метрики')\n self.settings = {'accuracy': True,\n 'confidence': True,\n 
'F': True,\n 'precision': True,\n 'recall': True,\n 'trashhold': True}\n self.__build_buttons()\n\n def __build_buttons(self):\n self.pushButtonDone.clicked.connect(self.next)\n self.pushButtonBack.clicked.connect(self.back)\n\n def back(self):\n self.hide()\n self.parent.show()\n\n def next(self):\n if self.checkBoxAccur.isChecked() != True:\n self.settings['accuracy'] = False\n if self.checkBoxConf.isChecked() != True:\n self.settings['confidence'] = False\n if self.checkBoxF.isChecked() != True:\n self.settings['F'] = False\n if self.checkBoxPrecision.isChecked() != True:\n self.settings['precision'] = False\n if self.checkBoxRecall.isChecked() != True:\n self.settings['recall'] = False\n if self.checkBoxTrashhold.isChecked() != True:\n self.settings['trashhold'] = False\n with open('settings.py', 'rb') as f:\n data = pickle.load(f)\n data['MODULE_SETTINGS'].update(self.settings)\n with open('settings.py', 'wb') as f:\n pickle.dump(data, f)\n self.hide()\n self.child.show()\n","sub_path":"SmartMedApp/GUI/apps/PredictionApp/WrappedRocAnyl.py","file_name":"WrappedRocAnyl.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"371738786","text":"#Problem available at: https://www.hackerrank.com/challenges/crush/problem\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the arrayManipulation function below.\ndef arrayManipulation(n, queries):\n arr =[0] * (n+2)\n sum_ele = 0\n max_ele = 0\n for q in queries:\n x = q[0]\n y = q[1]\n ele = q[2]\n arr[x] = arr[x]+ele\n arr[y+1] = arr[y+1] - ele\n\n for i in range(0,len(arr)):\n sum_ele = sum_ele + arr[i]\n if(sum_ele>max_ele):\n max_ele = sum_ele\n return(max_ele)\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nm = input().split()\n\n n = int(nm[0])\n\n m = int(nm[1])\n\n queries = []\n\n for _ in range(m):\n queries.append(list(map(int, input().rstrip().split())))\n\n result = arrayManipulation(n, queries)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"DataStructures/Array/ArrayManipulation.py","file_name":"ArrayManipulation.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"463039831","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /Users/juancomish/miniconda3/lib/python3.7/site-packages/pyehub/batchruns_Test.py\n# Compiled at: 2019-07-03 19:21:52\n# Size of source mod 2**32: 2743 bytes\n__doc__ = \"\\nA script for testing the batchrun script's code.\\n\"\nimport subprocess, openpyxl as op, numpy as np\nLETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\ndef main():\n start_D = 0.1\n stop_D = 0.5\n steps_D = 9\n start_PV = 50\n stop_PV = 250\n steps_PV = 6\n in_file = 'Individual_Hubs/PV_Diesel/RUN2/Input_Diesel_PV_Tanklimit.xlsx'\n sheet_name = 'Streams'\n cell_to_set = 'D5'\n sheet_name_PV = 'Converters'\n cell_to_set_PV = 'B4'\n model_out_dir = 'Individual_Hubs/PV_Diesel/RUN2'\n model_inputs = 'Individual_Hubs/PV_Diesel/RUN2/temp_inputs.xls'\n sheet_to_read = 'Other'\n cells_to_read = ['B50', 'B52', 'B54', 'B60', 'B110', 'B168', 'B170']\n final_output_file = 'Individual_Hubs/PV_Diesel/RUN2/output_Diesel_Tanklimit.xlsx'\n result_wb = op.Workbook()\n result_sheet = result_wb['Sheet']\n output_cells = ['Input value'] + 
cells_to_read\n for column, cell in zip(LETTERS, output_cells):\n result_sheet[f\"{column}1\"].value = cell\n\n for roww, valuee in enumerate((np.linspace(start_PV, stop_PV, steps_PV)), start=2):\n wb = op.load_workbook(in_file)\n wb[sheet_name_PV][cell_to_set_PV].value = valuee\n for row, value in enumerate((np.linspace(start_D, stop_D, steps_D)), start=2):\n wb = op.load_workbook(in_file)\n wb[sheet_name][cell_to_set].value = value\n wb.save(model_inputs)\n model_out_file = f\"{model_out_dir}/PV{valuee}_D{value}_TL.xlsx\"\n subprocess.run(['python', 'run.py', '--output', model_out_file])\n wb = op.load_workbook(model_out_file)\n result_sheet[f\"A{row}\"].value = value\n for col, cell_to_read in zip(LETTERS[1:], cells_to_read):\n print(f\"writing {wb[sheet_to_read][cell_to_read].value} to {col}{row} for {value}\")\n result_sheet[f\"{col}{row}\"].value = wb[sheet_to_read][cell_to_read].value\n\n result_wb.save(final_output_file)\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/pyehub-1.4.1-py3-none-any/batchruns_Test.cpython-37.py","file_name":"batchruns_Test.cpython-37.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"276727719","text":"#!/usr/local/bin/python3.5\n#-*- coding: UTF-8 -*-\n\nfrom Data import Data\n\n\nclass ThumbCity(Data):\n def __init__(self, pname):\n super(ThumbCity, self).__init__(pname)\n\n def getJson(self, lang):\n try:\n tables = ['Sight', 'Restaurant', 'Hotel', 'Activity']\n query = 'select * from City where name_en = \\\"' + self.name + '\\\" or name_cn = \\\"' + self.name + '\\\";'\n resCity = self.condb.executeCurDictFetchOne(query)\n query = 'select * from Continent;'\n resContinent = self.condb.executeCurDictFetchAll(query)\n data = {}\n data['navigation'] = []\n i = 0\n for res in resContinent:\n query = 'select name_' + lang + ' from Country where continentid = ' + str(res['id']) + ';'\n tmp = self.condb.executeCurFetchAll(query)\n arr = []\n for t in tmp:\n arr.append(t[0])\n data['navigation'].append({})\n data['navigation'][i]['name'] = res['name_' + lang]\n data['navigation'][i]['item'] = arr\n i += 1\n\n data['city'] = {'name' : resCity['name_' + lang]}\n data['city']['iframe_type'] = 'City'\n data['city']['iframe_link'] = resCity['rep_link']\n data['city']['text'] = self.convertEOL(resCity['text_' + lang])\n data['city']['href_name'] = resCity['name_' + lang].replace('&', '|')\n\n hots = []\n # 若hots为空,则所有内容全部不能呈现,与前端代码解析json的方式有关\n for t in tables:\n query = 'select * from ' + t + ' where cityid = ' + str(resCity['id']) + ' and ad_cost != 0 and thumb_link is not null and thumb_link != \\'\\' and (img_link is not null and img_link != \\'\\' or video_link is not null and video_link != \\'\\');'\n res = self.condb.executeCurDictFetchAll(query)\n for row in res:\n row['table'] = t\n hots.append(row)\n hots.sort(key = lambda x: x['ad_cost'], reverse=True)\n data['hot'] = []\n for row in hots:\n # media默认为krpano多图;若图与视频同时存在,则media置为视频\n media = row['video_link']\n media_type_en = 'video'\n media_type_cn = '视频'\n if not media:\n media = row['img_link']\n media_type_en = 'imgs'\n media_type_cn = '图片'\n if not media:\n continue\n thumb = row['thumb_link']\n if thumb is None:\n continue\n tmp = {}\n tmp['id'] = row['id']\n tmp['type'] = 'hot'\n tmp['table'] = row['table']\n #tmp['media-type-cn'] = media_type_cn\n #tmp['media-type-en'] = media_type_en\n tmp['img'] = thumb\n #tmp['main-cell-type'] = 'icon-' + row['table']\n #tmp['a-href'] = media\n 
tmp['name'] = row['name_' + lang]\n tmp['href_name'] = row['name_' + lang].replace(\"&\", \"|\")\n #tmp['author-en'] = 'BeejeenVR'\n #tmp['author-cn'] = '百见VR'\n tmp['view'] = row['view']\n tmp['like'] = row['adore']\n data['hot'].append(tmp)\n for t in tables:\n data[t.lower()] = []\n query = 'select * from ' + t + ' where cityid = ' + str(resCity['id']) + ' and thumb_link is not null and thumb_link != \\'\\' and (img_link is not null and img_link != \\'\\' or video_link is not null and video_link != \\'\\');'\n res = self.condb.executeCurDictFetchAll(query)\n for row in res:\n # 若图与视频同时存在,则media置为视频\n media = row['video_link']\n media_type_en = 'video'\n media_type_cn = '视频'\n if not media:\n media = row['img_link']\n media_type_en = 'imgs'\n media_type_cn = '图片'\n if not media:\n continue\n thumb = row['thumb_link']\n if thumb is None:\n continue\n tmp = {}\n tmp['id'] = row['id']\n tmp['type'] = t.lower()\n tmp['table'] = t\n #tmp['media-type-en'] = media_type_en\n #tmp['media-type-cn'] = media_type_cn\n #tmp['a-href'] = media\n tmp['href_name'] = row['name_' + lang].replace(\"&\", \"|\")\n tmp['img'] = thumb\n tmp['name'] = row['name_' + lang]\n #tmp['main-cell-type'] = 'icon-' + t\n #tmp['author-en'] = 'BeejeenVR'\n #tmp['author-cn'] = '百见VR'\n tmp['view'] = row['view']\n tmp['like'] = row['adore']\n data[t.lower()].append(tmp)\n except Exception as e:\n print('EXCEPTION FROM ThumbCity getJson:')\n print(e)\n return data\n","sub_path":"beejeen-master/model/ThumbCity.py","file_name":"ThumbCity.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"629379395","text":"from __future__ import print_function\nimport arcpy\nimport os\nfrom datetime import datetime\nfrom collections import OrderedDict\nfrom random import random\nfrom math import sqrt, pi, cos, sin\nimport logging\nimport logging.handlers\n\nMAX_ITS = 100000\nPOINTS = []\nFAIL_COUNT = 0\n\n\nclass PseudoRandomAbsenceGenerator(object):\n\n def __init__(self):\n\n self.label = \"Pseudo-random Absence Generator\"\n self.description = \"Generate a pseudo-random absence point layer\"\n self.canRunInBackground = False\n\n return\n\n def getParameterInfo(self):\n\n param0 = arcpy.Parameter(\n displayName=\"Sample Points Layer\",\n name=\"in_sample_points\",\n datatype=\"GPFeatureLayer\",\n parameterType=\"Required\",\n direction=\"Input\")\n\n param0.filter.list = [\"Point\"]\n\n param1 = arcpy.Parameter(\n displayName=\"Point Features ID Field\",\n name=\"in_points_id_field\",\n datatype=\"Field\",\n parameterType=\"Required\",\n direction=\"Input\")\n\n param1.parameterDependencies = [\"in_sample_points\"]\n\n param2 = arcpy.Parameter(\n displayName=\"Maximum Offset\",\n name=\"in_offset_max\",\n datatype=\"GPDouble\",\n parameterType=\"Required\",\n direction=\"Input\")\n\n param3 = arcpy.Parameter(\n displayName=\"Minimum Offset\",\n name=\"in_offset_min\",\n datatype=\"GPDouble\",\n parameterType=\"Optional\",\n direction=\"Input\")\n\n param4 = arcpy.Parameter(\n displayName=\"Study Area\",\n name=\"in_study_layer\",\n datatype=\"GPFeatureLayer\",\n parameterType=\"Optional\",\n direction=\"Input\")\n\n param4.filter.list = [\"Polygon\"]\n\n param5 = arcpy.Parameter(\n displayName=\"Maximum Proximity\",\n name=\"in_proximity_max\",\n datatype=\"GPDouble\",\n parameterType=\"Optional\",\n direction=\"Input\")\n\n param6 = arcpy.Parameter(\n displayName=\"Output Workspace\",\n name=\"in_out_ws\",\n datatype=\"DEWorkspace\",\n 
parameterType=\"Required\",\n direction=\"Input\")\n\n param6.defaultEnvironmentName = \"workspace\"\n\n param7 = arcpy.Parameter(\n displayName=\"Output Layer Name\",\n name=\"in_out_lyr\",\n datatype=\"GPString\",\n parameterType=\"Required\",\n direction=\"Input\")\n\n param8 = arcpy.Parameter(\n displayName=\"Maximum Iterations\",\n name=\"max_its\",\n datatype=\"GPLong\",\n parameterType=\"Required\",\n direction=\"Input\")\n\n param8.value = MAX_ITS\n\n param9 = arcpy.Parameter(\n displayName=\"Pseudo Points\",\n name=\"out_pt_lyr\",\n datatype=\"DEDatasetType\",\n parameterType=\"Derived\",\n direction=\"Output\")\n\n param10 = arcpy.Parameter(\n displayName=\"Study Points\",\n name=\"out_study_pts\",\n datatype=\"DEDatasetType\",\n parameterType=\"Derived\",\n direction=\"Output\")\n\n return [param0, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10]\n\n # def isLicensed(self):\n #\n # return True\n #\n # def updateParameters(self, parameters):\n #\n # return\n #\n # def updateMessages(self, parameters):\n #\n # return\n\n def execute(self, parameters, messages):\n\n add_message = messages.addMessage\n logger = init_log(add_message)\n\n parameter_dictionary = OrderedDict([(p.DisplayName, p.valueAsText) for p in parameters[:-2]])\n add_message(\"Parameter summary:\")\n [add_message(\"{}: {}\".format(k, v)) for k, v in parameter_dictionary.iteritems()]\n\n add_message(parameter_dictionary.values())\n\n in_sample_points, in_points_id_field, in_offset_max, in_offset_min, in_study_layer, in_proximity_max, \\\n in_out_ws, in_out_lyr, max_its = parameter_dictionary.values()\n\n global MAX_ITS, POINTS, FAIL_COUNT\n MAX_ITS = float(max_its)\n POINTS = []\n FAIL_COUNT = 0\n\n # cast inputs to float\n in_offset_max = float(in_offset_max) if in_offset_max not in [None, \"#\"] else 0\n in_offset_min = float(in_offset_min) if in_offset_min not in [None, \"#\"] else 0\n in_proximity_max = float(in_proximity_max) if in_proximity_max not in [None, \"#\"] else 0\n\n # delete existing data\n out_name = os.path.join(in_out_ws, in_out_lyr)\n if arcpy.Exists(out_name):\n arcpy.Delete_management(out_name)\n add_message(\"Existing '{}' deleted\".format(out_name))\n\n # Get spatial reference object from sample point layer and create a new empty feature class\n spat_ref = arcpy.Describe(in_sample_points).SpatialReference\n arcpy.CreateFeatureclass_management(in_out_ws, in_out_lyr, \"POINT\", spatial_reference=spat_ref)\n arcpy.AddField_management(out_name, \"parent_id\", \"TEXT\", field_length=255)\n arcpy.AddField_management(out_name, \"xy_orig\", \"TEXT\", field_length=255)\n arcpy.AddField_management(out_name, \"xy_pseudo\", \"TEXT\", field_length=255)\n arcpy.AddField_management(out_name, \"iterations\", \"TEXT\", field_length=255)\n arcpy.AddField_management(out_name, \"duration\", \"TEXT\", field_length=255)\n arcpy.AddField_management(out_name, \"status\", \"TEXT\", field_length=255)\n add_message(\"Point dataset '{}' created\".format(out_name))\n\n # make the sample points layer\n arcpy.MakeFeatureLayer_management(in_sample_points, \"points_layer\")\n total_points = int(arcpy.GetCount_management(\"points_layer\").getOutput(0))\n add_message(\"Points layer contains {} features in total\".format(total_points))\n study_point_count = total_points\n\n # select features if study area provided\n if in_study_layer:\n arcpy.MakeFeatureLayer_management(in_study_layer, \"study_layer\")\n total_feats = int(arcpy.GetCount_management(\"study_layer\").getOutput(0))\n 
add_message(\"Study layer contains {} feature(s)\".format(total_feats, in_study_layer))\n\n arcpy.SelectLayerByLocation_management(\"points_layer\", \"WITHIN\", \"study_layer\") #, {search_distance}, {selection_type}, {invert_spatial_relationship})\n study_points = os.path.join(in_out_ws, in_out_lyr + \"_study_points\")\n arcpy.CopyFeatures_management(\"points_layer\", study_points)\n\n study_point_count = int(arcpy.GetCount_management(\"points_layer\").getOutput(0))\n add_message(\"Points layer contains {} features within study area '{}'\".format(study_point_count, in_study_layer))\n study_feats = [f[0] for f in arcpy.da.SearchCursor(\"study_layer\", [\"SHAPE@\"])]\n else:\n study_feats = []\n\n add_message(\"study features: {}\".format(study_feats))\n\n add_message(\"Generating pseudo-points...\")\n\n # get the points in a cursor\n point_rows = arcpy.da.SearchCursor(\"points_layer\", ['SHAPE@', in_points_id_field])\n\n result = []\n row_num = 0\n for point_row in point_rows:\n\n point = point_row[0].centroid\n point_id = point_row[1]\n row_num += 1\n\n x = [point_id]\n x.extend(generate_pseudo_point(point, in_offset_max, in_offset_min, study_feats, in_proximity_max, add_message)) # logger.debug))\n add_message(\"{} of {}: Point {} Coords {} Pseudo-point {} : took {} iterations, {} seconds : {}\".format(row_num, study_point_count, *x))\n result.append(x)\n\n with arcpy.da.InsertCursor(out_name, [\"parent_id\", \"xy_orig\", \"xy_pseudo\", \"iterations\", \"duration\", \"status\", \"SHAPE@XY\"]) as ICur:\n for v in result:\n add_message(v)\n ICur.insertRow(v) # insert it into the feature class\n\n arcpy.SetParameterAsText(9, out_name)\n arcpy.SetParameterAsText(10, study_points if in_study_layer else in_sample_points)\n\n return\n\n\ndef generate_pseudo_point(point, max_offset, min_offset=0, study_features=[], max_proximity=0, print_func=print):\n\n global POINTS, FAIL_COUNT\n\n n, unsolved = 0, True\n start = datetime.now()\n\n xy = \"{}, {}\".format(point.X, point.Y)\n\n while unsolved:\n if n > MAX_ITS:\n POINTS.append(arcpy.Point(-9999, -9999))\n FAIL_COUNT += 1\n return xy, \"{}, {}\".format(-9999, -9999), n-1, str(datetime.now() - start), \"maximum iterations reached\", (-9999, -9999)\n\n n += 1\n\n u = random()\n v = random()\n\n w = max_offset * sqrt(u)\n t = 2.0 * pi * v\n x = point.X + w * cos(t)\n y = point.Y + w * sin(t)\n\n if min_offset:\n if sqrt((x - point.X)**2 + (y - point.Y)**2) < min_offset:\n print_func(\"too close to original\")\n # REJECTED\n continue\n\n # RE-USING POINT OBJECT !!\n point.X, point.Y = x, y\n\n # is point within study area\n if study_features:\n contained = False\n for s in study_features: # is point within study area\n if not s.contains(point):\n # REJECTED\n contained = False\n print_func(\"NOT CONTAINED\")\n break\n else:\n contained = True\n\n if not contained:\n continue\n\n # is point too close to previously generated pseudo-points\n if max_proximity:\n too_close = False\n for p in POINTS:\n # print_func([type(p), type(point)])\n if p.distanceTo(point) < max_proximity:\n too_close = True\n print_func(\"too close to previous pseudo-points\")\n # REJECTED\n break\n\n if too_close:\n continue\n\n # if execution gets here, we should have a solution\n unsolved = False\n\n POINTS.append(arcpy.PointGeometry(point))\n print_func(\"Pseudo-point count: {}\".format(len(POINTS)))\n\n return xy, \"{}, {}\".format(point.X, point.Y), n, str(datetime.now() - start), \"solved\", (point.X, point.Y) # xy\n\n\ndef init_log(print_func=print):\n\n log_filename = 
'pseudo-absences.log'\n\n logger = logging.getLogger('pseudo-absence')\n logger.setLevel(logging.DEBUG)\n\n handler = logging.handlers.RotatingFileHandler(log_filename, maxBytes=2000000, backupCount=5)\n\n logger.addHandler(handler)\n\n filename = handler.baseFilename\n\n print_func(\"Logging file at {}\".format(filename))\n\n return logger\n","sub_path":"pseudo_point.py","file_name":"pseudo_point.py","file_ext":"py","file_size_in_byte":10719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"340847189","text":"import re\nfrom app import app, db\nfrom app.utilities import get_quote_json\nfrom app.models import User, Post, Tag, SiteConfig, Comment\nfrom flask import render_template, flash, redirect, url_for, request, jsonify, make_response\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom app.forms import LoginForm, RegistrationForm, PostForm, SiteSettingForm, CommentForm, VisitorSettingForm, THEME_CHOICES\nfrom jinja2 import evalcontextfilter, Markup, escape\nfrom markdown2 import Markdown\n\n_paragraph_re = re.compile(r'(?:\\r\\n|\\r|\\n){2,}')\n\n@app.template_filter()\n@evalcontextfilter\ndef nl2br(eval_ctx, value):\n result = u'\\n\\n'.join(u'

<p>%s</p>' % p.replace('\\n', '<br>\\n') \\\n                          for p in _paragraph_re.split(escape(value)))\n    if eval_ctx.autoescape:\n        result = Markup(result)\n    return result\n\n@app.route('/author/<author>', methods=['GET'])\ndef post_by_author(author):\n\ttags = Tag.query.all()\n\tconfig = SiteConfig.query.first()\n\tuser = User.query.filter_by(username=author).first()\n\tquery_type = request.args.get('type')\n\n\tif user:\n\t\tposts = Post.query.filter_by(author=user)\n\t\tif posts:\n\n\t\t\t#return json version of the post if requested\n\t\t\tif query_type and query_type == 'json':\n\t\t\t\treturn jsonify([post.get_as_dict() for post in posts])\n\t\t\telse:\n\t\t\t\treturn render_template('index.html', title='Posts by ' + str(author), posts=posts, tags=tags, config=config)\n\t\telse:\n\t\t\tflash('No posts by this user ' + author)\n\t\t\treturn redirect(url_for('index'))\t\t\t\n\telse:\n\t\tflash('Author not found')\n\t\treturn redirect(url_for('index'))\n\t\n@app.route('/post/<slug>', methods=['GET', 'POST'])\ndef post(slug):\n\ttags = Tag.query.all()\n\tconfig = SiteConfig.query.first()\n\tpost = Post.query.filter_by(url_slug=slug).first()\n\tquery_type = request.args.get('type')\n\n\t#return json version of the post if requested\n\tif query_type and query_type == 'json':\n\t\treturn jsonify(post.get_as_dict())\n\telse:\n\t\tcomments = Comment.query.filter_by(post_slug=slug)\n\t\tcommentForm = CommentForm()\n\t\tif commentForm.validate_on_submit():\n\t\t\tcomment = Comment(username=commentForm.username.data,email=commentForm.email.data,body=commentForm.body.data, post_slug=slug)\n\t\t\tdb.session.add(comment)\n\t\t\tdb.session.commit()\n\t\t\treturn redirect(url_for('post',slug=slug))\n\t\treturn render_template('index.html', title=post.title, posts=[post], tags=tags, config=config, commentForm=commentForm, comments=comments)\n\n@app.route('/json/posts', methods=['GET'])\ndef post_json():\n\tposts = Post.query.all()\n\tpost_dicts = {post.id : post.get_as_dict() for post in posts}\n\treturn jsonify(post_dicts)\n\n@app.route('/json/post/<id>', methods=['GET'])\ndef post_json_ID(id):\n\tpost = Post.query.filter_by(id=id).first()\n\treturn jsonify(post.get_as_dict())\n\n@app.route('/posts/<tag>', methods=['GET'])\ndef posts(tag):\n\ttags = Tag.query.all()\n\tconfig = SiteConfig.query.first()\n\ttag_obj = Tag.query.filter_by(name=tag).first()\n\tquery_type = request.args.get('type')\n\n\tif tag_obj is not None:\n\t\tposts = tag_obj.posts\n\telse:\n\t\tposts = None\n\n\t#return json version of the post if requested\n\tif query_type and query_type == 'json':\n\t\treturn jsonify([post.get_as_dict() for post in posts])\n\telse:\n\t\treturn render_template('index.html', title='Posts', posts=posts, tags=tags, config=config)\n\n@app.route('/newpost', methods=['GET','POST'])\n@login_required\ndef newpost():\n\tform = PostForm()\n\tif form.validate_on_submit():\n\t\tif form.submit.data:\n\t\t\tpost = Post(title=form.title.data, body=form.post.data, author=current_user)\n\t\t\tpost.set_or_create_tags_from_string(form.tags.data)\n\t\t\tpost.set_slug()\n\t\t\tdb.session.add(post)\n\t\t\tdb.session.commit()\n\t\t\treturn redirect(url_for('index'))\n\t\telse:\n\t\t\tpreview_post = Post(title=form.title.data, body=form.post.data, author=current_user)\n\t\t\tpreview_post.set_or_create_tags_from_string(form.tags.data)\n\t\t\tpreview_post.set_slug()\n\t\t\treturn render_template('new_post.html', title='New Post', form=form, preview_post=preview_post, preview=True)\n\treturn render_template('new_post.html', title='New Post', form=form)\n\n@app.route('/edit/<id>', 
methods=['GET','POST'])\n@login_required\ndef edit_post(id):\n\tpost = Post.query.filter_by(id=id).first()\n\tif post is not None:\n\t\tform = PostForm()\n\t\tif form.validate_on_submit():\n\t\t\tif form.submit.data:\n\t\t\t\tpost.title = form.title.data\n\t\t\t\tpost.body = form.post.data\n\t\t\t\tpost.set_or_create_tags_from_string(form.tags.data)\n\t\t\t\tpost.set_slug()\n\t\t\t\tdb.session.commit()\n\t\t\t\tflash('Changes have been saved')\n\t\t\t\treturn redirect(url_for('index'))\n\t\t\telse:\n\t\t\t\tpreview_post = Post(title=form.title.data, body=form.post.data, author=current_user)\n\t\t\t\tpreview_post.set_or_create_tags_from_string(form.tags.data)\n\t\t\t\tpreview_post.set_slug()\n\t\t\t\treturn render_template('new_post.html', title='New Post', form=form, preview_post=preview_post, preview=True)\n\t\telif request.method=='GET':\n\t\t\tform.title.data = post.title\n\t\t\tform.post.data = post.body\n\t\t\tform.tags.data = post.tags_as_string()\n\telse:\n\t\tflash('Post does not exist')\n\t\treturn redirect(url_for('index'))\n\treturn render_template('new_post.html', title='Edit Post', form=form)\n\n@app.route('/delete/<id>', methods=['GET'])\n@login_required\ndef delete_post(id):\n\tpost = Post.query.filter_by(id=id).first()\n\tif post is not None:\n\t\tdb.session.delete(post)\n\t\tdb.session.commit()\n\t\tflash('Post has been deleted')\n\t\treturn redirect(url_for('index'))\n\treturn redirect(url_for('edit_post', id=id))\n\n@app.route('/', methods=['GET','POST'])\n@app.route('/index', methods=['GET','POST'])\ndef index():\n\tconfig = SiteConfig.query.first()\n\tpage = request.args.get('page', 1, type=int)\n\n\t#Disable themes for now\n\t#Theme is the cookie for user theme preference\n\t#if 'theme' in request.cookies:\n\t#\tthemeIndex = request.cookies['theme']\n\t#\tif themeIndex is not None:\n\t#\t\tthemeIndex = int(themeIndex)\n\t#\t\ttheme = VisitorSettingForm(theme=themeIndex)\n\t#\telse:\n\t#\t\ttheme = VisitorSettingForm()\t\t\t\n\t#else:\n\t#\ttheme = VisitorSettingForm()\n\ttheme = None\n\t\n\tposts = Post.query.order_by(Post.timestamp.desc()).paginate(\n\t\tpage, app.config['POSTS_PER_PAGE'], False)\n\tnext_url = url_for('index', page=posts.next_num) \\\n\t\tif posts.has_next else None\n\tprev_url = url_for('index', page=posts.prev_num) \\\n\t\tif posts.has_prev else None\n\ttags = Tag.query.all()\n\n\tquotes = get_quote_json()\n\t \n\tresp = make_response(render_template('index.html', title='Home', \n\t\t\tposts=posts.items, \n\t\t\tnext_url=next_url, \n\t\t\tprev_url=prev_url, \n\t\t\ttags=tags, \n\t\t\tconfig=config,\n\t\t\ttheme=theme,\n\t\t\tquotes=quotes\n\t\t))\n\t#resp.set_cookie('theme',str(theme.theme.data))\n\n\treturn resp\n\n@app.route('/login', methods=['GET','POST'])\ndef login():\n\tif current_user.is_authenticated:\n\t\treturn redirect(url_for('index'))\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(username=form.username.data).first()\n\t\tif user is None or not user.check_password(form.password.data):\n\t\t\tflash('Invalid username or password')\n\t\t\treturn redirect(url_for('index'))\n\t\tlogin_user(user, remember=form.remember_me.data)\n\t\t#next_page = request.args.get('next')\n\t\t#if not next_page or url_parse(next_page).netloc != '':\n\t\tnext_page = url_for('index')\n\t\treturn redirect(next_page)\n\treturn render_template('login.html', title='Sign in', form=form)\n\n@app.route('/logout')\ndef logout():\n\tlogout_user()\n\treturn redirect(url_for('index'))\n\n@app.route('/register', methods=['GET', 
'POST'])\ndef register():\n\tif current_user.is_authenticated:\n\t\treturn redirect(url_for('index'))\n\tregistForm = RegistrationForm()\n\tif registForm.validate_on_submit():\n\t\tuser = User(username=registForm.username.data, email=registForm.email.data)\n\t\t#user.password_hash = str(registForm.password.data)\n\t\tuser.set_password(registForm.password.data)\n\t\tdb.session.add(user)\n\t\tdb.session.commit()\n\t\tflash('Congratulations, you are now a registered user')\n\t\treturn redirect(url_for('index'))\n\treturn render_template('register.html', title='Register', registForm=registForm)\n\n@app.route('/newconfig', methods=['GET','POST'])\n@login_required\ndef new_config():\n\tform = SiteSettingForm()\n\tif form.validate_on_submit():\n\t\tconfig = SiteConfig(featured_link=form.featured_links.data,site_info=form.site_info.data)\n\t\tconfig.set_featured_from_id(form.post_id.data)\n\t\tdb.session.add(config)\n\t\tdb.session.commit()\n\t\treturn redirect(url_for('index'))\n\treturn render_template('site_config.html', title='Site Setting', form=form)\n\n@app.route('/editconfig', methods=['GET','POST'])\n@login_required\ndef edit_Config():\n\tconfig = SiteConfig.query.first()\n\tif config is not None:\n\t\tform = SiteSettingForm()\n\t\tif form.validate_on_submit():\n\t\t\tconfig.featured_link = form.featured_links.data\n\t\t\tconfig.site_info = form.site_info.data\n\t\t\tconfig.set_featured_from_id(form.post_id.data)\n\t\t\tdb.session.add(config)\n\t\t\tdb.session.commit()\n\t\t\tflash('Changes have been saved')\n\t\t\treturn redirect(url_for('index'))\n\t\telif request.method=='GET':\n\t\t\tform.post_id.data = config.get_posts_id()\n\t\t\tform.featured_links.data = config.featured_link\n\t\t\tform.site_info.data = config.site_info\n\telse:\n\t\tflash('Site config does not exist')\n\t\treturn redirect(url_for('index'))\n\treturn render_template('site_config.html', title='Site Setting', form=form)","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":9035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"585248316","text":"# We need to write a program that checks different names and determines if they are equal.\n# We need to accept two strings and compare them.\n\ndef same_name(your_name, my_name):\n    if (your_name == my_name):\n        return True \n    return False\n\n# Uncomment these function calls to test your same_name function:\nprint(same_name(\"Colby\", \"Colby\"))\n# should print True\nprint(same_name(\"Tina\", \"Amber\"))\n# should print False","sub_path":"6b - CHALLENGE ADV/6.1.2_SameName.py","file_name":"6.1.2_SameName.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"217286891","text":"# uncompyle6 version 3.2.0\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]\n# Embedded file name: pirates.npc.DistributedBountyHunter\nfrom direct.interval.IntervalGlobal import *\nfrom direct.directnotify import DirectNotifyGlobal\nfrom direct.distributed.ClockDelta import *\nfrom pandac.PandaModules import *\nfrom pirates.effects.GhostAura import GhostAura\nfrom pirates.pirate import DistributedPirateBase\nfrom pirates.piratesbase import PiratesGlobals\nfrom pirates.battle import WeaponGlobals\nfrom pirates.battle import DistributedBattleNPC\nfrom pirates.piratesbase import PiratesGlobals\nfrom pirates.piratesbase import PLocalizer\nfrom pirates.leveleditor import NPCList\nfrom pirates.pirate import HumanDNA\nfrom 
pirates.pirate import AvatarTypes\nfrom pirates.pirate import Human\nimport random\nfrom pirates.inventory import ItemGlobals\nfrom pirates.pirate import AvatarTypes\nfrom pirates.uberdog.UberDogGlobals import InventoryType\nfrom pirates.effects.Drown import Drown\nfrom pirates.battle import EnemyGlobals\nfrom pirates.audio import SoundGlobals\nfrom pirates.audio.SoundGlobals import loadSfx\n\nclass DistributedBountyHunter(DistributedBattleNPC.DistributedBattleNPC, Human.Human):\n __module__ = __name__\n notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBountyHunter')\n\n def __init__(self, cr):\n DistributedBattleNPC.DistributedBattleNPC.__init__(self, cr)\n Human.Human.__init__(self)\n self.inNotice = 0\n self.antiEffect = None\n self.antiIval = None\n self.sfxList = []\n return\n\n def announceGenerate(self):\n DistributedBattleNPC.DistributedBattleNPC.announceGenerate(self)\n if not self.loaded:\n if self.uniqueId:\n self.setDNAId(self.uniqueId)\n if self.style:\n Human.Human.generateHuman(self, self.style.gender, base.cr.human)\n self.checkState()\n self.getMinimapObject()\n\n def setTeam(self, team):\n DistributedBattleNPC.DistributedBattleNPC.setTeam(self, team)\n\n def generate(self):\n DistributedBattleNPC.DistributedBattleNPC.generate(self)\n self.setInteractOptions(isTarget=False, allowInteract=False)\n\n def disable(self):\n DistributedBattleNPC.DistributedBattleNPC.disable(self)\n self.stopBlink()\n if self.antiIval:\n self.antiIval.finish()\n self.antiIval = None\n return\n\n def delete(self):\n self.sfxList = []\n DistributedBattleNPC.DistributedBattleNPC.delete(self)\n Human.Human.delete(self)\n\n def getNameText(self):\n return Human.Human.getNameText(self)\n\n def isBattleable(self):\n return 1\n\n def setUniqueId(self, uniqueId):\n if uniqueId:\n self.setDNAId(uniqueId)\n self.loaded = 0\n\n def setDNAId(self, dnaId):\n if dnaId:\n dnaDict = NPCList.NPC_LIST.has_key(dnaId) and NPCList.NPC_LIST[dnaId]\n customDNA = HumanDNA.HumanDNA()\n customDNA.loadFromNPCDict(dnaDict)\n self.setDNAString(customDNA)\n self.checkState()\n else:\n self.setDNAString(None)\n self.setDefaultDNA()\n gender = random.choice(['m', 'm', 'f'])\n self.style.makeNPCBountyHunter(seed=None, gender=gender)\n self.checkState()\n return\n\n def play(self, *args, **kwArgs):\n Human.Human.play(self, *args, **kwArgs)\n\n def loop(self, *args, **kwArgs):\n Human.Human.loop(self, *args, **kwArgs)\n\n def pose(self, *args, **kwArgs):\n Human.Human.pose(self, *args, **kwArgs)\n\n def pingpong(self, *args, **kwArgs):\n Human.Human.pingpong(self, *args, **kwArgs)\n\n def stop(self, *args, **kwArgs):\n Human.Human.stop(self, *args, **kwArgs)\n\n def shouldNotice(self):\n if self.animSet == 'default':\n return 1\n else:\n return 0\n\n def startNoticeLoop(self):\n pass\n\n def endNoticeLoop(self):\n pass\n\n def startShuffle(self, turnAnim):\n if self.playNoticeAnims():\n self.loop(turnAnim, partName='legs', blendDelay=0.15)\n\n def midShuffle(self):\n if self.playNoticeAnims():\n self.loop('idle', blendDelay=0.3)\n\n def playNoticeAnim(self):\n if not self.doneThreat:\n self.doneThreat = 1\n if self.preselectedReaction:\n reaction = self.preselectedReaction\n self.preselectedReaction = None\n else:\n reaction = self.getNoticeAnimation()\n if reaction:\n self.play(reaction, blendInT=0.3, blendOutT=0.3)\n return\n\n def presetNoticeAnimation(self):\n self.preselectedReaction = self.getNoticeAnimation()\n return self.getDuration(self.preselectedReaction)\n\n def getNoticeAnimation(self):\n reaction = 
None\n if self.getLevel() - 10 >= localAvatar.getLevel():\n reaction = random.choice(['emote_laugh', 'emote_anger'])\n else:\n if self.getLevel() + 4 >= localAvatar.getLevel():\n reaction = random.choice(['emote_laugh', 'emote_anger'])\n else:\n reaction = random.choice(['emote_laugh', 'emote_anger'])\n return reaction\n\n def abortNotice(self):\n DistributedBattleNPC.DistributedBattleNPC.abortNotice(self)\n if self.inNotice:\n self.checkState()\n self.inNotice = 0\n\n def endNotice(self):\n DistributedBattleNPC.DistributedBattleNPC.endNotice(self)\n if self.inNotice:\n self.checkState()\n self.inNotice = 0\n\n def checkState(self):\n pass\n\n def getDeathTrack(self):\n if self.hp > 0:\n self.nametag3d.hide()\n return Sequence(Wait(3.0))\n return DistributedBattleNPC.DistributedBattleNPC.getDeathTrack(self)\n\n def doAntiEffect(self):\n if self.antiIval and self.antiIval.isPlaying():\n return\n antiEffect = Drown.getEffect()\n if antiEffect:\n effectScale = EnemyGlobals.getEffectScale(self)\n antiEffect.reparentTo(self)\n antiEffect.setScale(effectScale)\n antiEffect.play()\n\n def getMinimapObject(self):\n mmObj = DistributedBattleNPC.DistributedBattleNPC.getMinimapObject(self)\n if mmObj:\n if self.getTeam() == PiratesGlobals.PLAYER_TEAM:\n color = VBase4(0.1, 1.0, 0.1, 0.7)\n mmObj.setIconColor(color=color)\n else:\n mmObj.setIconColor()\n return mmObj","sub_path":"pirates/npc/DistributedBountyHunter.py","file_name":"DistributedBountyHunter.py","file_ext":"py","file_size_in_byte":6661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"91574320","text":"from time import time\n\n# a = ABCBDAB --> BCAB\n# b = BDCABA\n# LCS(BCCDAB, BDCABA) = B + LCS(CCDAB, DCABA) --> max(LCS(CBDAB, CABA), LCS(BDAB, DCABA))\n\n\ndef treasure_hunt(grid):\n return dfs(grid, (0, 0), (len(grid) - 1, 0)) + dfs(grid, (0, len(grid[0]) - 1), (len(grid) - 1, len(grid[0]) - 1))\n\n\ndef dfs(grid, start, end):\n visited = {start}\n neighbours = [(1, 1), (1, -1), (1, 0)]\n stack = [[start, grid[start[0]][start[1]]]]\n max_sum = 0\n while stack:\n point, curr_sum = stack.pop()\n if point == end:\n max_sum = max(max_sum, curr_sum)\n for neighbour in neighbours:\n x = point[0] + neighbour[0]\n y = point[1] + neighbour[1]\n if 0 <= x < len(grid) and \\\n 0 <= y < len(grid[0]) and \\\n (x, y) not in visited:\n stack.append([(x, y), curr_sum + grid[x][y]])\n visited.add((x, y))\n return max_sum\n\n\ndef main():\n row, col = input().split()\n elements = input().split()\n grid = [[0 for _ in range(int(col))] for _ in range(int(row))]\n count = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n grid[i][j] = int(elements[count])\n count += 1\n\n return treasure_hunt(grid)\n\n\ndef lcs_recursive(i, j, str1, str2):\n # base-case\n if i == len(str1) or j == len(str2):\n return 0\n elif str1[i] == str2[j]:\n return 1 + lcs_recursive(i + 1, j + 1, str1, str2)\n else:\n return max(lcs_recursive(i, j + 1, str1, str2), lcs_recursive(i + 1, j, str1, str2))\n\n\ndef lcs_memo(i, j, str1, str2, memo):\n # base-case\n if i == len(str1) or j == len(str2):\n return 0\n elif (i, j) not in memo:\n if str1[i] == str2[j]:\n memo[(i, j)] = 1 + lcs_memo(i + 1, j + 1, str1, str2, memo)\n else:\n memo[(i, j)] = max(lcs_memo(i, j + 1, str1, str2, memo), lcs_memo(i + 1, j, str1, str2, memo))\n\n return memo[(i, j)]\n\n\ndef lcs_tabular(str1, str2):\n dp = [([0] * (len(str1) + 1)) for _ in range(len(str2) + 1)]\n\n for i in range(1, len(dp)):\n for j in range(1, len(dp[0])):\n if str1[j - 
1] == str2[i - 1]:\n                dp[i][j] = dp[i - 1][j - 1] + 1\n            else:\n                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n\n    return dp[-1][-1]\n\n\ndef print_lcs(str_1, str_2, len_1, len_2, lookup):\n    if len_1 == 0 or len_2 == 0:\n        return \"\"\n\n    if str_1[len_1 - 1] == str_2[len_2 - 1]:\n        return print_lcs(str_1, str_2, len_1 - 1, len_2 - 1, lookup) + str_1[len_1 - 1]\n\n    if lookup[len_1 - 1][len_2] > lookup[len_1][len_2 - 1]:\n        return print_lcs(str_1, str_2, len_1 - 1, len_2, lookup)\n    else:\n        return print_lcs(str_1, str_2, len_1, len_2 - 1, lookup)\n\n\nstart = time()\nprint(lcs_tabular(\"bsdfdsfdkjkkscd\", \"abcdkjfkjlllsjkhkjhkjhkjjkdfd\"))\nend = time()\nprint(end - start)\n# start = time()\n# print(lcs_memo(0, 0, \"bsdfdsfdkjkkscd\", \"abcdkjfkjlllsjkhkjhkjhkjjkdfd\", {}))\n# end = time()\n# print(end - start)\n","sub_path":"BootCamp/CD5/LongestCommonSubsequence.py","file_name":"LongestCommonSubsequence.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"263169776","text":"# -*- coding: utf-8 -*-\n# @File : urls.py\n# @Author: clelandgt@163.com\n# @Date : 2020-07-05\n# @Desc :\n\nfrom django.urls import path\nfrom apps.order.views import OrderPlaceView, OrderCommitView, OrderPayView, CheckPayView,CommentView\n#\nurlpatterns = [\n    path('place', OrderPlaceView.as_view(), name='place'),  # display the order placement page\n    path('commit', OrderCommitView.as_view(), name='commit'),  # create an order\n    path('pay', OrderPayView.as_view(), name='pay'),  # pay for an order\n    path('check', CheckPayView.as_view(), name='check'),  # check payment status\n    path('comment/(?P<order_id>.+)', CommentView.as_view(), name='comment'),  # comment on an order\n]","sub_path":"Django/fresh_shop/apps/order/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"127047537","text":"#when a request is sent it pulls the data from all the servers\n\nfrom threading import *\nimport socket\nimport time\nimport sys\n\ncount = 0\ncount_lock = Lock()\nnum = 0\nthread_lock = Lock()\nfile_lock = Lock()\n\n\ndef on_new_client(client_socket, address, threads):\n\n    global count\n    global count_lock\n    global file_lock\n    ts = []\n\n    with client_socket:\n        print('In {}:{}'.format(address[0], address[1]))\n        while True:\n            data = client_socket.recv(1024)\n            if not data:\n                #threads = [thread for thread in threads if thread[2] != client_socket]\n                break\n            #print('Rcvd', repr(data), 'From', address)\n            data = data.decode()\n            if data[:7] == 'DUMPLOG':\n                print('Number of threads:' + str(len(threads)))\n                user = data.split(',')[1]\n                #save the entire message so we know what type of dumplog it is\n                message = 'TAKE_DUMP'\n                print('SENT: ' + message)\n                message = message.encode()\n                for i in threads:\n                    if i[2] == client_socket:\n                        pass\n                    else:\n                        try:\n                            i[2].sendall(message)\n                            ts.append(i[0])\n                        except OSError:\n                            pass\n                with thread_lock:\n                    for i in range(len(threads)):\n                        threads.pop()\n                for i in ts:\n                    try:\n                        i.join()\n                    except RuntimeError:\n                        pass\n                print('onto_the_next')\n                combine_log_files(client_socket, user, threads)\n                break\n\n            else:\n                with count_lock:\n                    count += 1\n                filename = str(count)\n                print(\"{}:{} opening file: {}\".format(address[0], address[1], count))\n                with open(filename, 'wt') as output:\n                    output.write(data)\n                    while True:\n                        data = client_socket.recv(1024)\n                        if not data:\n                            break\n                        data = data.decode()\n                        #print(data)\n                        output.write(data)\n                break\n\n    print('out {}:{}'.format(address[0], address[1]))\n    '''\n    print('out')\n    
print(count)\n print(len(ts))\n if count == len(ts):\n '''\n\n\ndef combine_log_files(client_socket, user, threads):\n global count\n global count_lock\n file_count = count\n # eventually will want to send the file over the socket once everything is connected, but for now will\n # just write it to file on local machine\n # NOTE: we will need to come up with a protocol for transfering these files because they will likely\n # be longer than the 1024 bytes currently expected.\n print('dumplog')\n generateXML(client_socket, user, file_count)\n with count_lock:\n count = 0\n with open('complete_log', 'a') as log_file:\n for j in range(1, file_count + 1):\n with open(str(j), 'rt') as input:\n logs = input.read()\n log_file.write(logs)\n\ndef send_entry(i, client_socket):\n message = '\\t<' + i[1] + '>\\n'\n message += '\\t\\t' + str(i[2]) + '\\n'\n message += '\\t\\t' + i[3] + '\\n'\n message += '\\t\\t' + str(i[4]) + '\\n'\n if i[5] != '\\\\N':\n message += '\\t\\t' + str(i[5]) + '\\n'\n if i[7] != '\\\\N':\n message += '\\t\\t' + str(i[7]) + '\\n'\n if i[8] != '\\\\N':\n message += '\\t\\t' + str(i[8]) + '\\n'\n if i[9] != '\\\\N':\n message += '\\t\\t' + str(i[9]) + '\\n'\n if i[6] != '\\\\N':\n try:\n message += '\\t\\t' + '{:.2f}'.format(round((float(i[6]) / 100.0), 2)) + '\\n'\n except (TypeError, ValueError) as error:\n message += '\\t\\t' + str(i[6]) + '\\n'\n if i[10] != '\\\\N':\n try:\n message += '\\t\\t' + '{:.2f}'.format(round((float(i[10]) / 100.0), 2)) + '\\n'\n except (TypeError, ValueError) as error:\n message += '\\t\\t' + str(i[10]) + '\\n'\n if i[11] != '\\\\N':\n message += '\\t\\t' + str(i[11]) + '\\n'\n if i[12] != '\\\\N':\n message += '\\t\\t' + str(i[12]) + '\\n\\n'\n if i[13] != '\\\\N':\n message += '\\t\\t' + str(i[13]) + '\\n'\n if i[14] != '\\\\N':\n message += '\\t\\t' + str(i[14]) + '\\n'\n if i[15] != '\\\\N':\n message += '\\t\\t' + str(i[15]) + '\\n'\n message += '\\t\\n'\n message = message.encode()\n client_socket.sendall(message)\n\n\ndef generateXML(client_socket, user, file_count):\n # eventually will want to send the file over the socket once everything is connected, but for now will\n # just write it to file on local machine\n # NOTE: we will need to come up with a protocol for transfering these files because they will likely\n # be longer than the 1024 bytes currently expected.\n all_entries = None\n print(file_count)\n message = '\\n\\n\\n'\n message = message.encode()\n client_socket.sendall(message)\n # comment out if you don't want to almagamate--------------------------------------------------------------------\n try:\n with open('complete_log', 'rt') as input:\n all_entries = input.read()\n all_entries = all_entries.split('\\t')\n if user == 'None':\n for k in all_entries[:-1]:\n try:\n i = k.split(',')\n send_entry(i, client_socket)\n except IndexError:\n print(all_entries.index(k))\n else:\n for k in all_entries[:-1]:\n i = k.split(',')\n if i[8] != user:\n pass\n else:\n send_entry(i, client_socket)\n except FileNotFoundError:\n pass\n #----------------------------------------------------------------------------------------------------------------\n\n\n for j in range(1, file_count + 1):\n print('file: ' + str(j))\n with open(str(j), 'rt') as input:\n all_entries = input.read()\n all_entries = all_entries.split('\\t')\n if user == 'None':\n for k in all_entries[:-1]:\n try:\n i = k.split(',')\n send_entry(i, client_socket)\n except IndexError:\n print(all_entries.index(k))\n else:\n for k in all_entries[:-1]:\n i = k.split(',')\n if i[8] != 
user:\n pass\n else:\n send_entry(i, client_socket)\n\n message = '\\n\\n'\n message = message.encode()\n client_socket.sendall(message)\n message = 'ENDOFDUMPLOG'\n message = message.encode()\n client_socket.sendall(message)\n\n\ndef main():\n\n num = 0\n threads = []\n\n # set port to free port\n if len(sys.argv) == 1:\n port = 8030\n else:\n port = int(sys.argv[1])\n # init server\n # init server\n print(\"Initializing server...\")\n server_socket = None\n msg = \"\"\n try:\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n except Exception as msg:\n print(msg)\n server_socket = None\n\n hostName = socket.gethostname()\n print(hostName)\n # user specified port?\n hostIP = socket.gethostbyname(socket.gethostname())\n try:\n server_socket.bind(('', port))\n except Exception as msg:\n server_socket.close()\n print(msg)\n server_socket = None\n\n # TODO: Graceful exit\n\n if server_socket is None:\n print(\"Could not open socket\")\n exit(-1)\n\n print(\"Host: \" + hostName + \" is running on: \" + hostIP + \":\" + str(port))\n print(\"Ctr-Break to force close\")\n # wait for, and handle connections\n server_socket.listen(5)\n while True:\n try:\n client_socket, address = server_socket.accept()\n print(\"Connection from\", address)\n num += 1\n # with client_socket:\n client_thread = Thread(target=on_new_client, args=(client_socket, address, threads))\n with thread_lock:\n threads.append([client_thread, False, client_socket])\n client_thread.start()\n except Exception as e:\n # something went wrong\n print(e)\n\n server_socket.close()\n print(\"Server closed\")\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"Software Systems Scalability/log_server/log_server.py","file_name":"log_server.py","file_ext":"py","file_size_in_byte":8978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"508538840","text":"def compareVersion(version1, version2):\n\tv1 = [int(i) for i in version1.split('.')] \n\tv2 = [int(i) for i in version2.split('.')] \n\tmax_len = max(len(v1), len(v2)) \n\tv1 += [0]*(max_len - len(v1)) \n\tv2 += [0]*(max_len - len(v2))\n\tif v1 > v2: return 1 \n\telif v1 < v2: return -1 \n\telse: return 0\n\n\nver1=\"1.11\"\nver2=\"1.10.5\"\nprint(compareVersion(ver1, ver2))","sub_path":"interview/leetcode/165.py","file_name":"165.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"94753062","text":"from collections import Counter\n\nfrom flair.data import Dictionary\n\n# Flair Characters\nchars = Dictionary.load('common-chars')\nflair_characters = sorted([b.decode(\"utf-8\") for b in chars.idx2item])\ncontent = \"\\n\".join(flair_characters)\nf = open(\"characters_flair.txt\", \"w\")\nf.write(content)\n\n# Corpus Characters\nfiles = [\n \"data1/train.txt\",\n \"data1/dev.txt\",\n \"data1/test.txt\"\n]\n\ncharacters = Counter()\nfor file in files:\n for line in open(file):\n if line.strip():\n c = Counter(line.strip().split()[0])\n characters += c\ncorpus_characters = sorted([c for c, n in characters.most_common()])\ncontent = \"\\n\".join(corpus_characters)\nf = open(\"characters_corpus.txt\", \"w\")\nf.write(content)\n\n# Merge characters\ncharacters = sorted(set(corpus_characters).union(set(flair_characters)))\ncontent = \"\\n\".join(characters)\nf = open(\"characters_merged.txt\", 
\"w\")\nf.write(content)\n\n","sub_path":"egs/vlsp2016_flair/extract_characters.py","file_name":"extract_characters.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"445290236","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pysnmp/proto/secmod/rfc3414/priv/des.py\n# Compiled at: 2019-08-18 17:24:05\nimport random\nfrom pysnmp.proto.secmod.rfc3414.priv import base\nfrom pysnmp.proto.secmod.rfc3414.auth import hmacmd5, hmacsha\nfrom pysnmp.proto.secmod.rfc3414 import localkey\nfrom pysnmp.proto.secmod.rfc7860.auth import hmacsha2\nfrom pysnmp.proto import errind, error\nfrom pyasn1.type import univ\nfrom sys import version_info\ntry:\n from Cryptodome.Cipher import DES\nexcept ImportError:\n DES = None\n\ntry:\n from hashlib import md5, sha1\nexcept ImportError:\n import md5, sha\n md5 = md5.new\n sha1 = sha.new\n\nrandom.seed()\n\nclass Des(base.AbstractEncryptionService):\n __module__ = __name__\n serviceID = (1, 3, 6, 1, 6, 3, 10, 1, 2, 2)\n keySize = 16\n if version_info < (2, 3):\n _localInt = int(random.random() * 4294967295)\n else:\n _localInt = random.randrange(0, 4294967295)\n\n def hashPassphrase(self, authProtocol, privKey):\n if authProtocol == hmacmd5.HmacMd5.serviceID:\n hashAlgo = md5\n elif authProtocol == hmacsha.HmacSha.serviceID:\n hashAlgo = sha1\n elif authProtocol in hmacsha2.HmacSha2.hashAlgorithms:\n hashAlgo = hmacsha2.HmacSha2.hashAlgorithms[authProtocol]\n else:\n raise error.ProtocolError('Unknown auth protocol %s' % (authProtocol,))\n return localkey.hashPassphrase(privKey, hashAlgo)\n\n def localizeKey(self, authProtocol, privKey, snmpEngineID):\n if authProtocol == hmacmd5.HmacMd5.serviceID:\n hashAlgo = md5\n elif authProtocol == hmacsha.HmacSha.serviceID:\n hashAlgo = sha1\n elif authProtocol in hmacsha2.HmacSha2.hashAlgorithms:\n hashAlgo = hmacsha2.HmacSha2.hashAlgorithms[authProtocol]\n else:\n raise error.ProtocolError('Unknown auth protocol %s' % (authProtocol,))\n localPrivKey = localkey.localizeKey(privKey, snmpEngineID, hashAlgo)\n return localPrivKey[:self.keySize]\n\n def __getEncryptionKey(self, privKey, snmpEngineBoots):\n desKey = privKey[:8]\n preIV = privKey[8:16]\n securityEngineBoots = int(snmpEngineBoots)\n salt = [\n securityEngineBoots >> 24 & 255, securityEngineBoots >> 16 & 255, securityEngineBoots >> 8 & 255, securityEngineBoots & 255, self._localInt >> 24 & 255, self._localInt >> 16 & 255, self._localInt >> 8 & 255, self._localInt & 255]\n if self._localInt == 4294967295:\n self._localInt = 0\n else:\n self._localInt += 1\n return (\n desKey.asOctets(), univ.OctetString(salt).asOctets(),\n univ.OctetString(map(lambda x, y: x ^ y, salt, preIV.asNumbers())).asOctets())\n\n @staticmethod\n def __getDecryptionKey(privKey, salt):\n return (\n privKey[:8].asOctets(),\n univ.OctetString(map(lambda x, y: x ^ y, salt.asNumbers(), privKey[8:16].asNumbers())).asOctets())\n\n def encryptData(self, encryptKey, privParameters, dataToEncrypt):\n if DES is None:\n raise error.StatusInformation(errorIndication=errind.encryptionError)\n (snmpEngineBoots, snmpEngineTime, salt) = privParameters\n (desKey, salt, iv) = self.__getEncryptionKey(encryptKey, snmpEngineBoots)\n privParameters = univ.OctetString(salt)\n desObj = DES.new(desKey, DES.MODE_CBC, iv)\n plaintext = dataToEncrypt + 
univ.OctetString((0, ) * (8 - len(dataToEncrypt) % 8)).asOctets()\n ciphertext = desObj.encrypt(plaintext)\n return (\n univ.OctetString(ciphertext), privParameters)\n\n def decryptData(self, decryptKey, privParameters, encryptedData):\n if DES is None:\n raise error.StatusInformation(errorIndication=errind.decryptionError)\n (snmpEngineBoots, snmpEngineTime, salt) = privParameters\n if len(salt) != 8:\n raise error.StatusInformation(errorIndication=errind.decryptionError)\n (desKey, iv) = self.__getDecryptionKey(decryptKey, salt)\n if len(encryptedData) % 8 != 0:\n raise error.StatusInformation(errorIndication=errind.decryptionError)\n desObj = DES.new(desKey, DES.MODE_CBC, iv)\n return desObj.decrypt(encryptedData.asOctets())","sub_path":"pycfiles/pysnmp-se-3.5.0.tar/des.py","file_name":"des.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"433607645","text":"import numpy as np\n\n\n# Class for an MDP instance\nclass MDP:\n # Constructor that reads a text file to\n # parse the MDP instance\n def __init__(self, file_name):\n # Open MDP instance file\n fin = open(file_name, 'r')\n # Read in number of states from line 1 as an int\n self.nstates = int(fin.readline())\n # Read in number of action types from line 2\n self.nactions = int(fin.readline())\n # Read in reward function into a matrix\n # Init reward matrix\n self.f_reward = np.zeros((self.nstates, self.nactions, self.nstates))\n # Read nstates x nactions number of lines\n for i in range(self.nstates * self.nactions):\n s = i // self.nactions\n a = i % self.nactions\n self.f_reward[s][a] = \\\n np.fromstring(fin.readline(), dtype=float, sep='\\t')\n # Read in Transition function into a matrix\n self.f_trans = np.zeros_like(self.f_reward)\n for i in range(self.nstates * self.nactions):\n s = i // self.nactions\n a = i % self.nactions\n self.f_trans[s][a] = \\\n np.fromstring(fin.readline(), dtype=float, sep='\\t')\n # Read discount factor\n self.gamma = float(fin.readline())\n # Read Problem type --> continuing or episodic\n self.type = fin.readline()[:-1]\n\n # Function to return all terminal state candidates\n # Very last state is always a candidate as promised in PA2\n # So in case of an episodic task return list will be non empty\n def get_terminal_states(self):\n # Transitions to itself with probability 1 irrespective\n # of the action chosen by the policy imply terminal state\n lst = []\n for s in range(self.nstates):\n if np.array_equal(self.f_trans[s, :, s], np.ones(self.nactions)):\n lst.append(s)\n return lst\n","sub_path":"PA3/mdp_simulation_code/MDP_class.py","file_name":"MDP_class.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"556188844","text":"#!/usr/bin/env python\n#--coding: utf-8--\n\nfrom conf import setting\nfrom core.socket_class import MySocketServer\nfrom core import tools,hash_factory\nimport json,os\n\n\ndef connect():\n class MyServer(MySocketServer):\n def handle(self):\n # 接收用户名和密码\n self.conn = self.request\n self.ip_port = self.client_address\n self.message={\n \"pwd\":setting.SERVER_DIR\n }\n\n\n os.chdir(self.message[\"pwd\"])\n\n self.send_data(\"成功连接ftp,请输入用户名和密码\")\n # 循环登陆,直到成功\n count = 0\n while True:\n user_dict=self.recv_data()\n\n user_dict=json.loads(user_dict)\n user_path=os.path.join(setting.DATABASE_CONFIG[\"dirname\"],\n \"%s.json\"%user_dict[\"user\"])\n\n print(user_path)\n if 
os.path.exists(user_path):\n with open(user_path,\"r\") as f:\n file_dict=json.load(f)\n if file_dict[\"passwd\"] == user_dict[\"passwd\"]:\n send_dict={\"flag\":True,\"msg\":\"登陆成功\",\"user\":user_dict[\"user\"]}\n print(json.dumps(send_dict))\n self.send_data(json.dumps(send_dict))\n break\n else:\n send_dict = {\"flag\": False, \"msg\": \"密码输入错误\"}\n else:\n send_dict={\"flag\":False,\"msg\":\"用户名不存在\"}\n print(json.dumps(send_dict))\n self.send_data(json.dumps(send_dict))\n while True:\n cmd=self.recv_data()\n if cmd == \"send_file\":\n put(self)\n elif cmd == \"recv_file\":\n get(self)\n elif cmd == \"ls\":\n ls(self)\n server_socket=MyServer.SocketStart(setting.SERVER_ADDR)\ndef put(s):\n '''给客户端做put工作'''\n file_head = s.recv_data()\n file_head=json.loads(file_head)\n\n if not file_head[\"dstfile\"]:\n file_head[\"dstfile\"]=os.path.join(setting.SERVER_DIR,\n os.path.basename(file_head[\"srcfile\"]))\n else:\n file_head[\"dstfile\"] = os.path.join(setting.SERVER_DIR,\n os.path.basename(file_head[\"dstfile\"]))\n s.send_data(\"ok\")\n print(file_head)\n # 进度条\n rfi=s.recv_file_iter(file_head[\"dstfile\"])\n size,filesize=rfi.__next__()\n for size in rfi:\n s.sendall_data(str(size/filesize))\n pass\n #\n # 接受完发送md5头部信息\n dst_md5=hash_factory.file_md5_factory(file_head[\"dstfile\"])\n file_head[\"dstmd5\"] = dst_md5\n file_head=json.dumps(file_head)\n print(file_head)\n s.sendall_data(file_head)\n\n\ndef get(s):\n '''给客户端做get工作'''\n file_head_dict=s.recv_data()\n file_head_dict=json.loads(file_head_dict)\n file_head_dict[\"srcfile\"] = os.path.join(setting.SERVER_DIR,file_head_dict[\"srcfile\"])\n if os.path.exists(file_head_dict[\"srcfile\"]):\n file_head_dict.update({\"flag\":True,\"msg\":\"源文件存在\"})\n src_md5 = hash_factory.file_md5_factory(file_head_dict[\"srcfile\"])\n file_head_dict[\"srcmd5\"] = src_md5\n else:\n file_head_dict.update({\"flag\": False, \"msg\": \"源文件不存在\"})\n file_head=json.dumps(file_head_dict)\n s.send_data(file_head)\n\n if file_head_dict[\"flag\"]:\n s.recv_data()\n sfi=s.send_file_iter(file_head_dict[\"srcfile\"])\n for item in sfi:\n pass\n\n\n\ndef ls(s):\n '''给客户端做ls工作'''\n sight=s.recv_data()\n print(sight)\n import glob\n if sight == \"?\":\n dirlist=glob.glob1(s.message[\"pwd\"],\"*\")\n else:\n dirlist = glob.glob1(s.message[\"pwd\"],sight)\n print(sight)\n dirlist=json.dumps(dirlist)\n s.send_data(dirlist)\n pass\n\n\n\n","sub_path":"socket_ftp_server练习/core/server_sock.py","file_name":"server_sock.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"486686501","text":"import os, json, pickle, random, sys, shutil\nimport string\nimport numpy as np\nimport cv2\nimport hashlib\nimport argparse\n\nfrom pprint import pprint\nfrom util import us3\nfrom util import udynamodb\nfrom util.ulogger import Log\nfrom util.udeco import debug_function_name\nfrom util import ufile\nfrom const.const_common import *\nfrom character_list import *\n\nfrom matplotlib import pyplot as plt\n\n\"\"\"\n- 180511:\n - 画像ファイルとラベルを読み込んで、バイナリデータとして保存\n - LABEL_DATA\n - IMAGE_DATA\n を作成し、教師データ、検証用データとして利用\n\n- 180512:\n -ONE HOT VECTOR 作成用のスクリプトを作成\n\n- 180515:\n - sklearn-learn用、tensorflow用などこの段階であらかじめある程度データをフォーマットして保存\n\n - default binary_data:\n - LABEL_DATA:\n - description : 1d-char-list-array \n - filename : default_label.data \n\n - IMAGE_DATA:\n - description : 2d-array, 8bit-gray-scale\n - filename : default_image.data\n \n - sklearn-learn:\n - LABEL_DATA:\n - description : 
1d-char-list-array\n - filename : sklearn_label.data \n\n - IMAGE_DATA:\n - description : 1d-array, 1d-gray-scale-8bit-array\n - filename : sklearn_image.data\n\n - tensorlowr\n - LABEL_DATA:\n - description : 1d-array, one-hot-vector\n - filename : tf_label.data\n\n - IMAGE_DATA:\n - description : 1d-array, 1d-gray-scale-8bit-array\n - filename : tf_image.data\n \n- 180528:\n - template matching 用\n - template matching\n - LABEL_DATA:\n - description : 1d-char-list-array\n - filename : tm_label.data\n\n - IMAGE_DATA:\n - description : 2d-array, 8bit-gray-scale\n - filename : tm_image.data\n \n\"\"\"\n\nABS_PATH = os.path.abspath(os.path.dirname(__file__))\n\nPNG_IMAGE_FILE_PATH = ABS_PATH + '/test_png/'\nJSON_IMAGE_FILE_PATH = ABS_PATH + '/json/'\n\nYYYYMMDDhhmmss = ufile.get_YYYYMMDDhhmmss()\n\nDATA_DIR = ABS_PATH + '/binary/label_image/' + YYYYMMDDhhmmss + '/'\nLATEST_DIR = ABS_PATH + '/binary/label_image/latest/'\n\nufile.make_directory(DATA_DIR)\nufile.make_directory(LATEST_DIR)\n\ndir_number = len(os.listdir(DATA_DIR))\nDATA_DIR = DATA_DIR + '{0:06d}'.format(dir_number) + '_' + ''.join(\n [random.choice(string.ascii_letters + string.digits) for i in range(8)]) + '/'\n\nufile.make_directory(DATA_DIR)\n\nDEFAULT_LABEL_DATA = DATA_DIR + 'default_label.data'\nDEFAULT_IMAGE_DATA = DATA_DIR + 'default_image.data'\nTM_LABEL_DATA = DATA_DIR + 'tm_label.data'\nTM_IMAGE_DATA = DATA_DIR + 'tm_image.data'\nSKLEARN_LABEL_DATA = DATA_DIR + 'sklearn_label.data'\nSKLEARN_IMAGE_DATA = DATA_DIR + 'sklearn_image.data'\nTF_LABEL_DATA = DATA_DIR + 'tf_label.data'\nTF_IMAGE_DATA = DATA_DIR + 'tf_image.data'\n\nALL_DATA_LIST = [\n DEFAULT_LABEL_DATA,\n DEFAULT_IMAGE_DATA,\n TM_LABEL_DATA,\n TM_IMAGE_DATA,\n SKLEARN_LABEL_DATA,\n SKLEARN_IMAGE_DATA,\n TF_LABEL_DATA,\n TF_IMAGE_DATA,\n]\n\nONE_HOT_VECTOR_TABLE = DATA_DIR + '/one_hot_vector.data'\nONE_HOT_VECTOR_TABLE_REVERSE_KEY_VALUE = DATA_DIR + '/one_hot_vector_reverse_key_value.data'\nONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX = DATA_DIR + '/one_hot_vector_table_key_char_value_argmax.data'\nONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX_REVERSE_KEY_VALUE = DATA_DIR + '/one_hot_vector_table_key_char_value_argmax_reverse_key_value.data'\n\nLATEST_DEFAULT_LABEL_DATA = LATEST_DIR + 'default_label.data'\nLATEST_DEFAULT_IMAGE_DATA = LATEST_DIR + 'default_image.data'\nLATEST_TM_LABEL_DATA = LATEST_DIR + 'tm_label.data'\nLATEST_TM_IMAGE_DATA = LATEST_DIR + 'tm_image.data'\nLATEST_SKLEARN_LABEL_DATA = LATEST_DIR + 'sklearn_label.data'\nLATEST_SKLEARN_IMAGE_DATA = LATEST_DIR + 'sklearn_image.data'\nLATEST_TF_LABEL_DATA = LATEST_DIR + 'tf_label.data'\nLATEST_TF_IMAGE_DATA = LATEST_DIR + 'tf_image.data'\n\nALL_LATEST_DATA_LIST = [\n LATEST_DEFAULT_LABEL_DATA,\n LATEST_DEFAULT_IMAGE_DATA,\n LATEST_TM_LABEL_DATA,\n LATEST_TM_IMAGE_DATA,\n LATEST_SKLEARN_LABEL_DATA,\n LATEST_SKLEARN_IMAGE_DATA,\n LATEST_TF_LABEL_DATA,\n LATEST_TF_IMAGE_DATA,\n]\n\nLATEST_ONE_HOT_VECTOR_TABLE = LATEST_DIR + '/one_hot_vector.data'\nLATEST_ONE_HOT_VECTOR_TABLE_REVERSE_KEY_VALUE = LATEST_DIR + '/one_hot_vector_reverse_key_value.data'\nLATEST_ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX = LATEST_DIR + '/one_hot_vector_table_key_char_value_argmax.data'\nLATEST_ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX_REVERSE_KEY_VALUE = LATEST_DIR + '/one_hot_vector_table_key_char_value_argmax_reverse_key_value.data'\n\nSHOW_IMAGE_MAX = 600\n\nufile.make_directory(PNG_IMAGE_FILE_PATH)\nufile.make_directory(JSON_IMAGE_FILE_PATH)\n\n# 180518:\n# sklearnとtfdataのlatest dataのコピー先\nEXTERNAL_DIR_BASE_LIST = 
ABS_PATH.split('/')\nEXTERNAL_DIR_BASE_LIST.pop()\nEXTERNAL_DIR_BASE = '/'.join(EXTERNAL_DIR_BASE_LIST)\n\nTM_DATA_DIR = EXTERNAL_DIR_BASE + '/tm/binary_data/'\nSKLEARN_DATA_DIR = EXTERNAL_DIR_BASE + '/sklearn/binary_data/'\nTF_DATA_DIR = EXTERNAL_DIR_BASE + '/tf/binary_data/'\n\nufile.make_directory(TM_DATA_DIR)\nufile.make_directory(SKLEARN_DATA_DIR)\nufile.make_directory(TF_DATA_DIR)\n\nEXTERNAL_TM_LABEL_DATA = TM_DATA_DIR + 'tm_label.data'\nEXTERNAL_TM_IMAGE_DATA = TM_DATA_DIR + 'tm_image.data'\nEXTERNAL_SKLEARN_LABEL_DATA = SKLEARN_DATA_DIR + 'sklearn_label.data'\nEXTERNAL_SKLEARN_IMAGE_DATA = SKLEARN_DATA_DIR + 'sklearn_image.data'\nEXTERNAL_TF_LABEL_DATA = TF_DATA_DIR + 'tf_label.data'\nEXTERNAL_TF_IMAGE_DATA = TF_DATA_DIR + 'tf_image.data'\n\nEXTERNAL_TF_ONE_HOT_VECTOR_TABLE = TF_DATA_DIR + 'one_hot_vector_table.data'\nEXTERNAL_TF_ONE_HOT_VECTOR_TABLE_REVERSE_KEY_VALUE = TF_DATA_DIR + 'one_hot_vector_table_reverse_key_value.data'\nEXTERNAL_ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX = TF_DATA_DIR + 'one_hot_vector_table_key_char_value_argmax.data'\nEXTERNAL_ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX_REVERSE_KEY_VALUE = TF_DATA_DIR + 'one_hot_vector_table_key_char_value_argmax_reverse_key_value.data'\n\n\ndef _shuffle_two_list(list_1, list_2):\n zipped = list(zip(list_1, list_2))\n np.random.shuffle(zipped)\n shuffle_list_1, shuffle_list_2 = zip(*zipped)\n return np.asarray(shuffle_list_1), np.asarray(shuffle_list_2)\n\n\ndef show_some_data(image_list, label_list):\n # 画像の表示\n for index in range(3):\n position = 141 + index\n\n plt.subplot(position), plt.imshow(image_list[index], cmap='gray')\n plt.title('label : {}'.format(label_list[index])), plt.xticks([]), plt.yticks([])\n\n plt.show()\n\n\ndef get_directory_number(path):\n ufile.make_directory(path)\n dir_list = os.listdir(path)\n return len(dir_list)\n\n\ndef create_one_hot_vector_table(char_template_list):\n one_hot_vector_table = {}\n one_hot_vector_table_reverse_key_value = {}\n one_hot_vector_table_key_char_value_argmax = {}\n one_hot_vector_table_key_char_value_argmax_reverse_key_value = {}\n\n for index, char in enumerate(char_template_list):\n one_hot_vector_table.update({\n char: np.eye(len(char_template_list))[index]\n })\n\n one_hot_vector_table_key_char_value_argmax.update({\n char: np.argmax(np.eye(len(char_template_list))[index])\n })\n\n for index, char in enumerate(char_template_list):\n one_hot_vector_table_reverse_key_value.update({\n str(list(np.eye(len(char_template_list))[index])): char\n })\n\n one_hot_vector_table_key_char_value_argmax_reverse_key_value.update({\n str(np.argmax(np.eye(len(char_template_list))[index])): char\n })\n\n with open(ONE_HOT_VECTOR_TABLE, mode='wb') as f:\n pickle.dump(one_hot_vector_table, f)\n\n with open(ONE_HOT_VECTOR_TABLE_REVERSE_KEY_VALUE, mode='wb') as f:\n pickle.dump(one_hot_vector_table_reverse_key_value, f)\n\n with open(ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX, mode='wb') as f:\n pickle.dump(one_hot_vector_table_key_char_value_argmax, f)\n\n with open(ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX_REVERSE_KEY_VALUE, mode='wb') as f:\n pickle.dump(one_hot_vector_table_key_char_value_argmax_reverse_key_value, f)\n\n return one_hot_vector_table, one_hot_vector_table_reverse_key_value\n\n\ndef create_one_hot_vector_list(label_char_list, char_template_list):\n one_hot_vector_list = []\n for label in label_char_list:\n one_hot_vector_list.append(create_one_hot_vector_table(char_template_list)[label])\n return one_hot_vector_list\n\n\ndef create_one_hot_vector(label_char, 
char_template_list):\n return create_one_hot_vector_table(char_template_list)[label_char]\n\n\ndef save(object_list, is_show_data=False):\n # 180515:pngディレクトリには各ラベルごとにディレクトリが切られている\n directory_list = ufile.get_absolute_file_list(PNG_IMAGE_FILE_PATH)\n\n default_label_data_list = []\n tm_label_data_list = []\n sklearn_label_data_list = []\n tf_label_data_list = []\n\n default_image_data_list = []\n tm_image_data_list = []\n sklearn_image_data_list = []\n tf_image_data_list = []\n\n # 記号、カタカナ、数字リストにラベルが含まれる場合\n one_hot_vector_table, one_hot_vector_table_reverse_key_value = create_one_hot_vector_table(object_list)\n\n for directory in directory_list:\n\n _label = ufile.get_filename_without_extension(directory).split('/')[-1]\n\n file_list = ufile.get_absolute_file_list(directory + '/')\n Log.write_log_debug('directory : {}'.format(directory))\n\n #\n # 180517\n # ワイルドカードは除外にする!!!\n # os.walkで最危険さうすると、*はワイルドカード扱いになってすべてのファイルが抽出される\n #\n\n if str(_label) == '*':\n continue\n\n for _file in file_list:\n Log.write_log_debug('file : {}'.format(_file))\n\n if _label in object_list:\n # 1次元に直して格納\n # unsigned int8 で指定\n # 実際の計算はCライブラリなので、typeを指定する\n\n # default list\n default_label_data_list.append(str(_label))\n default_np_array = np.array(cv2.imread(_file, flags=cv2.IMREAD_GRAYSCALE), dtype='float32')\n default_image_data_list.append(default_np_array)\n\n # tm list\n tm_label_data_list.append(str(_label))\n tm_np_array = np.array(cv2.imread(_file, flags=cv2.IMREAD_GRAYSCALE), dtype='float32')\n tm_image_data_list.append(tm_np_array)\n\n # sklearn list\n sklearn_label_data_list.append(str(_label))\n sklearn_np_array = np.ravel(np.array(cv2.imread(_file, flags=cv2.IMREAD_GRAYSCALE), dtype='float32'))\n sklearn_image_data_list.append(sklearn_np_array)\n\n # tf list\n tf_label_data_list.append(np.array(one_hot_vector_table[_label], dtype='float32'))\n tf_np_array = np.ravel(np.array(cv2.imread(_file, flags=cv2.IMREAD_GRAYSCALE), dtype='float32'))\n tf_image_data_list.append(tf_np_array)\n\n #\n # save default binary_data list\n #\n default_label_data_list, default_image_data_list = _shuffle_two_list(default_label_data_list, default_image_data_list)\n print('default_label_data_list.shape = {}'.format(default_label_data_list.shape))\n print('default_image_data_list.shape = {}'.format(default_image_data_list.shape))\n with open(DEFAULT_LABEL_DATA, mode='wb') as f:\n pickle.dump(default_label_data_list, f)\n\n with open(DEFAULT_IMAGE_DATA, mode='wb') as f:\n pickle.dump(default_image_data_list, f)\n\n #\n # save tm binary_data list\n #\n tm_label_data_list, tm_image_data_list = _shuffle_two_list(tm_label_data_list, tm_image_data_list)\n print('tm_label_data_list.shape = {}'.format(tm_label_data_list.shape))\n print('tm_image_data_list.shape = {}'.format(tm_image_data_list.shape))\n with open(TM_LABEL_DATA, mode='wb') as f:\n pickle.dump(tm_label_data_list, f)\n\n with open(TM_IMAGE_DATA, mode='wb') as f:\n pickle.dump(tm_image_data_list, f)\n\n #\n # save sklearn binary_data list\n #\n sklearn_label_data_list, sklearn_image_data_list = _shuffle_two_list(sklearn_label_data_list, sklearn_image_data_list)\n print('sklearn_label_data_list.shape = {}'.format(sklearn_label_data_list.shape))\n print('sklearn_image_data_list.shape = {}'.format(sklearn_image_data_list.shape))\n with open(SKLEARN_LABEL_DATA, mode='wb') as f:\n pickle.dump(sklearn_label_data_list, f)\n\n with open(SKLEARN_IMAGE_DATA, mode='wb') as f:\n pickle.dump(sklearn_image_data_list, f)\n\n #\n # save tf binary_data list\n #\n 
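# [editorial note -- not from the original script, illustrative only]
# tf_label_data_list pairs one-hot float32 label vectors with flattened
# images; the one-hot rows come from np.eye() in create_one_hot_vector_table,
# e.g. for a three-character template list:
#     import numpy as np
#     chars = ['0', '1', '2']
#     table = {c: np.eye(len(chars))[i] for i, c in enumerate(chars)}
#     table['1']  # -> array([0., 1., 0.])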
tf_label_data_list, tf_image_data_list = _shuffle_two_list(tf_label_data_list, tf_image_data_list)\n print('tf_label_data_list.shape = {}'.format(tf_label_data_list.shape))\n print('tf_image_data_list.shape = {}'.format(tf_image_data_list.shape))\n with open(TF_LABEL_DATA, mode='wb') as f:\n pickle.dump(tf_label_data_list, f)\n\n with open(TF_IMAGE_DATA, mode='wb') as f:\n pickle.dump(tf_image_data_list, f)\n\n if is_show_data:\n show_some_data(label_list=default_label_data_list, image_list=default_image_data_list)\n\n return default_label_data_list, \\\n default_image_data_list, \\\n tm_label_data_list, \\\n tm_image_data_list, \\\n sklearn_label_data_list, \\\n sklearn_image_data_list, \\\n tf_label_data_list, \\\n tf_image_data_list\n\n\ndef load():\n with open(DEFAULT_LABEL_DATA, mode='rb') as f:\n default_label_data_list = pickle.load(f)\n\n with open(DEFAULT_IMAGE_DATA, mode='rb') as f:\n default_image_data_list = pickle.load(f)\n\n with open(TM_LABEL_DATA, mode='rb') as f:\n tm_label_data_list = pickle.load(f)\n\n with open(TM_IMAGE_DATA, mode='rb') as f:\n tm_image_data_list = pickle.load(f)\n\n with open(SKLEARN_LABEL_DATA, mode='rb') as f:\n sklearn_label_data_list = pickle.load(f)\n\n with open(SKLEARN_IMAGE_DATA, mode='rb') as f:\n sklearn_image_data_list = pickle.load(f)\n\n with open(TF_LABEL_DATA, mode='rb') as f:\n tf_label_data_list = pickle.load(f)\n\n with open(TF_IMAGE_DATA, mode='rb') as f:\n tf_image_data_list = pickle.load(f)\n\n return default_label_data_list, \\\n default_image_data_list, \\\n tm_label_data_list, \\\n tm_image_data_list, \\\n sklearn_label_data_list, \\\n sklearn_image_data_list, \\\n tf_label_data_list, \\\n tf_image_data_list\n\n\ndef download_json_result():\n key_list = us3.get_key_list(\n bucket=S3_CHARACTER_BUCKET_NAME,\n key_prefix='before_correction'\n )\n if not key_list is None:\n for key in key_list:\n if ufile.get_file_extension(key) == 'json':\n us3.download(\n bucket=S3_CHARACTER_BUCKET_NAME,\n key=key,\n local_file_full_path=JSON_IMAGE_FILE_PATH + ufile.get_filename_with_extension(key)\n )\n\n\ndef _get_image_result_list():\n json_file_list = ufile.get_absolute_file_list(JSON_IMAGE_FILE_PATH)\n\n Log.write_log_debug(json_file_list)\n\n result_list = []\n for json_file in json_file_list:\n\n with open(json_file, mode='r') as f:\n json_result = json.load(f)\n\n for _result in json_result['result']:\n result_list.append(_result)\n\n return result_list\n\n\ndef get_correct_image_list():\n result_list = _get_image_result_list()\n\n # リストのシャッフル\n Log.write_log_debug('list shuffle')\n random.shuffle(result_list)\n\n count = 0\n correct_image_sha256_list = []\n for _result in result_list:\n\n item_list = udynamodb.query(\n table_name=DB_CHARACTER_IMAGE_TABLE_NAME,\n query_hash=_result['image_sha256'],\n hash_key=DB_CHARACTER_IMAGE_HASH_NAME,\n range_key=DB_CHARACTER_IMAGE_RANGE_NAME,\n )\n\n #\n # correct flagがinputのもののみリストに追加\n #\n if item_list is not None:\n for item in item_list:\n if 'status' in item:\n if item['status'] == 'input':\n correct_character = item['correct_character']\n\n char_dir = PNG_IMAGE_FILE_PATH + correct_character + '/'\n ufile.make_directory(char_dir)\n us3.download(\n bucket=S3_CHARACTER_BUCKET_NAME,\n key=S3_CHARACTER_RESULT_IMAGE_KEY_PREFIX_NAME + item['hash'] + '.png',\n local_file_full_path=char_dir + item['hash'] + '.png',\n )\n\n correct_image_sha256_list.append(_result)\n Log.write_log_debug(count)\n count += 1\n\n if count >= SHOW_IMAGE_MAX:\n break\n\n Log.write_log_debug('correct_image_sha256_list = 
{}'.format(correct_image_sha256_list))\n return correct_image_sha256_list\n\n\ndef check(default_label_list_object,\n default_image_list_object,\n tm_label_list_object,\n tm_image_list_object,\n sklearn_label_list_object,\n sklearn_image_list_object,\n tf_label_list_object,\n tf_image_list_object):\n default_label_data_list, \\\n default_image_data_list, \\\n tm_label_data_list, \\\n tm_image_data_list, \\\n sklearn_label_data_list, \\\n sklearn_image_data_list, \\\n tf_label_data_list, \\\n tf_image_data_list = load()\n\n # default return\n ret_bool = True\n\n #\n # default binary_data\n #\n save_label_list_sha256 = hashlib.sha256(pickle.dumps(default_label_list_object)).hexdigest()\n load_label_list_sha256 = hashlib.sha256(pickle.dumps(default_label_data_list)).hexdigest()\n\n save_image_list_sha256 = hashlib.sha256(pickle.dumps(default_image_list_object)).hexdigest()\n load_image_list_sha256 = hashlib.sha256(pickle.dumps(default_image_data_list)).hexdigest()\n\n if save_label_list_sha256 == load_label_list_sha256:\n print('default label : OK')\n else:\n ret_bool = False\n print('default label : NG')\n\n if save_image_list_sha256 == load_image_list_sha256:\n print('default image : OK')\n else:\n print('default image : NG')\n\n #\n # tm binary_data\n #\n save_label_list_sha256 = hashlib.sha256(pickle.dumps(tm_label_list_object)).hexdigest()\n load_label_list_sha256 = hashlib.sha256(pickle.dumps(tm_label_data_list)).hexdigest()\n\n save_image_list_sha256 = hashlib.sha256(pickle.dumps(tm_image_list_object)).hexdigest()\n load_image_list_sha256 = hashlib.sha256(pickle.dumps(tm_image_data_list)).hexdigest()\n\n if save_label_list_sha256 == load_label_list_sha256:\n print('tm label : OK')\n else:\n ret_bool = False\n print('tm label : NG')\n\n if save_image_list_sha256 == load_image_list_sha256:\n print('tm image : OK')\n else:\n ret_bool = False\n print('tm image : NG')\n\n #\n # sklearn learn binary_data\n #\n save_label_list_sha256 = hashlib.sha256(pickle.dumps(sklearn_label_list_object)).hexdigest()\n load_label_list_sha256 = hashlib.sha256(pickle.dumps(sklearn_label_data_list)).hexdigest()\n\n save_image_list_sha256 = hashlib.sha256(pickle.dumps(sklearn_image_list_object)).hexdigest()\n load_image_list_sha256 = hashlib.sha256(pickle.dumps(sklearn_image_data_list)).hexdigest()\n\n if save_label_list_sha256 == load_label_list_sha256:\n print('sklearn label : OK')\n else:\n ret_bool = False\n print('sklearn label : NG')\n\n if save_image_list_sha256 == load_image_list_sha256:\n print('sklearn image : OK')\n else:\n ret_bool = False\n print('sklearn image : NG')\n\n #\n # tf binary_data\n #\n save_label_list_sha256 = hashlib.sha256(pickle.dumps(tf_label_list_object)).hexdigest()\n load_label_list_sha256 = hashlib.sha256(pickle.dumps(tf_label_data_list)).hexdigest()\n\n save_image_list_sha256 = hashlib.sha256(pickle.dumps(tf_image_list_object)).hexdigest()\n load_image_list_sha256 = hashlib.sha256(pickle.dumps(tf_image_data_list)).hexdigest()\n\n if save_label_list_sha256 == load_label_list_sha256:\n print('tf label : OK')\n else:\n ret_bool = False\n print('tf label : NG')\n\n if save_image_list_sha256 == load_image_list_sha256:\n print('tf image : OK')\n else:\n ret_bool = False\n print('tf image : NG')\n\n return ret_bool\n\n\ndef copy_to_each_dir():\n # copy to latest dir\n for _data, _latest_data in zip(ALL_DATA_LIST, ALL_LATEST_DATA_LIST):\n shutil.copy2(_data, _latest_data)\n\n shutil.copy2(ONE_HOT_VECTOR_TABLE, LATEST_ONE_HOT_VECTOR_TABLE)\n 
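# [editorial note -- not from the original script, illustrative only]
# shutil.copy2() copies both file contents and metadata (timestamps); the
# loop above plus the calls below publish each freshly written .data file
# into the stable latest/ directory, roughly:
#     import shutil
#     shutil.copy2(src_path, latest_dir + file_name)  # src_path, latest_dir assumed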
shutil.copy2(ONE_HOT_VECTOR_TABLE_REVERSE_KEY_VALUE, LATEST_ONE_HOT_VECTOR_TABLE_REVERSE_KEY_VALUE)\n    shutil.copy2(ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX, LATEST_ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX)\n    shutil.copy2(ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX_REVERSE_KEY_VALUE,\n                 LATEST_ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX_REVERSE_KEY_VALUE)\n\n    # copy to each dir (sklearn, tf)\n    print(EXTERNAL_TM_LABEL_DATA)\n    print(EXTERNAL_TM_IMAGE_DATA)\n    print(EXTERNAL_SKLEARN_LABEL_DATA)\n    print(EXTERNAL_SKLEARN_IMAGE_DATA)\n    print(EXTERNAL_TF_LABEL_DATA)\n    print(EXTERNAL_TF_IMAGE_DATA)\n    print(EXTERNAL_TF_ONE_HOT_VECTOR_TABLE)\n\n    shutil.copy2(TM_LABEL_DATA, EXTERNAL_TM_LABEL_DATA)\n    shutil.copy2(TM_IMAGE_DATA, EXTERNAL_TM_IMAGE_DATA)\n\n    shutil.copy2(SKLEARN_LABEL_DATA, EXTERNAL_SKLEARN_LABEL_DATA)\n    shutil.copy2(SKLEARN_IMAGE_DATA, EXTERNAL_SKLEARN_IMAGE_DATA)\n\n    # only tf needs the one-hot vector tables\n    shutil.copy2(TF_LABEL_DATA, EXTERNAL_TF_LABEL_DATA)\n    shutil.copy2(TF_IMAGE_DATA, EXTERNAL_TF_IMAGE_DATA)\n\n    shutil.copy2(ONE_HOT_VECTOR_TABLE, EXTERNAL_TF_ONE_HOT_VECTOR_TABLE)\n    shutil.copy2(ONE_HOT_VECTOR_TABLE_REVERSE_KEY_VALUE, EXTERNAL_TF_ONE_HOT_VECTOR_TABLE_REVERSE_KEY_VALUE)\n    shutil.copy2(ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX, EXTERNAL_ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX)\n    shutil.copy2(ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX_REVERSE_KEY_VALUE,\n                 EXTERNAL_ONE_HOT_VECTOR_TABLE_KEY_CHAR_VALUE_ARGMAX_REVERSE_KEY_VALUE)\n\n\ndef upload_binary_data():\n    for _data in ALL_LATEST_DATA_LIST:\n        us3.upload(\n            bucket=S3_CHARACTER_BUCKET_NAME,\n            key=S3_CHARACTER_RESULT_BINARY_DATA_KEY_PREFIX_NAME + YYYYMMDDhhmmss + '/' + ufile.get_filename_with_extension(\n                _data),\n            local_file_full_path=_data\n        )\n\n        us3.upload(\n            bucket=S3_CHARACTER_BUCKET_NAME,\n            key=S3_CHARACTER_RESULT_BINARY_DATA_KEY_PREFIX_NAME + 'latest/' + ufile.get_filename_with_extension(_data),\n            local_file_full_path=_data\n        )\n\n    us3.upload(\n        bucket=S3_CHARACTER_BUCKET_NAME,\n        key=S3_CHARACTER_RESULT_BINARY_DATA_KEY_PREFIX_NAME + YYYYMMDDhhmmss + '/' + ufile.get_filename_with_extension(\n            ONE_HOT_VECTOR_TABLE),\n        local_file_full_path=ONE_HOT_VECTOR_TABLE\n    )\n\n    us3.upload(\n        bucket=S3_CHARACTER_BUCKET_NAME,\n        key=S3_CHARACTER_RESULT_BINARY_DATA_KEY_PREFIX_NAME + 'latest/' + ufile.get_filename_with_extension(\n            ONE_HOT_VECTOR_TABLE),\n        local_file_full_path=ONE_HOT_VECTOR_TABLE\n    )\n\n\ndef argument_parser():\n    parser = argparse.ArgumentParser(description=\"create the label/image binary data\")\n    parser.add_argument(\n        \"-m\", \"--mode\", dest=\"mode\",\n        type=str,\n        default=\"inference\",\n        choices=[\"inference\", \"teacher\"],\n        help=\"specify the execution mode, inference or teacher\")\n\n    parser.add_argument(\n        \"-e\", \"--method\", dest=\"method\",\n        type=str,\n        default=\"tm\",\n        choices=[\"tm\", \"tf\", \"svm\"],\n        help=\"specify the method, tm, tf or svm\")\n\n    # argparse's type=bool treats every non-empty string (even \"False\") as True,\n    # so the boolean flags are parsed explicitly\n    parser.add_argument(\n        \"-d\", \"--debug\", dest=\"debug\",\n        type=lambda s: str(s).lower() == \"true\",\n        default=True,\n        choices=[True, False],\n        help=\"specify whether to execute in debug mode (true/false)\")\n\n    parser.add_argument(\n        \"-l\", \"--local\", dest=\"local\",\n        type=lambda s: str(s).lower() == \"true\",\n        default=False,\n        choices=[True, False],\n        help=\"specify local mode or server mode (true/false)\")\n\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n\n    #\n    # 0. 
parse arguments\n    #\n\n    argument = argument_parser()\n\n    mode = argument.mode\n    method = argument.method\n    debug = DEBUG_MODE  # note: the parsed --debug flag is ignored here; the DEBUG_MODE constant wins\n    local = argument.local\n\n    Log.write_log_info('mode = {}'.format(mode))\n    Log.write_log_info('method = {}'.format(method))\n    Log.write_log_info('debug = {}'.format(debug))\n    Log.write_log_info('local = {}'.format(local))\n\n    #\n    # 3. load the teacher data labeled by directory name, and save it as binary data (label_list, image_list)\n    #    save() also returns the python objects;\n    #    they are kept for the later check() function\n    # label_list_object, image_list_object = save()\n    # object_list selects which list of characters to save as data_list\n    # NUMERIC_LIST is digits only\n    #\n\n    # 180622 : for now, restrict to digits, the yen sign and the comma\n    # CUSTOM_LIST = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9','¥','ast',',']\n    # HANKAKU_KIGOU_LIST\n    # NUMERIC_KATAKANA_KIGOU_LIST\n    # ALPHABET_SMALL_LIST\n    # ALPHABET_LARGE_LIST\n    # NUMERIC_LIST\n    # ZENKAKU_NUMERIC_LIST\n    # HIRAGANA_LIST\n    # KATAKANA_LIST\n    # KANJI_LIST\n\n    default_label_data_list, \\\n    default_image_data_list, \\\n    tm_label_data_list, \\\n    tm_image_data_list, \\\n    sklearn_label_data_list, \\\n    sklearn_image_data_list, \\\n    tf_label_data_list, \\\n    tf_image_data_list = save(object_list=CUSTOM_LIST, is_show_data=False)\n\n    #\n    # 4. load the saved binary data, compute its sha256 and check that it matches the in-memory objects\n    # TODO save as a one-dimensional array instead of the current two-dimensional one\n    check_result = check(\n        default_label_list_object=default_label_data_list, default_image_list_object=default_image_data_list,\n        tm_label_list_object=tm_label_data_list, tm_image_list_object=tm_image_data_list,\n        sklearn_label_list_object=sklearn_label_data_list, sklearn_image_list_object=sklearn_image_data_list,\n        tf_label_list_object=tf_label_data_list, tf_image_list_object=tf_image_data_list,\n    )\n\n    #\n    # 5. copy the latest data into each directory\n    #    and upload it to S3 as well\n    if check_result:\n        copy_to_each_dir()\n        upload_binary_data()\n","sub_path":"server/data/04_create_label_image_binary_data.py","file_name":"04_create_label_image_binary_data.py","file_ext":"py","file_size_in_byte":25232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"341668532","text":"# coding=utf-8\nimport sys\nimport os\n\nRESULT_NAME = 'result.txt'\n\n# count the characters\ndef funChar(flow):\n    charLen = len(flow)\n    return charLen\n\n# count the words\ndef funWord(flow):\n    wordLen = len(flow.replace(',', ' ').split())  # split() without arguments collapses runs of whitespace\n    return wordLen\n# count the lines\ndef funLine(flow):\n    lineLen = flow.count('\\n') \n    return lineLen + 1\n\n# write the result to a file\ndef funOut(result):\n    fileObj = open(RESULT_NAME, 'w')\n    fileObj.write(result)\n    return 0\n\n# main function\ndef main():\n    result = ''\n    paramsLen = len(sys.argv)\n    fileName = sys.argv[paramsLen - 1]\n    params = sys.argv[1:-1]\n    fileObj = open(fileName, 'r')\n    wordFlow = fileObj.read()\n    ifOutPut = 0\n    for func in params:\n        if func == '-c':\n            log = 'character count: ' + str(funChar(wordFlow)) + '\\n'\n            print(log)\n            result += log\n        if func == '-w':\n            log = 'word count: ' + str(funWord(wordFlow)) + '\\n'\n            print(log)\n            result += log\n        if func == '-l':\n            log = 'line count: ' + str(funLine(wordFlow)) + '\\n'\n            print(log)\n            result += log\n        if func == '-o':\n            ifOutPut = 1 \n    if ifOutPut == 1:\n        funOut(result)\n\nif __name__ == '__main__':\n    main() ","sub_path":"WordCount/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"344301219","text":"import gym\r\nimport numpy as np\r\nimport math\r\nimport random\r\nfrom copy import deepcopy, copy\r\nimport time\r\nimport ray\r\nimport json\r\nimport wandb\r\nfrom matplotlib import animation\r\nimport 
matplotlib.pyplot as plt\r\nfrom gym import wrappers\r\n\r\nray.init(num_cpus=8)\r\nwandb.init(project=\"genetic\")\r\n\r\n\r\nclass NeuralNet:\r\n\r\n def __init__(self, numLayers, numNodes):\r\n super().__init__()\r\n\r\n self.numLayers = numLayers\r\n self.numNodes = numNodes\r\n\r\n # print(\"##########\")\r\n # print(self.numNodes)\r\n\r\n self.weights = []\r\n self.baises = []\r\n\r\n self.fitness = 0.0\r\n\r\n self.initWeights()\r\n\r\n\r\n def initWeights(self):\r\n\r\n for i in range(0, self.numLayers - 1):\r\n self.weights.append(np.random.uniform(low = -1.0, high = 1.0, size = (self.numNodes[i], self.numNodes[i+1])))\r\n self.baises.append(np.random.uniform(low = -1.0, high = 1.0, size = (self.numNodes[i+1])))\r\n\r\n self.weights = np.array([np.array(xi) for xi in self.weights], dtype=object)\r\n self.baises = np.array([np.array(xi) for xi in self.baises], dtype=object)\r\n \r\n\r\n def getAction(self, observation):\r\n\r\n action = observation\r\n for i in range(0, self.numLayers - 1):\r\n action = np.matmul(np.asarray(action),\r\n self.weights[i]) + self.baises[i]\r\n action = np.reshape(action, (1, self.numNodes[i+1]))\r\n # if(i == self.numLayers - 2):\r\n # action = np.tanh(action)\r\n # else:\r\n # action = self.relu(action)\r\n action = np.tanh(action)\r\n action = np.reshape(action, (self.numNodes[i+1]))\r\n\r\n return action\r\n\r\n def relu(self, x):\r\n return np.maximum(0, x)\r\n\r\n\r\nclass Population:\r\n\r\n def __init__(self, populationSize, mutationRate, learningRate, NNLayers):\r\n super().__init__()\r\n \r\n self.populationSize = populationSize\r\n self.mutationRate = mutationRate\r\n self.learningRate = learningRate\r\n self.NNLayers = NNLayers\r\n self.population = []\r\n self.parent = None\r\n self.currentGeneration = 0\r\n\r\n self.weights_noise = []\r\n self.baises_noise = []\r\n\r\n self.data = {}\r\n self.datafile_name = \"trained_weights.json\"\r\n\r\n\r\n def dump_data(self):\r\n\r\n temp_json = None\r\n\r\n try:\r\n with open(self.datafile_name, \"r\") as infile:\r\n temp_json = json.load(infile)\r\n except:\r\n print(\"File not found will create one!\")\r\n\r\n if(temp_json is None):\r\n temp_json = {}\r\n\r\n with open(self.datafile_name, \"w\") as outfile:\r\n temp_json[str(self.NNLayers)] = self.data\r\n json.dump(temp_json, outfile)\r\n\r\n\r\n def initPopulation(self):\r\n self.parent = NeuralNet(len(self.NNLayers), self.NNLayers)\r\n # self.dump_data()\r\n\r\n\r\n def mutation(self):\r\n # new_individual = NeuralNet(len(self.NNLayers), self.NNLayers)\r\n \r\n self.weights_noise.clear()\r\n self.baises_noise.clear()\r\n for i in range(len(self.NNLayers) - 1):\r\n self.weights_noise.append(np.random.randn(self.populationSize, self.NNLayers[i], self.NNLayers[i+1]))\r\n self.baises_noise.append(np.random.randn(self.populationSize, self.NNLayers[i+1]))\r\n pass\r\n\r\n \r\n for i in range(self.populationSize):\r\n self.population.append(NeuralNet(len(self.NNLayers), self.NNLayers))\r\n for j in range(len(self.NNLayers) - 1):\r\n self.population[i].weights[j] = self.parent.weights[j] + self.mutationRate * self.weights_noise[j][i]\r\n self.population[i].baises[j] = self.parent.baises[j] + self.mutationRate * self.baises_noise[j][i]\r\n\r\n def incrementGeneration(self, population_fitness):\r\n temp_data = {}\r\n temp_data[\"weights\"] = []\r\n temp_data[\"baises\"] = []\r\n\r\n for i in range(len(self.NNLayers) - 1):\r\n temp_data[\"weights\"].append(self.parent.weights[i].tolist())\r\n 
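# numpy arrays are not JSON-serializable, hence the tolist() conversion on these appends\r\n            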
temp_data[\"baises\"].append(self.parent.baises[i].tolist())\r\n\r\n self.data[self.currentGeneration] = temp_data\r\n\r\n self.dump_data()\r\n\r\n self.currentGeneration = self.currentGeneration + 1\r\n\r\n # min_fitness = min(population_fitness)\r\n # total_fitness = 0\r\n\r\n # for i in range(self.populationSize):\r\n # total_fitness += population_fitness[i] - min_fitness\r\n\r\n # for i in range(len(self.NNLayers) - 1):\r\n # self.parent.weights[i] = np.zeros(shape=(self.NNLayers[i], self.NNLayers[i+1]))\r\n # self.parent.baises[i] = np.zeros(shape=(1, self.NNLayers[i+1]))\r\n # for j in range(self.populationSize):\r\n # self.parent.weights[i] += ((population_fitness[j] - min_fitness) / total_fitness) * self.population[j].weights[i]\r\n # self.parent.baises[i] += ((population_fitness[j] - min_fitness) / total_fitness) * self.population[j].baises[i]\r\n\r\n # self.population.clear()\r\n\r\n population_fitness = np.asarray(population_fitness)\r\n A = (population_fitness - np.mean(population_fitness)) / (np.std(population_fitness))\r\n # print(A.shape)\r\n for i in range(len(self.NNLayers) - 1):\r\n # print(self.weights_noise[i].transpose(1, 2, 0).shape)\r\n self.parent.weights[i] = self.parent.weights[i] + (self.learningRate) * np.dot(np.asarray(self.weights_noise[i]).transpose(1, 2, 0), A)\r\n self.parent.baises[i] = self.parent.baises[i] + (self.learningRate) * np.dot(np.asarray(self.baises_noise[i]).transpose(1, 0), A)\r\n\r\n self.population.clear()\r\n\r\n # population_fitness = np.asarray(population_fitness)\r\n # A = (population_fitness - np.min(population_fitness))\r\n # A = A / np.sum(A)\r\n\r\n # for i in range(len(self.NNLayers) - 1):\r\n # self.parent.weights[i] = self.parent.weights[i] + self.learningRate * np.dot(np.asarray(self.weights_noise[i]).transpose(1, 2, 0), A)\r\n # self.parent.baises[i] = self.parent.baises[i] + self.learningRate * np.dot(np.asarray(self.baises_noise[i]).transpose(1, 0), A)\r\n # self.population.clear()\r\n\r\n\r\nENVIRONMENT = 'BipedalWalker-v3'\r\nMAX_STEPS = 1000\r\nMAX_GENERATIONS = 300\r\nPOPULATION_SIZE = 200\r\nMUTATION_RATE = 0.1\r\nLEARNING_RATE = 0.01\r\n\r\nenv = gym.make(ENVIRONMENT)\r\nobservation = env.reset()\r\n\r\nenv.render()\r\n\r\nobs_dim = env.observation_space.shape[0]\r\naction_dim = env.action_space.shape[0]\r\n\r\nobs_range = (env.observation_space.low, env.observation_space.high)\r\naction_range = (env.action_space.low, env.action_space.high)\r\n\r\nprint(\"OBSERVATION --> \\nSHAPE:\" + str(obs_dim) + \"x1, \\nRANGE: (\" + str(obs_range[0]) + \", \" + str(obs_range[1]) + \")\")\r\nprint(\"\\n\")\r\nprint(\"Action --> \\nSHAPE:\" + str(action_dim) + \"x1, \\nRANGE: (\" + str(action_range[0]) + \", \" + str(action_range[1]) + \")\")\r\n\r\n\r\ndef play_individual(individual, steps):\r\n observation = env.reset()\r\n frames = []\r\n total_reward = 0\r\n for step in range(steps):\r\n frames.append(env.render(mode=\"rgb_array\"))\r\n action = individual.getAction(observation)\r\n observation, reward, done, info = env.step(action)\r\n total_reward = total_reward + reward\r\n if done:\r\n break\r\n return np.array(frames), total_reward\r\n\r\n\r\n@ray.remote\r\ndef run_individual(individual, steps):\r\n totalReward = 0\r\n env_local = gym.make(ENVIRONMENT)\r\n obs = env_local.reset()\r\n\r\n for step in range(steps):\r\n action = individual.getAction(obs)\r\n obs, reward, done, info = env_local.step(action)\r\n totalReward += reward\r\n if done:\r\n break\r\n \r\n return totalReward\r\n pass\r\n\r\npopulation = 
Population(POPULATION_SIZE, MUTATION_RATE, LEARNING_RATE, (obs_dim, 64, 64, 32, action_dim))\r\n\r\npopulation.initPopulation()\r\n\r\nfor i in range(MAX_GENERATIONS):\r\n print(\"\\n\\n\\t GENERATION \" + str(population.currentGeneration) + \"\\n\")\r\n\r\n env_list = []\r\n for i in range(POPULATION_SIZE):\r\n env_list.append(gym.make(ENVIRONMENT))\r\n\r\n population.mutation()\r\n\r\n start_time = time.time()\r\n\r\n future_rewards = [run_individual.remote(individual, MAX_STEPS) for individual in population.population]\r\n totalRewards = ray.get(future_rewards)\r\n\r\n end_time = time.time()\r\n\r\n print(\"\\t\\tElapsed time to simulate generation: \" + str(end_time - start_time) + \"\\n\")\r\n # play_individual(population.parent, MAX_STEPS)\r\n observation = env.reset()\r\n parent_reward = 0\r\n\r\n for step in range(MAX_STEPS):\r\n # env.render()\r\n action = population.parent.getAction(observation)\r\n observation, reward, done, info = env.step(action)\r\n parent_reward += reward\r\n if done:\r\n break\r\n\r\n population.parent.fitness = parent_reward\r\n\r\n\r\n average_fitness = np.mean(np.asarray(totalRewards))\r\n population.incrementGeneration(totalRewards)\r\n # gif, parent_reward = play_individual(population.parent, MAX_STEPS)\r\n\r\n # population.parent.fitness = parent_reward\r\n print(\"\\t\\tGENERATION \" + str(population.currentGeneration) + \", PARENT FITNESS: \" + str(parent_reward))\r\n\r\n # gif = np.swapaxes(gif, 1, -1)\r\n # gif = np.swapaxes(gif, 2, -1)\r\n\r\n wandb.log(\r\n {\"Parent Fitness\": population.parent.fitness,\r\n \"Average Population Fitness\": average_fitness\r\n }, step=population.currentGeneration)\r\n\r\ntmp = input(\"Press enter to continue...\")","sub_path":"es.py","file_name":"es.py","file_ext":"py","file_size_in_byte":9524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"133037764","text":"def longest_substring(string):\r\n l = len(string)\r\n xy = 0\r\n max = 0\r\n initial = 0\r\n position = {}\r\n position[string[0]] = 0\r\n for i in range(1, l):\r\n if string[i] not in position:\r\n position[string[i]] = i\r\n else:\r\n if position[string[i]] >= xy:\r\n\r\n current_length = i - xy\r\n if max < current_length:\r\n max = current_length\r\n initial = xy\r\n xy = position[string[i]] + 1\r\n position[string[i]] = i\r\n if max < i - xy:\r\n max = i - xy\r\n initial = xy\r\n return string[initial: initial + max]\r\nif __name__ == \"__main__\":\r\n string = input(\"enter a string\")\r\n print(longest_substring(string))\r\n","sub_path":"Lab Assignment-1/Prog-1.py","file_name":"Prog-1.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"315761465","text":"from django.http import HttpResponse\nfrom django.utils import simplejson\nfrom django.shortcuts import render_to_response\nfrom engine import board\nfrom engine.board import GameState\nfrom engine.bot import botMove\n\ndef new(request):\n \"\"\" Handles new requests for games \"\"\"\n gs = GameState()\n state = gs.reset()\n return render_to_response(\"board.html\", { 'state' : state, 'info_message' : 'Your Move First'})\n\ndef move(request,position,state):\n \"\"\" Handles player move \"\"\"\n gs = GameState()\n sGameState = str(state)\n iGameState = []\n response_dict = {}\n\n for ci in sGameState:\n if int(ci) == 2:\n iGameState.append(board.COMPUTER_PLAYER)\n else:\n iGameState.append(int(ci))\n\n newState = 
gs.move(board.HUMAN_PLAYER,iGameState,int(position))\n\n if newState is None:\n response_dict.update({'status' : 'err', 'msg' : 'Invalid move'})\n elif gs.isGameOver(newState):\n winner = gs.getWinner(newState)\n\n if winner is None:\n response_dict.update({'status' : 'gameover',\n 'msg' : 'Tie Game',\n 'state' : newState})\n elif winner == board.HUMAN_PLAYER:\n response_dict.update({'status' : 'gameover', \n 'msg' : 'Player Wins!',\n 'state' : newState})\n else:\n response_dict.update({'status' : 'gameover', \n 'msg' : 'Computer Wins!',\n 'state' : newState})\n else:\n aiMove = botMove(newState)\n\n if aiMove is None:\n response_dict.update({'status' : 'err', \n 'msg' : 'Something has gone wrong'})\n else:\n fState = gs.move(board.COMPUTER_PLAYER,newState,aiMove)\n\n if gs.isGameOver(fState):\n response_dict.update({'status' : 'gameover', \n 'msg' : 'Computer Wins!',\n 'state' : fState})\n else:\n response_dict.update({'status' : 'ok', \n 'msg' : 'Your move',\n 'state' : fState})\n\n return HttpResponse(simplejson.dumps(response_dict), mimetype='application/javascript')\n","sub_path":"game/engine/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"469770997","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Gadfly database adapter unit tests.\n\n$Id$\n\"\"\"\nimport os\nimport tempfile\nfrom unittest import TestCase, TestSuite, main, makeSuite\n\nimport transaction\nfrom zope.interface.verify import verifyObject\n\nfrom zope.rdb import DatabaseAdapterError\nfrom zope.rdb.interfaces import IZopeConnection, IZopeCursor\nfrom zope.rdb.gadflyda import GadflyAdapter, setGadflyRoot\n\n\nclass GadflyTestBase(TestCase):\n\n def setUp(self):\n TestCase.setUp(self)\n self.tempdir = None\n\n def tearDown(self):\n TestCase.tearDown(self)\n if self.tempdir:\n os.rmdir(self.tempdir)\n setGadflyRoot()\n\n def getGadflyRoot(self):\n # note that self is GadflyTestBase here\n if not self.tempdir:\n self.tempdir = tempfile.mkdtemp('gadfly')\n setGadflyRoot(self.tempdir)\n return self.tempdir\n\n def _create(self, *args):\n return GadflyAdapter(*args)\n\n\nclass TestGadflyAdapter(GadflyTestBase):\n \"\"\"Test incorrect connection strings\"\"\"\n\n def test__connection_factory_nonexistent(self):\n # Should raise an exception on nonexistent dirs.\n a = self._create(\"dbi://demo;dir=nonexistent\")\n self.assertRaises(DatabaseAdapterError, a._connection_factory)\n\n def test__connection_factory_bad_dsn(self):\n a = self._create(\"dbi://user:pass/demo;dir=nonexistent\")\n self.assertRaises(DatabaseAdapterError, a._connection_factory)\n\n a = self._create(\"dbi://localhost:1234/demo;dir=nonexistent\")\n self.assertRaises(DatabaseAdapterError, a._connection_factory)\n\n\nclass TestGadflyAdapterNew(GadflyTestBase):\n \"\"\"Test with nonexistent databases\"\"\"\n\n def 
test__connection_factory_create(self):\n # Should create a database if the directory is empty.\n a = self._create(\"dbi://demo;dir=test\")\n conn = a._connection_factory()\n conn.rollback() # is it really a connection?\n\n def test__connection_factory_existing(self):\n # Should fail gracefully if the directory is a file.\n open(os.path.join(self.getGadflyRoot(), \"regular\"), \"w\").close()\n a = self._create(\"dbi://demo;dir=regular\")\n self.assertRaises(DatabaseAdapterError, a._connection_factory)\n\n def setUp(self):\n # Create a directory for the database.\n GadflyTestBase.setUp(self)\n dir = self.getGadflyRoot()\n os.mkdir(os.path.join(dir, \"test\"))\n\n def tearDown(self):\n # Remove the files and directories created.\n dir = self.getGadflyRoot()\n try: os.unlink(os.path.join(dir, \"test\", \"demo.gfd\"))\n except: pass\n os.rmdir(os.path.join(dir, \"test\"))\n try:\n os.unlink(os.path.join(dir, \"regular\"))\n except:\n pass\n GadflyTestBase.tearDown(self)\n\n\nclass TestGadflyAdapterDefault(GadflyTestBase):\n \"\"\"Test with pre-existing databases\"\"\"\n\n def setUp(self):\n # Create a directory for the database.\n GadflyTestBase.setUp(self)\n dir = self.getGadflyRoot()\n os.mkdir(os.path.join(dir, \"demo\"))\n\n def tearDown(self):\n # Remove the files and directories created.\n dir = self.getGadflyRoot()\n try:\n os.unlink(os.path.join(dir, \"demo\", \"demo.gfd\"))\n except:\n pass\n os.rmdir(os.path.join(dir, \"demo\"))\n GadflyTestBase.tearDown(self)\n\n def test__connection_factory_create(self):\n # Should create a database if the directory is empty.\n a = self._create(\"dbi://demo\")\n conn = a._connection_factory()\n conn.rollback() # is it really a connection?\n\n def test__connection_factory_reopen(self):\n # Should open an existing database.\n a = self._create(\"dbi://demo\")\n conn = a._connection_factory()\n conn.rollback() # is it really a connection?\n conn.close()\n\n conn = a._connection_factory()\n conn.rollback() # is it really a connection?\n\n def test__interfaces(self):\n a = self._create(\"dbi://demo\")\n connection = a()\n verifyObject(IZopeConnection, connection)\n cursor = connection.cursor()\n verifyObject(IZopeCursor, cursor)\n\nclass GadflyCursorStub(object):\n\n def __init__(self):\n self.operations = []\n\n def execute(self, operation, parameters=None):\n self.operations.append((operation, parameters))\n\nclass GadflyConnectionStub(object):\n\n def cursor(self):\n return GadflyCursorStub()\n\n def commit(self):\n pass\n\n def rollback(self):\n pass\n\n def close(self):\n pass\n\nclass GadflyTestAdapter(GadflyAdapter):\n\n def _connection_factory(self):\n return GadflyConnectionStub()\n\nclass GadflyAdapterTests(TestCase):\n\n def setUp(self):\n self.adapter = GadflyTestAdapter(\"dbi://\")\n self.connection = self.adapter()\n self.cursor = self.connection.cursor()\n\n def tearDown(self):\n transaction.abort()\n\n def testBadExecutemanyOperations(self):\n raises = self.assertRaises\n for operation in [\n \"SELECT\",\n \"CREATE\",\n \"DROP\",\n ]:\n raises(DatabaseAdapterError,\n self.cursor.executemany, operation, [])\n\n def testExecutemanyInsert(self):\n operation = \"INSERT INTO table(v1, v2) VALUES (?, ?)\"\n parameters = [(1, 2), (3, 4)]\n self.cursor.executemany(operation, parameters)\n self.failUnlessEqual([(operation, parameters)],\n self.cursor.operations)\n\n def testExecutemanyUpdate(self):\n operation = \"UPDATE table SET value=0 WHERE id=?\"\n parameters = [(1,), (2,)]\n self.cursor.executemany(operation, parameters)\n 
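# the stub cursor records one execute() per parameter tuple, i.e. the adapter replays UPDATE row by row\n        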
self.failUnlessEqual([\n (operation, parameters[0]),\n (operation, parameters[1]),\n ], self.cursor.operations)\n\n def testExecutemanyDelete(self):\n operation = \"DELETE FROM table WHERE id=?\"\n parameters = [(1,), (2,)]\n self.cursor.executemany(operation, parameters)\n self.failUnlessEqual([\n (operation, parameters[0]),\n (operation, parameters[1]),\n ], self.cursor.operations)\n\n\ndef test_suite():\n return TestSuite((\n makeSuite(TestGadflyAdapter),\n makeSuite(TestGadflyAdapterNew),\n makeSuite(TestGadflyAdapterDefault),\n makeSuite(GadflyAdapterTests),\n ))\n\nif __name__=='__main__':\n main(defaultTest='test_suite')\n","sub_path":"zope.rdb/branches/3.5/src/zope/rdb/tests/test_gadflyadapter.py","file_name":"test_gadflyadapter.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"229759724","text":"import logging\nimport plistlib\nfrom six import PY2\nfrom six.moves.urllib import parse as urlparse\nimport time\n\nfrom libpytunes.Song import Song\nfrom libpytunes.Playlist import Playlist\n\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import xspf\n xspfAvailable = True\nexcept ImportError:\n xspfAvailable = False\n pass\n\n\nclass Library:\n def __init__(self, itunesxml, musicPathXML=None, musicPathSystem=None, filesOnly=False):\n # musicPathXML and musicPathSystem will do path conversion\n # when xml is being processed on different OS then iTunes\n self.musicPathXML = musicPathXML\n self.musicPathSystem = musicPathSystem\n self.filesOnly = filesOnly\n with open(itunesxml, 'rb') as f:\n self.il = plistlib.load(f)\n self.songs = {}\n self.getSongs()\n\n def getSongs(self):\n format = \"%Y-%m-%d %H:%M:%S\"\n for trackid, attributes in self.il['Tracks'].items():\n s = Song()\n\n s.name = attributes.get('Name')\n\n # Support classical music naming (Work+Movement Number+Movement Name) since iTunes 12.5\n s.work = attributes.get('Work')\n s.movement_number = attributes.get('Movement Number')\n s.movement_count = attributes.get('Movement Count')\n s.movement_name = attributes.get('Movement Name')\n\n s.track_id = int(attributes.get('Track ID')) if attributes.get('Track ID') else None\n s.artist = attributes.get('Artist')\n s.album_artist = attributes.get('Album Artist')\n s.composer = attributes.get('Composer')\n s.album = attributes.get('Album')\n s.genre = attributes.get('Genre')\n s.kind = attributes.get('Kind')\n s.size = int(attributes.get('Size')) if attributes.get('Size') else None\n s.total_time = attributes.get('Total Time')\n s.track_number = attributes.get('Track Number')\n s.track_count = int(attributes.get('Track Count')) if attributes.get('Track Count') else None\n s.disc_number = int(attributes.get('Disc Number')) if attributes.get('Disc Number') else None\n s.disc_count = int(attributes.get('Disc Count')) if attributes.get('Disc Count') else None\n s.year = int(attributes.get('Year')) if attributes.get('Year') else None\n s.date_modified = time.strptime(str(attributes.get('Date Modified')), format) if attributes.get('Date Modified') else None\n s.date_added = time.strptime(str(attributes.get('Date Added')), format) if attributes.get('Date Added') else None\n s.bit_rate = int(attributes.get('Bit Rate')) if attributes.get('Bit Rate') else None\n s.sample_rate = int(attributes.get('Sample Rate')) if attributes.get('Sample Rate') else None\n s.comments = attributes.get(\"Comments\")\n s.rating = int(attributes.get('Rating')) if attributes.get('Rating') else None\n 
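# boolean plist flags like 'Rating Computed' are read by key presence, which a membership test maps to True/False\n            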
s.rating_computed = 'Rating Computed' in attributes\n s.play_count = int(attributes.get('Play Count')) if attributes.get('Play Count') else None\n s.album_rating = attributes.get('Album Rating')\n s.album_rating_computed = 'Album Rating Computed' in attributes\n s.persistent_id = attributes.get('Persistent ID')\n\n if attributes.get('Location'):\n s.location_escaped = attributes.get('Location')\n s.location = s.location_escaped\n s.location = urlparse.unquote(urlparse.urlparse(attributes.get('Location')).path[1:])\n s.location = s.location.decode('utf-8') if PY2 else s.location # fixes bug #19\n if (self.musicPathXML is not None and self.musicPathSystem is not None):\n s.location = s.location.replace(self.musicPathXML, self.musicPathSystem)\n\n s.compilation = 'Compilation' in attributes\n s.lastplayed = time.strptime(str(attributes.get('Play Date UTC')), format) if attributes.get('Play Date UTC') else None\n s.skip_count = int(attributes.get('Skip Count')) if attributes.get('Skip Count') else None\n s.skip_date = time.strptime(str(attributes.get('Skip Date')), format) if attributes.get('Skip Date') else None\n s.length = int(attributes.get('Total Time')) if attributes.get('Total Time') else None\n s.track_type = attributes.get('Track Type')\n s.grouping = attributes.get('Grouping')\n s.podcast = 'Podcast' in attributes\n s.movie = 'Movie' in attributes\n s.has_video = 'Has Video' in attributes\n s.loved = 'Loved' in attributes\n s.album_loved = 'Album Loved' in attributes\n s.playlist_only = 'Playlist Only' in attributes\n s.apple_music = 'Apple Music' in attributes\n s.protected = 'Protected' in attributes\n s.disabled = 'Disabled' in attributes\n\n self.songs[int(trackid)] = s\n\n def getPlaylistNames(self, ignoreList=[\n \"Library\", \"Music\", \"Movies\", \"TV Shows\", \"Purchased\", \"iTunes DJ\", \"Podcasts\"\n ]):\n\n playlists = []\n for playlist in self.il['Playlists']:\n if playlist['Name'] not in ignoreList:\n playlists.append(playlist['Name'])\n return playlists\n\n def getPlaylist(self, playlistName):\n for playlist in self.il['Playlists']:\n if playlist['Name'] == playlistName:\n # id \tplaylist_id \ttrack_num \turl \ttitle \talbum \tartist \tlength \tuniqueid\n p = Playlist(playlistName)\n p.playlist_id = playlist['Playlist ID']\n p.is_folder = playlist.get('Folder', False)\n p.playlist_persistent_id = playlist.get('Playlist Persistent ID')\n p.parent_persistent_id = playlist.get('Parent Persistent ID')\n p.distinguished_kind = playlist.get('Distinguished Kind')\n p.is_genius_playlist = True if playlist.get('Genius Track ID') else False\n p.is_smart_playlist = True if playlist.get('Smart Info') and not playlist.get('Folder', False) else False\n tracknum = 1\n # Make sure playlist was not empty\n if 'Playlist Items' in playlist:\n for track in playlist['Playlist Items']:\n id = int(track['Track ID'])\n t = self.songs[id]\n t.playlist_order = tracknum\n tracknum += 1\n p.tracks.append(t)\n return p\n\n def getPlaylistxspf(self, playlistName):\n global xspfAvailable\n if (xspfAvailable):\n x = xspf.Xspf()\n for playlist in self.il['Playlists']:\n if playlist['Name'] == playlistName:\n x.title = playlistName\n x.info = \"\"\n for track in playlist['Playlist Items']:\n id = int(track['Track ID'])\n x.add_track(title=self.songs[id].name, creator=\"\", location=self.songs[id].location)\n return x.toXml()\n else:\n logger.warning(\"xspf library missing, go to https://github.com/alastair/xspf to install.\")\n return 
None\n","sub_path":"build/lib/libpytunes/Library.py","file_name":"Library.py","file_ext":"py","file_size_in_byte":7299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"426650679","text":"import sys\n\ndef reverse_sentences(sentences):\n    index = len(sentences)-1\n    reverse = []\n    while index >= 0:\n        reverse.append(sentences[index])\n        index -= 1\n    return reverse\n\ndef print_reverse(reverse):\n    sentence = \" \".join(reverse)\n    print(sentence)\n\nif __name__==\"__main__\":\n    sentence = sys.argv[1:]\n    print_reverse(reverse_sentences(sentence))\n","sub_path":"reverse_sentences.py","file_name":"reverse_sentences.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"387959733","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nfrom collections import Counter\n\nfrom pybel import BELGraph\nfrom pybel.constants import INCREASES\nfrom pybel.dsl import protein\nfrom pybel.examples import sialic_acid_graph\nfrom pybel.struct.summary.edge_summary import (\n    get_annotation_values_by_annotation, iter_annotation_value_pairs,\n    iter_annotation_values,\n)\n\n\nclass TestEdgeSummary(unittest.TestCase):\n    def test_1(self):\n        graph = BELGraph()\n        u = protein('HGNC', name='U')\n        v = protein('HGNC', name='V')\n        w = protein('HGNC', name='W')\n\n        graph.add_qualified_edge(\n            u,\n            v,\n            relation=INCREASES,\n            evidence='',\n            citation='',\n            annotations={\n                'A': {'1', '2'},\n                'B': {'X'}\n            }\n        )\n\n        graph.add_qualified_edge(\n            u,\n            w,\n            relation=INCREASES,\n            evidence='',\n            citation='',\n            annotations={\n                'A': {'1', '3'},\n                'C': {'a'}\n            }\n        )\n\n        graph.add_qualified_edge(\n            w,\n            v,\n            relation=INCREASES,\n            evidence='',\n            citation='',\n        )\n\n        x = dict(Counter(iter_annotation_value_pairs(graph)))\n\n        self.assertEqual({\n            ('A', '1'): 2,\n            ('A', '2'): 1,\n            ('A', '3'): 1,\n            ('B', 'X'): 1,\n            ('C', 'a'): 1,\n        }, x)\n\n        y = Counter(iter_annotation_values(graph, 'A'))\n        self.assertEqual(x['A', '1'] + x['A', '2'] + x['A', '3'], sum(y.values()))\n\n        y = Counter(iter_annotation_values(graph, 'B'))\n        self.assertEqual(x['B', 'X'], sum(y.values()))\n\n        y = Counter(iter_annotation_values(graph, 'C'))\n        self.assertEqual(x['C', 'a'], sum(y.values()))\n\n    def test_get_annotation_values(self):\n        expected = {\n            'Confidence': {'High', 'Low'},\n            'Species': {'9606'}\n        }\n        result = get_annotation_values_by_annotation(sialic_acid_graph)\n        self.assertEqual(expected, result)\n","sub_path":"tests/test_struct_summary_edges.py","file_name":"test_struct_summary_edges.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"508951417","text":"import socket\nimport sys\nimport select\nimport threading\n\nclass Servidor():\n\n    def crea_servidor(self, host, puerto): #self, host = sys.argv[1], puerto = sys.argv[2]\n        try:\n            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n            self.socket.bind((str(host), int(puerto)))\n            self.socket.listen(150)\n        except IOError as ioe:\n            print ('The host is not a string or the port is not a number')\n\n        # Define the list of clients.\n        self.lista_clientes = []\n        self.datos = {}\n\n\n    def maneja_mensajes(self, conectado, direccion):\n        conectado.send(bytes(\"Welcome {0}\".format(self.datos[conectado].decode('utf-8')), 'utf-8'))\n        while True:\n            try:\n                mensaje = conectado.recv(2048)\n                if mensaje:\n                    # recv() returns bytes, so decode for display and keep bytes on the wire\n                    print (direccion[0] + \"\" + self.datos[conectado].decode('utf-8') + mensaje.decode('utf-8'))\n                    mensaje_enviado = self.datos[conectado] + b\":\" + mensaje\n                    self.envia(mensaje_enviado, conectado)\n\n                    nuevo_hilo= threading.Thread(target= self.maneja_mensajes, args= (conectado, direccion))\n                    nuevo_hilo.daemon = True\n                    nuevo_hilo.start()  # start() was missing its parentheses\n                else:\n                    self.elimina(conectado)  # remove() was undefined; elimina() is the intended method\n            except:\n                continue\n\n\n    def envia(self, mensaje, conexion):\n        for cliente in self.lista_clientes:\n            if cliente != conexion:\n                try:\n                    cliente.send(mensaje)\n                except:\n                    cliente.close()\n                    self.elimina(cliente)\n\n\n    def elimina(self, conexion):\n        if conexion in self.lista_clientes:\n            self.lista_clientes.remove(conexion)\n\n\n    def maneja_conexion(self):\n        while True:\n            try:\n                conectado, direccion = self.socket.accept()\n                info = conectado.recv(2048)\n                self.lista_clientes.append(conectado)\n\n                # Add the client to the client list if it is not in it yet.\n                añadido = True\n                for i in self.datos:\n                    if self.datos[i] == info:\n                        añadido = True\n                    else:\n                        añadido = False\n\n                if(añadido == False):\n                    self.datos[conectado] = info\n                    conectado.send(bytes(\"1\", 'utf-8'))\n                else:\n                    # If the \"name\" is taken, keep refusing it until an available\n                    # one is supplied\n                    while añadido:\n                        conectado.send(bytes(\"0\", 'utf-8'))\n                        info = conectado.recv(2048)\n                        if self.datos[i] == info:\n                            añadido = True\n                        else:\n                            añadido = False\n                    conectado.send(bytes(\"1\", 'utf-8'))\n                #print(direccion[0] + \"\" + info)\n            except OSError as oe:\n                continue\n\n        conectado.close()\n        self.socket.close()\n\n\n\n\n\n\ndef main():\n    s = Servidor()\n    host = sys.argv[1]\n    puerto = sys.argv[2]\n\n    s.crea_servidor(host, puerto)\n    print('Listening on port:', puerto)\n    s.maneja_conexion()\n\n\nif __name__ == '__main__':\n    main()\n    #def get_clientes(self):\n\n\n    #def maneja_conexion(self):\n    #    acepta_conexiones = threading.Thread(target = self.maneja_mensajes,\n    #                                       args = (conexiones, direccion))\n    #    acepta_conexiones.daemon = True\n    #    acepta_conexiones.start\n    #    print (str(direccion[0]) + \":\" + str(direccion[1]) \"has connected.\")\n","sub_path":"src/servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"349215741","text":"#-*- coding:utf-8 -*-\n\nfrom tensorflow.python.ops.gen_math_ops import compare_and_bitpack\nimport torch\nimport numpy as np\nimport tensorflow as tf\n\n\ndef point_form(boxes):\n    \"\"\" Convert prior_boxes to (xmin, ymin, xmax, ymax)\n    representation for comparison to point form ground truth data.\n    Args:\n        boxes: (tensor) center-size default boxes from priorbox layers.\n    Return:\n        boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n    \"\"\"\n    \n\n    return tf.concat((boxes[:, 0:2] - boxes[:, 2:4]/2,     # xmin, ymin\n                     boxes[:, 0:2] + boxes[:, 2:4]/2), 1) \n\n\ndef center_size(boxes):\n    \"\"\" Convert prior_boxes to (cx, cy, w, h)\n    representation for comparison to center-size form ground truth data.\n    Args:\n        boxes: (tensor) point_form boxes\n    Return:\n        boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n    \"\"\"\n    # the two halves must be passed to torch.cat as one tuple, not as separate arguments\n    return torch.cat(((boxes[:, 2:4] + boxes[:, 0:2])/2,   # cx, cy\n                      boxes[:, 2:4] - boxes[:, 0:2]), 1)   # w, h\n\n\ndef intersect(box_a, box_b):\n    \"\"\" We resize both tensors to [A,B,2] without new malloc:\n    [A,2] -> [A,1,2] -> [A,B,2]\n    [B,2] -> [1,B,2] -> [A,B,2]\n    Then we compute the area of intersect between box_a and box_b.\n    Args:\n      box_a: (tensor) bounding boxes, Shape: [A,4].\n      box_b: (tensor) bounding boxes, Shape: [B,4].\n    Return:\n      (tensor) intersection area, Shape: [A,B].\n    \"\"\"\n    A = box_a.shape[0]\n    B = 
box_b.shape[0]\n    # print(box_a.shape)\n    # print(box_b.shape)\n    boxa1=box_a[:, 2:4]\n    boxa1=tf.expand_dims(boxa1,axis=1)\n    boxa1=tf.broadcast_to(boxa1,shape=(A,B,2))\n    boxb1=box_b[:, 2:4]\n    boxb1=tf.expand_dims(boxb1,axis=0)\n    boxb1=tf.broadcast_to(boxb1,shape=(A,B,2))\n    # print(boxa1.shape,boxb1.shape)\n    max_xy = tf.math.minimum(boxa1,boxb1)\n    # max_xy = torch.min(box_a[:, 2:4].unsqueeze(1).expand(A, B, 2),\n    #                    box_b[:, 2:4].unsqueeze(0).expand(A, B, 2))\n\n    boxa1=box_a[:, 0:2]\n    boxa1=tf.expand_dims(boxa1,axis=1)\n    boxa1=tf.broadcast_to(boxa1,shape=(A,B,2))\n    boxb1=box_b[:, 0:2]\n    boxb1=tf.expand_dims(boxb1,axis=0)\n    boxb1=tf.broadcast_to(boxb1,shape=(A,B,2))\n    min_xy = tf.math.maximum(boxa1,boxb1)\n    # min_xy = torch.max(box_a[:, 0:2].unsqueeze(1).expand(A, B, 2),\n    #                    box_b[:, 0:2].unsqueeze(0).expand(A, B, 2))\n    inter = tf.clip_by_value((max_xy - min_xy), clip_value_min=0, clip_value_max=tf.float32.max)\n    return inter[:, :, 0] * inter[:, :, 1]\n\n\ndef jaccard(box_a, box_b):\n    \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap\n    is simply the intersection over union of two boxes. Here we operate on\n    ground truth boxes and default boxes.\n    E.g.:\n        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n    Args:\n        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n    Return:\n        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n    \"\"\"\n    inter = intersect(box_a, box_b)\n    area_a=tf.expand_dims(((box_a[:, 2]-box_a[:, 0]) * (box_a[:, 3]-box_a[:, 1])),axis=1)\n    area_b=tf.expand_dims(((box_b[:, 2]-box_b[:, 0]) * (box_b[:, 3]-box_b[:, 1])),axis=0)\n    area_a=tf.broadcast_to(area_a,shape=inter.shape)\n    area_b=tf.broadcast_to(area_b,shape=inter.shape)\n    # area_a = ((box_a[:, 2]-box_a[:, 0]) * (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter)  # [A,B]\n    # area_b = ((box_b[:, 2]-box_b[:, 0]) * (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter)  # [A,B]\n    # print(\"inter\",inter.shape)\n    # print(area_a.shape,area_b.shape)\n    # exit()\n    union = area_a + area_b - inter\n    return inter / union  # [A,B]\n\n\ndef matrix_iou(a, b):\n    \"\"\"\n    return iou of a and b, numpy version for data augmentation\n    \"\"\"\n    lt = np.maximum(a[:, np.newaxis, 0:2], b[:, 0:2])\n    rb = np.minimum(a[:, np.newaxis, 2:4], b[:, 2:4])\n\n    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)\n    area_a = np.prod(a[:, 2:4] - a[:, 0:2], axis=1)\n    area_b = np.prod(b[:, 2:4] - b[:, 0:2], axis=1)\n    return area_i / (area_a[:, np.newaxis] + area_b - area_i)\n\n\ndef matrix_iof(a, b):\n    \"\"\"\n    return iof of a and b, numpy version for data augmentation\n    \"\"\"\n    lt = np.maximum(a[:, np.newaxis, 0:2], b[:, 0:2])\n    rb = np.minimum(a[:, np.newaxis, 2:4], b[:, 2:4])\n\n    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)\n    area_a = np.prod(a[:, 2:4] - a[:, 0:2], axis=1)\n    return area_i / np.maximum(area_a[:, np.newaxis], 1)\n\n\ndef match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):\n    \"\"\"Match each prior box with the ground truth box of the highest jaccard\n    overlap, encode the bounding boxes, then return the matched indices\n    corresponding to both confidence and location preds.\n    Args:\n        threshold: (float) The overlap threshold used when matching boxes.\n        truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].\n        priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].\n        variances: (tensor) Variances corresponding to each prior coord,\n            Shape: 
[num_priors, 4].\n labels: (tensor) All the class labels for the image, Shape: [num_obj].\n loc_t: (tensor) Tensor to be filled w/ endcoded location targets.\n conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.\n idx: (int) current batch index\n Return:\n The matched indices corresponding to 1)location and 2)confidence preds.\n \"\"\"\n # jaccard index\n overlaps = jaccard(\n truths,\n point_form(priors)\n )\n # (Bipartite Matching)\n # [1,num_objects] best prior for each ground truth\n # print(type(overlaps),overlaps.shape)\n # exit()\n best_prior_overlap = tf.reduce_max(overlaps,axis=1,keepdims=True)\n best_prior_idx = tf.argmax(overlaps,axis=1)\n best_prior_idx = tf.expand_dims(best_prior_idx,axis=-1)\n # print(best_prior_overlap,best_prior_idx)\n # print(x.shape,y.shape)\n # best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)\n\n # ignore hard gt\n # print(best_prior_overlap,best_prior_idx)\n valid_gt_idx = best_prior_overlap[:, 0] >= 0.2\n # print(valid_gt_idx)\n best_prior_idx_filter = tf.boolean_mask(best_prior_idx, valid_gt_idx)\n # print(best_prior_idx_filter)\n # best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]\n # print(loc_t)\n if best_prior_idx_filter.shape[0] is not None and best_prior_idx_filter.shape[0] <= 0:\n # loc_t[idx] = 0\n # conf_t[idx] = 0\n # ones = tf.ones((16,5875,4))\n # print(tf.reshape(tf.where(ones[idx]),shape=(idx,5875,4)))\n # ones = tf.tensor_scatter_nd_update(ones,[[idx]],[tf.zeros((5875,4))])\n # loc_t = tf.tensor_scatter_nd_update(loc_t,tf.cast(loc_t[idx,:],dtype=\"int64\"),tf.zeros((idx,5875,4)))\n # print(ones)\n loc_t = tf.tensor_scatter_nd_update(loc_t,[[idx]],[tf.zeros((5875,4))])\n conf_t = tf.tensor_scatter_nd_update(conf_t,[[idx]],[tf.zeros((5875))])\n # print(loc_t)\n # exit()\n # print(\"exception!!!!!\")\n return tf.squeeze(tf.zeros((1, priors.shape[0])),axis=0),loc_t,conf_t\n\n # [1,num_priors] best ground truth for each prior\n best_truth_overlap = tf.reduce_max(overlaps,axis=0,keepdims=True)\n best_truth_overlap = tf.squeeze(best_truth_overlap,axis=0)\n best_truth_idx = tf.argmax(overlaps,axis=0)\n \n best_prior_idx=tf.squeeze(best_prior_idx,axis=1)\n best_prior_idx_filter=tf.squeeze(best_prior_idx_filter,axis=1)\n best_prior_overlap=tf.squeeze(best_prior_overlap,axis=1)\n # print(best_truth_overlap)\n # print(\"filter\",best_prior_idx_filter)\n # print(tf.where(best_truth_overlap==2))\n # for x in best_prior_idx_filter:\n # best_truth_overlap = tf.tensor_scatter_nd_update(best_truth_overlap,[[x]],[2])\n # print(\"IDX\",type(best_truth_idx))\n # print(\"update\",tf.ones((best_prior_idx_filter.shape[0]))*2)\n best_truth_overlap = tf.tensor_scatter_nd_update(best_truth_overlap,tf.expand_dims(best_prior_idx_filter,axis=-1),tf.ones((best_prior_idx_filter.shape[0]))*2)\n # print(best_truth_overlap)\n # print(tf.where(best_truth_overlap==2))\n # exit()\n\n # TODO refactor: index best_prior_idx with long tensor\n # ensure every gt matches with its prior of max overlap\n for j in range(best_prior_idx.shape[0]):\n # best_truth_idx = tf.tensor_scatter_nd_update(best_truth_idx, [[best_prior_idx[j].numpy()]],[j])\n best_truth_idx = tf.tensor_scatter_nd_update(best_truth_idx,[[best_prior_idx[j]]],[j])\n matches = tf.gather(truths,best_truth_idx)\n # matches = truths[best_truth_idx] # Shape: [num_priors,14]\n conf = tf.gather(labels,best_truth_idx)\n # print(\"conf\",conf)\n # print(tf.where(conf==0))\n # exit()\n # conf = labels[best_truth_idx] # Shape: [num_priors]\n # conf_np=conf.numpy()\n # 
conf_np[best_truth_overlap < threshold] = 0 \n # print(conf_np,conf_np.shape)\n # print(\"overlap\",best_truth_overlap)\n # print(tf.where(best_truth_overlap < threshold))\n\n\n compare = (tf.where(best_truth_overlap < threshold))\n # print(\"conf\",conf)\n # print(\"compare\",compare)\n # print(\"update\",tf.zeros((compare.shape[0])))\n # # for i in compare:\n conf = tf.tensor_scatter_nd_update(conf,compare,tf.zeros((compare.shape[0])))\n # print(conf)\n # exit()\n # print(tf.where(conf>0))\n # confnp = \n # exit()\n # for i in range (best_truth_overlap.shape[0]):\n # if best_truth_overlap[i]< threshold:\n # conf_np[i]=0\n\n # conf = tf.convert_to_tensor(conf_np)\n # conf = conf_np\n # conf[best_truth_overlap < threshold] = 0 # label as background\n loc = encode(matches, priors, variances)\n # loc_t[idx] = loc # [num_priors,14] encoded offsets to learn\n loc_t = tf.tensor_scatter_nd_update(loc_t,[[idx]],[loc])\n # print(loc_t)\n # loc_tnp=loc_t.numpy()\n # loc_tnp[idx]=loc\n # loc_t = tf.convert_to_tensor(loc_tnp)\n # conf_t[idx] = conf # [num_priors] top class label for each prior\n conf_t = tf.tensor_scatter_nd_update(conf_t,[[idx]],[conf])\n # print(tf.where(conf_t == 1))\n # conf_tnp=conf_t.numpy()\n # conf_tnp[idx]=conf\n # conf_t = tf.convert_to_tensor(conf_tnp)\n\n return best_truth_overlap,loc_t,conf_t\n\n\ndef encode(matched, priors, variances):\n \"\"\"Encode the variances from the priorbox layers into the ground truth boxes\n we have matched (based on jaccard overlap) with the prior boxes.\n Args:\n matched: (tensor) Coords of ground truth for each prior in point-form\n Shape: [num_priors, 4].\n priors: (tensor) Prior boxes in center-offset form\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n encoded boxes and landmarks (tensor), Shape: [num_priors, 14]\n \"\"\"\n\n # dist b/t match center and prior's center\n g_cxcy = (matched[:, 0:2] + matched[:, 2:4])/2 - priors[:, 0:2]\n # encode variance\n g_cxcy /= (variances[0] * priors[:, 2:4])\n # match wh / prior wh\n g_wh = (matched[:, 2:4] - matched[:, 0:2]) / priors[:, 2:4]\n g_wh = tf.math.log(g_wh) / variances[1]\n\n # landmarks\n # g_xy1 = (matched[:, 4:6] - priors[:, 0:2]) / (variances[0] * priors[:, 2:4])\n # g_xy2 = (matched[:, 6:8] - priors[:, 0:2]) / (variances[0] * priors[:, 2:4])\n # g_xy3 = (matched[:, 8:10] - priors[:, 0:2]) / (variances[0] * priors[:, 2:4])\n # g_xy4 = (matched[:, 10:12] - priors[:, 0:2]) / (variances[0] * priors[:, 2:4])\n # g_xy5 = (matched[:, 12:14] - priors[:, 0:2]) / (variances[0] * priors[:, 2:4])\n\n # return target for loss\n # return torch.cat([g_cxcy, g_wh, g_xy1, g_xy2, g_xy3, g_xy4, g_xy5], 1) # [num_priors,14]\n return tf.concat([g_cxcy, g_wh], 1) # [num_priors,14]\n\n\n# Adapted from https://github.com/Hakuyume/chainer-ssd\ndef decode(loc, priors, variances):\n \"\"\"Decode locations from predictions using priors to undo\n the encoding we did for offset regression at train time.\n Args:\n loc (tensor): location predictions for loc layers,\n Shape: [num_priors,4]\n priors (tensor): Prior boxes in center-offset form.\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n decoded bounding box predictions\n \"\"\"\n boxes = np.concatenate((\n priors[:, 0:2] + loc[:, 0:2] * variances[0] * priors[:, 2:4],\n priors[:, 2:4] * np.exp(loc[:, 2:4] * variances[1])),1)\n # priors[:, 0:2] + loc[:, 4:6] * variances[0] * priors[:, 2:4],\n # priors[:, 0:2] + loc[:, 6:8] * variances[0] * priors[:, 2:4],\n # priors[:, 0:2] + loc[:, 
8:10] * variances[0] * priors[:, 2:4],\n # priors[:, 0:2] + loc[:, 10:12] * variances[0] * priors[:, 2:4],\n # priors[:, 0:2] + loc[:, 12:14] * variances[0] * priors[:, 2:4]), 1)\n boxes[:, 0:2] -= boxes[:, 2:4] / 2\n boxes[:, 2:4] += boxes[:, 0:2]\n return boxes\n\n\ndef log_sum_exp(x):\n \"\"\"Utility function for computing log_sum_exp while determining\n This will be used to determine unaveraged confidence loss across\n all examples in a batch.\n Args:\n x (Variable(tensor)): conf_preds from conf layers\n \"\"\"\n # x_max = x.data.max()\n x_max = tf.reduce_max(x)\n # print(\"xmax\",x_max)\n # print(\"sum\",tf.math.reduce_sum(tf.exp(x-x_max), 1, keepdims=True))\n # print(\"log\",tf.math.log(tf.math.reduce_sum(tf.exp(x-x_max), 1, keepdims=True)))\n return tf.math.log(tf.math.reduce_sum(tf.exp(x-x_max), 1, keepdims=True)) + x_max\n\n\n# Original author: Francisco Massa:\n# https://github.com/fmassa/object-detection.torch\n# Ported to PyTorch by Max deGroot (02/01/2017)\ndef nms(boxes, scores, overlap=0.5, top_k=200):\n \"\"\"Apply non-maximum suppression at test time to avoid detecting too many\n overlapping bounding boxes for a given object.\n Args:\n boxes: (tensor) The location preds for the img, Shape: [num_priors,4].\n scores: (tensor) The class predscores for the img, Shape:[num_priors].\n overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n top_k: (int) The Maximum number of box preds to consider.\n Return:\n The indices of the kept boxes with respect to num_priors.\n \"\"\"\n\n keep = torch.Tensor(scores.size(0)).fill_(0).long()\n if boxes.numel() == 0:\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. 
after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count\n\n","sub_path":"tf2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"43054844","text":"maxint=999999999\r\n\r\ndef minKey(key,mstSet):\r\n\tminx=maxint \r\n\tfor v in range(V): \r\n\t\tif key[v]0 and mstSet[v]==False and key[v]>graph[u][v]: \r\n\t\t\t\t\tkey[v]=graph[u][v] \r\n\t\t\t\t\tparent[v]=u \r\n\r\n\treturn(parent) \r\n\r\n\r\nV=5\r\ngraph = [[0, 2, 0, 6, 0],[2, 0, 3, 8, 5],[0, 3, 0, 0, 7],[6, 8, 0, 0, 9],[0, 5, 7, 9, 0]]\r\nparent=primMST(graph,V)\r\nprint(\"Edge \\t Weight\")\r\nfor i in range(1,V): \r\n\tprint(parent[i],\"-\",i,\"\\t \",graph[i][parent[i]]) \r\n","sub_path":"Prims.py","file_name":"Prims.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"61582829","text":"import contextlib\nimport ntpath\nimport os.path\nimport shutil\nimport sys\nimport tempfile\nimport warnings\n\nif sys.version_info[:2] < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\nimport mock\n\nfrom okonomiyaki.repositories.enpkg import EnpkgS3IndexEntry\n\nfrom egginst.main import EggInst\nfrom egginst.tests.common import mkdtemp, DUMMY_EGG, NOSE_1_2_1, NOSE_1_3_0\nfrom egginst.utils import makedirs\n\nfrom enstaller.config import Configuration\nfrom enstaller.egg_meta import split_eggname\nfrom enstaller.eggcollect import EggCollection, JoinedEggCollection\nfrom enstaller.enpkg import Enpkg, EnpkgError\nfrom enstaller.enpkg import get_default_kvs, req_from_anything, \\\n get_writable_local_dir\nfrom enstaller.main import _create_enstaller_update_enpkg, create_joined_store\nfrom enstaller.resolve import Req\nfrom enstaller.store.indexed import LocalIndexedStore, RemoteHTTPIndexedStore\nfrom enstaller.store.tests.common import EggsStore, MetadataOnlyStore\nfrom enstaller.utils import PY_VER\n\nfrom .common import dummy_enpkg_entry_factory\n\nclass TestMisc(unittest.TestCase):\n def test_get_default_kvs(self):\n config = Configuration()\n config.webservice_entry_point = \"http://acme.com\"\n store = get_default_kvs(config)\n self.assertEqual(store.root, \"http://acme.com/\")\n\n def test_req_from_anything_egg_string(self):\n req_string = \"numpy-1.8.0-1.egg\"\n\n req = req_from_anything(req_string)\n\n self.assertEqual(req.name, \"numpy\")\n self.assertEqual(req.version, \"1.8.0\")\n self.assertEqual(req.build, 1)\n\n def test_req_from_anything_req(self):\n req_arg = Req(\"numpy 1.8.0-1\")\n\n req = req_from_anything(req_arg)\n\n self.assertEqual(req.name, \"numpy\")\n self.assertEqual(req.version, \"1.8.0\")\n self.assertEqual(req.build, 1)\n\n def test_req_from_anything_string(self):\n req = req_from_anything(\"numpy\")\n\n self.assertEqual(req.name, \"numpy\")\n self.assertEqual(req.version, None)\n self.assertEqual(req.build, None)\n\n def test_writable_local_dir_writable(self):\n config = Configuration()\n with mkdtemp() as d:\n config.repository_cache = d\n self.assertEqual(get_writable_local_dir(config), d)\n\n def test_writable_local_dir_non_writable(self):\n fake_dir = 
\"/some/dummy_dir/hopefully/doesnt/exists\"\n\n config = Configuration()\n config.repository_cache = fake_dir\n def mocked_makedirs(d):\n raise OSError(\"mocked makedirs\")\n with mock.patch(\"os.makedirs\", mocked_makedirs):\n self.assertNotEqual(get_writable_local_dir(config), \"/foo\")\n\nclass TestEnstallerUpdateHack(unittest.TestCase):\n def test_scenario1(self):\n \"\"\"Test that we upgrade when remote is more recent than local.\"\"\"\n remote_versions = [(\"4.6.1\", 1)]\n local_version = \"4.6.0\"\n\n actions = self._compute_actions(remote_versions, local_version)\n self.assertNotEqual(actions, [])\n\n def test_scenario2(self):\n \"\"\"Test that we don't upgrade when remote is less recent than local.\"\"\"\n remote_versions = [(\"4.6.1\", 1)]\n local_version = \"4.6.2\"\n\n actions = self._compute_actions(remote_versions, local_version)\n self.assertEqual(actions, [])\n\n def _compute_actions(self, remote_versions, local_version):\n prefixes = [sys.prefix]\n\n entries = [dummy_enpkg_entry_factory(\"enstaller\", version, build) \\\n for version, build in remote_versions]\n repo = MetadataOnlyStore(entries)\n repo.connect()\n\n enpkg = Enpkg(repo, prefixes=prefixes, hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n new_enpkg = _create_enstaller_update_enpkg(enpkg, local_version)\n return new_enpkg._install_actions_enstaller(local_version)\n\nclass TestCreateJoinedStores(unittest.TestCase):\n def test_simple_dir(self):\n with mkdtemp() as d:\n urls = [d]\n store = create_joined_store(Configuration(), urls)\n self.assertEqual(len(store.repos), 1)\n\n store = store.repos[0]\n self.assertTrue(isinstance(store, LocalIndexedStore))\n self.assertEqual(store.root, d)\n\n def test_simple_file_scheme(self):\n urls = [\"file:///foo\"]\n store = create_joined_store(Configuration(), urls)\n self.assertEqual(len(store.repos), 1)\n\n store = store.repos[0]\n self.assertIsInstance(store, LocalIndexedStore)\n self.assertEqual(store.root, \"/foo\")\n\n def test_simple_http_scheme(self):\n urls = [\"http://acme.com/repo\"]\n store = create_joined_store(Configuration(), urls)\n self.assertEqual(len(store.repos), 1)\n\n store = store.repos[0]\n self.assertIsInstance(store, RemoteHTTPIndexedStore)\n self.assertEqual(store.root, urls[0])\n\n def test_invalid_scheme(self):\n urls = [\"ftp://acme.com/repo\"]\n with self.assertRaises(Exception):\n create_joined_store(urls)\n\nclass TestEnpkg(unittest.TestCase):\n def test_info_list_names(self):\n entries = [\n dummy_enpkg_entry_factory(\"numpy\", \"1.6.1\", 1),\n dummy_enpkg_entry_factory(\"numpy\", \"1.8.0\", 2),\n dummy_enpkg_entry_factory(\"numpy\", \"1.7.1\", 1),\n ]\n\n repo = MetadataOnlyStore(entries)\n repo.connect()\n\n with mkdtemp() as d:\n enpkg = Enpkg(repo, prefixes=[d], hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n queried_entries = enpkg.info_list_name(\"numpy\")\n\n self.assertEqual(len(queried_entries), 3)\n self.assertEqual([q[\"version\"] for q in queried_entries],\n [\"1.6.1\", \"1.7.1\", \"1.8.0\"])\n\n def test_info_list_names_invalid_version(self):\n entries = [\n dummy_enpkg_entry_factory(\"numpy\", \"1.6.1\", 1),\n dummy_enpkg_entry_factory(\"numpy\", \"1.8k\", 2),\n ]\n\n repo = MetadataOnlyStore(entries)\n repo.connect()\n\n with mkdtemp() as d:\n enpkg = Enpkg(repo, prefixes=[d], hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n queried_entries = enpkg.info_list_name(\"numpy\")\n\n self.assertEqual(len(queried_entries), 2)\n self.assertEqual([q[\"version\"] for q in 
queried_entries],\n [\"1.6.1\", \"1.8k\"])\n\n def test_query_simple(self):\n entries = [\n dummy_enpkg_entry_factory(\"numpy\", \"1.6.1\", 1),\n dummy_enpkg_entry_factory(\"numpy\", \"1.8k\", 2),\n ]\n\n repo = MetadataOnlyStore(entries)\n repo.connect()\n\n with mkdtemp() as d:\n enpkg = Enpkg(repo, prefixes=[d], hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n r = dict(enpkg.query(name=\"numpy\"))\n self.assertEqual(set(r.keys()),\n set(entry.s3index_key for entry in entries))\n\n def test_query_simple_with_local(self):\n \"\"\"\n Ensure enpkg.query finds both local and remote eggs.\n \"\"\"\n local_egg = DUMMY_EGG\n\n entries = [\n dummy_enpkg_entry_factory(\"dummy\", \"1.6.1\", 1),\n dummy_enpkg_entry_factory(\"dummy\", \"1.8k\", 2),\n ]\n\n repo = MetadataOnlyStore(entries)\n repo.connect()\n\n local_entry = EnpkgS3IndexEntry.from_egg(DUMMY_EGG)\n\n with mkdtemp() as d:\n enpkg = Enpkg(repo, prefixes=[d], hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n enpkg = Enpkg(repo, prefixes=[d], hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n enpkg.ec.install(os.path.basename(local_egg),\n os.path.dirname(local_egg))\n\n r = dict(enpkg.query(name=\"dummy\"))\n self.assertEqual(set(r.keys()),\n set(entry.s3index_key for entry in entries + [local_entry]))\n\nclass TestEnpkgActions(unittest.TestCase):\n def test_install_simple(self):\n entries = [\n dummy_enpkg_entry_factory(\"numpy\", \"1.6.1\", 1),\n dummy_enpkg_entry_factory(\"numpy\", \"1.8.0\", 2),\n dummy_enpkg_entry_factory(\"numpy\", \"1.7.1\", 2),\n ]\n\n r_actions = [\n ('fetch_0', 'numpy-1.8.0-2.egg'),\n ('install', 'numpy-1.8.0-2.egg')\n ]\n\n repo = MetadataOnlyStore(entries)\n repo.connect()\n\n with mkdtemp() as d:\n enpkg = Enpkg(repo, prefixes=[d], hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n actions = enpkg.install_actions(\"numpy\")\n\n self.assertEqual(actions, r_actions)\n\n def test_install_no_egg_entry(self):\n entries = [\n dummy_enpkg_entry_factory(\"numpy\", \"1.6.1\", 1),\n dummy_enpkg_entry_factory(\"numpy\", \"1.8.0\", 2),\n ]\n\n repo = MetadataOnlyStore(entries)\n repo.connect()\n\n with mkdtemp() as d:\n enpkg = Enpkg(repo, prefixes=[d], hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n with self.assertRaises(EnpkgError):\n enpkg.install_actions(\"scipy\")\n\n def test_remove(self):\n repo = MetadataOnlyStore([])\n repo.connect()\n\n with mkdtemp() as d:\n makedirs(d)\n\n for egg in [DUMMY_EGG]:\n egginst = EggInst(egg, d)\n egginst.install()\n\n local_repo = JoinedEggCollection([EggCollection(d, False, None)])\n enpkg = Enpkg(repo, prefixes=[d], hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n enpkg.ec = local_repo\n\n self.assertTrue(local_repo.find(os.path.basename(DUMMY_EGG)))\n actions = enpkg.remove_actions(\"dummy\")\n self.assertEqual(actions, [(\"remove\", os.path.basename(DUMMY_EGG))])\n\n def test_remove_non_existing(self):\n entries = [\n dummy_enpkg_entry_factory(\"numpy\", \"1.6.1\", 1),\n dummy_enpkg_entry_factory(\"numpy\", \"1.8.0\", 2),\n ]\n\n repo = MetadataOnlyStore(entries)\n repo.connect()\n\n with mkdtemp() as d:\n enpkg = Enpkg(repo, prefixes=[d], hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n with self.assertRaises(EnpkgError):\n enpkg.remove_actions(\"numpy\")\n\n def test_chained_override_update(self):\n \"\"\" Test update to package with latest version in lower prefix\n but an older version in primary prefix.\n \"\"\"\n l0_egg = NOSE_1_3_0\n l1_egg 
= NOSE_1_2_1\n\n expected_actions = [\n ('fetch_0', os.path.basename(l0_egg)),\n ('remove', os.path.basename(l1_egg)),\n ('install', os.path.basename(l0_egg)),\n ]\n\n entries = [\n dummy_enpkg_entry_factory(*split_eggname(os.path.basename(l0_egg))),\n ]\n\n repo = MetadataOnlyStore(entries)\n repo.connect()\n\n with mkdtemp() as d:\n l0 = os.path.join(d, 'l0')\n l1 = os.path.join(d, 'l1')\n makedirs(l0)\n makedirs(l1)\n\n # Install latest version in l0\n EggInst(l0_egg, l0).install()\n # Install older version in l1\n EggInst(l1_egg, l1).install()\n\n local_repo = JoinedEggCollection([EggCollection(l1, False, None),\n EggCollection(l0, False, None)])\n enpkg = Enpkg(repo, prefixes=[l1, l0], hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n enpkg.ec = local_repo\n\n actions = enpkg.install_actions(\"nose\")\n self.assertListEqual(actions, expected_actions)\n\n\nclass TestEnpkgExecute(unittest.TestCase):\n def setUp(self):\n self.prefixes = [tempfile.mkdtemp()]\n\n def tearDown(self):\n for prefix in self.prefixes:\n shutil.rmtree(prefix)\n\n def test_simple_fetch(self):\n egg = \"yoyo.egg\"\n fetch_opcode = 0\n\n repo = MetadataOnlyStore([])\n repo.connect()\n\n with mock.patch(\"enstaller.enpkg.Enpkg.fetch\") as mocked_fetch:\n enpkg = Enpkg(repo, prefixes=self.prefixes, hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n enpkg.ec = mock.MagicMock()\n enpkg.execute([(\"fetch_{0}\".format(fetch_opcode), egg)])\n\n self.assertTrue(mocked_fetch.called)\n mocked_fetch.assert_called_with(egg, force=fetch_opcode)\n\n def test_simple_install(self):\n egg = DUMMY_EGG\n base_egg = os.path.basename(egg)\n fetch_opcode = 0\n\n entries = [\n EnpkgS3IndexEntry(product=\"free\", build=1,\n egg_basename=\"dummy\", version=\"1.0.1\",\n available=True),\n ]\n\n repo = MetadataOnlyStore(entries)\n repo.connect()\n\n with mock.patch(\"enstaller.enpkg.Enpkg.fetch\") as mocked_fetch:\n enpkg = Enpkg(repo, prefixes=self.prefixes, hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n local_repo = JoinedEggCollection([\n EggCollection(prefix, False, None) for prefix in\n self.prefixes])\n local_repo.install = mock.MagicMock()\n enpkg.ec = local_repo\n\n actions = enpkg.install_actions(\"dummy\")\n enpkg.execute(actions)\n\n mocked_fetch.assert_called_with(base_egg, force=fetch_opcode)\n local_repo.install.assert_called_with(base_egg, enpkg.local_dir,\n None)\n\nclass TestEnpkgRevert(unittest.TestCase):\n def setUp(self):\n self.prefixes = [tempfile.mkdtemp()]\n\n def tearDown(self):\n for prefix in self.prefixes:\n shutil.rmtree(prefix)\n\n def test_empty_history(self):\n repo = EggsStore([])\n repo.connect()\n\n enpkg = Enpkg(repo, prefixes=self.prefixes, hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n enpkg.revert_actions(0)\n\n with self.assertRaises(EnpkgError):\n enpkg.revert_actions(1)\n\n def test_simple_scenario(self):\n egg = DUMMY_EGG\n r_actions = {1: [], 0: [(\"remove\", os.path.basename(egg))]}\n\n repo = EggsStore([egg])\n repo.connect()\n\n enpkg = Enpkg(repo, prefixes=self.prefixes, hook=None,\n evt_mgr=None, verbose=False, config=Configuration())\n actions = enpkg.install_actions(\"dummy\")\n enpkg.execute(actions)\n\n self.assertIsNotNone(enpkg.find(os.path.basename(egg)))\n\n for state in [0, 1]:\n actions = enpkg.revert_actions(state)\n self.assertEqual(actions, 
r_actions[state])\n","sub_path":"enstaller/tests/test_enpkg.py","file_name":"test_enpkg.py","file_ext":"py","file_size_in_byte":15062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"543772020","text":"import json\nimport re\nfrom typing import List, Dict\nfrom urllib.parse import urlparse\nimport time\nimport sys\n\ndef compute_word_frequencies(token_list: list) -> Dict[str, int]:\n freq_dict = {}\n for item in token_list: \n if (item in freq_dict): \n freq_dict[item] += 1\n else: \n freq_dict[item] = 1\n return freq_dict\n\ndef compute_page_tokenlength(input_file_name):\n big_list = list()\n lineNo = 0\n lines_found = 0\n\n with open(input_file_name, 'r') as input_file:\n with open(\"./page_tokenlength.csv\", 'w') as page_tokenlength:\n for line in input_file:\n lineNo += 1\n if not line.strip():\n print(\"skipping blank\")\n continue\n lines_found += 1\n line_json=json.loads(line.encode(\"utf-8\"))\n line_tokens = line_json['token_list']\n big_list.extend(line_tokens)\n page_tokenlength.write(f\"{line_json['URL']}, {len(line_tokens)}\\n\")\n print(\"linesfound \", lines_found)\n\n print(f\"Total tokens from all pages, {len(big_list)}\")\n master_dictionary = compute_word_frequencies(big_list)\n with open(\"./token_frequency.csv\", 'w') as token_frequency:\n for key in master_dictionary.keys():\n token_frequency.write(f\"{key}, {master_dictionary[key]}\\n\")\n\ndef subdomain_counter(input_file_name):\n print(\"#####################################################\")\n hostname_list = list()\n lineNo = 0\n lines_found = 0\n with open(input_file_name, 'r') as input_file:\n for line in input_file:\n lineNo += 1\n if not line.strip():\n print(\"skipping blank\")\n continue\n lines_found += 1\n print(\"linesfound \", lines_found)\n line_json=json.loads(line.encode(\"utf-8\"))\n url = line_json['URL']\n if not re.match(r\"^https?://(.*\\.)?ics.uci.edu(/.*$|/?$)\", url):\n continue\n print(f\"url match, {url}\")\n hostname_list.append(urlparse(url).hostname)\n hostname_freq_dict = compute_word_frequencies(hostname_list)\n with open(\"./hostname_frequency.csv\", 'w') as hostname_frequency:\n for key in hostname_freq_dict.keys():\n hostname_frequency.write(f\"{key}, {hostname_freq_dict[key]}\\n\")\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(f\"usage: {sys.argv[0]} \")\n sys.exit()\n\n compute_page_tokenlength(sys.argv[1])\n subdomain_counter(sys.argv[1])","sub_path":"get_results.py","file_name":"get_results.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"572581952","text":"# -*- coding: utf-8 -*- \n# Copyright (c) 2016 Ruben Cuadra\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software distributed under the License # is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom googlemaps import Client\nimport requests , json\nclass Maps():\n\tdef __init__(self,key):\n\t\tself.key=key\n\t\tself.con = Client(key=self.key)\n\tdef 
getDirections(self,origin,destination,mode='walking'):\n\t\tr,a=self.con.directions(origin,destination,mode=mode)[0]['legs'][0]['steps'],[]\n\t\tfor i in r:\n\t\t\ta.append(i['html_instructions'])\n\t\treturn a\n\tdef textToLatLng(self,text):\n\t\tr=self.con.geocode(text,language='ES-MX')[0]['geometry']['location']\n\t\treturn '%s,%s'%(r['lat'],r['lng'])\n\tdef getReferenceOnLocation(self,latLngAsString):\n\t\tlat,lng=latLngAsString.split(',')\n\t\tnearbyS='$MAPS_API?key=%s&location=%s&rankby=distance'%(self.key,latLngAsString)\n\t\treturn json.loads(requests.get(nearbyS).text)['results'][0]['name']\n\n\tdef getKeyWords(self,string,start='',end=''): \n\t\twords=[]\n\t\tfor i in (lambda x:(i for i in xrange(0,x.count(start))))(string):\n\t\t\tstring=string[string.find(start)+3:]\n\t\t\twords.append(string[:string.find(end)])\n\t\treturn words\n\tdef getRouteLine(self,line,route=None, stop=None):\n\t\tk = self.getKeyWords(line)\n\t\tif route and stop: \n\t\t\treturn 'Continua hasta %s y abordas la ruta %s en la %s '%(k[0], route, stop)\n\t\telif stop: \n\t\t\treturn 'Desciende en la %s'%stop\n\t\telse:\t\t\n\t\t\tif len(k)>2:\n\t\t\t\treturn 'Voltea hacia el %s, camina desde %s hacia %s '%(k[0],k[1],k[2])\n\t\t\telse:\n\t\t\t\treturn 'Dirigete hacia %s '%k[0]","sub_path":"mapsConnector.py","file_name":"mapsConnector.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"398951489","text":"############################################################################################\n# Python script to calculate the wave-induced strain on the iceberg.\n# NOTE: Before running this script, make sure that the following files are present\n# [YOURDIR]/dispVsFreq.py\n# [YOURDIR]/strainVsFreq.py\n# These are generated by the getSolutionInterp.edp FreeFem script.\n# See the bash script run-script.sh for more information.\n#\n# Run:\n# python3 strain2psd.py ICEBERG\n#\n# First compute the Pierson-Moskowitz spectrum (PSD) for approximating the ocean wave data\n# Load the frequency domain data for the displacement and the strain.\n# Compute the strain^2*PSD curve.\n# Compute the area under the strain^2*PSD curve in a frequency bin to compute the RMS strain\n#############################################################################################\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport sys\nimport time\nfrom scipy.signal import find_peaks\nfont = {'weight' : 'normal',\n 'size' : 15}\nmatplotlib.rc('font', **font)\npi = np.pi\n\nfilePath = sys.argv[1]\n\ndirs = [filePath+str(1), filePath+str(1.5)]\nyoungs = [1e9, 1.5e9]\ng=9.8\nomegap=0.092*2*pi\nomega0=1.14*omegap\nalpha=0.0081\nbeta=0.74\n\nomega0s = [0.039, 0.047]\nomega1s = np.add(omega0s, 0.003)\n\ni = 0\nplt.figure(figsize=[10,4])\nfor d in dirs:\n strainVsFreq = np.loadtxt(d+\"/strainVsFreq.dat\")\n\n # Pierson Moskowitz spectrum\n omeganew = 2*pi*np.linspace(0.01,0.125,len(strainVsFreq))\n S=alpha*g**2/omeganew**5*np.exp(-beta*(omega0/omeganew)**4) + 1e-7;\n\n strain2S = (strainVsFreq[:,0]**2)*S\n omega0 = 2*pi*omega0s[i]\n omega1 = 2*pi*omega1s[i]\n\n plt.semilogy(omeganew/(2*pi), strain2S, label=\"E = \"+str(youngs[i]/1e9)+\" GPa\")\n plt.fill_between(omeganew/(2*pi), strain2S, 0, where=(omeganew > omega0) & (omeganew < omega1), color='red', alpha=0.5)\n plt.legend()\n plt.grid()\n\n omeganew1 = omeganew[(omeganew > omega0) & (omeganew < omega1)]\n strain2S1 = strain2S[(omeganew > omega0) & (omeganew < 
omega1)]\n    area2 = np.trapz(strain2S1, omeganew1/(2*pi))\n    area = np.sqrt(area2)\n    print(area/1e-3)\n\n    i += 1\n\nplt.xlabel(\"$\\\\frac{\\omega}{2\\pi}$\")\nplt.ylabel(\"$\\\\varepsilon^2\\,PSD$\")\nplt.show()\n","sub_path":"python_modules/strain2psd.py","file_name":"strain2psd.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"117082954","text":"# -*- coding:utf-8 -*-\r\n__author__ = 'Tnew'\r\n\r\nfrom time import sleep\r\n\r\nimport pytest\r\nimport yaml\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\n\r\n\"\"\"\r\nOpen WeCom,\r\nclick Contacts,\r\nclick Add Member,\r\nclick Add Manually,\r\nenter the name, gender and phone number, then click Save\r\n\"\"\"\r\nfrom appium import webdriver\r\nfrom appium.webdriver.common.mobileby import MobileBy\r\n\r\n\r\nclass TestWework:\r\n    def setup(self):\r\n        desire_caps = {\r\n            \"platformName\": 'android',\r\n            \"deviceName\": '127.0.0.0:7555',\r\n            \"appPackage\": 'com.tencent.wework',\r\n            \"appActivity\": 'com.tencent.wework.launch.WwMainActivity',\r\n            \"noReset\": 'true',\r\n            'settings[waitForIdleTimeout]': 0,\r\n            \"ChromedriverExecutable\": \"D:/Study/Automation_Tester_Guide/Lesson6_appium/chromedriver\"\r\n        }\r\n        self.driver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", desire_caps)\r\n        self.driver.implicitly_wait(5)\r\n\r\n    def teardown(self):\r\n        self.driver.quit()\r\n\r\n    # parametrize name, gender and phone number from a yaml file; the data contains Chinese, so add encoding='utf-8'\r\n    @pytest.mark.parametrize('name,gender,number', yaml.safe_load(open('./data/data.yaml', encoding='utf-8')))\r\n    def test_wework(self, name, gender, number):\r\n        # click Contacts (通讯录)\r\n        self.driver.find_element(MobileBy.XPATH, '//*[@text=\"通讯录\"]').click()\r\n        # scroll to \"Add Member\" (添加成员) and click it\r\n        self.driver.find_element_by_android_uiautomator('new UiScrollable(new UiSelector().'\r\n                                                        'scrollable(true).instance(0)).'\r\n                                                        f'scrollIntoView(new UiSelector().text(\"添加成员\").'\r\n                                                        'instance(0));').click()\r\n        # click \"Add Manually\" (手动输入添加)\r\n        self.driver.find_element_by_xpath(\"//*[@text='手动输入添加']\").click()\r\n\r\n        # enter the name\r\n        self.driver.find_element_by_xpath(\"//*[contains(@text,'姓名')]/..//*[@text='必填']\").send_keys(name)\r\n\r\n        # click \"Male\" (男) to pop up the gender picker\r\n        self.driver.find_element_by_xpath(\"//*[contains(@text,'性别')]/..//*[@text='男']\").click()\r\n\r\n        # check whether the gender picker is visible\r\n        element = WebDriverWait(self.driver, 10).until(\r\n            expected_conditions.visibility_of_element_located((By.XPATH, \"//*[@text='女']\")))\r\n        # print(element.is_displayed())\r\n        # if the \"Female\" (女) element is visible, click the requested gender\r\n        if element.is_displayed() == True:\r\n            self.driver.find_element_by_xpath(f\"//*[@text='{gender}']\").click()\r\n        # enter the phone number\r\n        self.driver.find_element_by_xpath(\"//*[contains(@text,'手机')]/..//*[@text='手机号']\").send_keys(number)\r\n        # click Save\r\n        self.driver.find_element_by_id(\"com.tencent.wework:id/gur\").click()\r\n\r\n        # get the toast message and assert that the member was added successfully (添加成功)\r\n        add = self.driver.find_element_by_xpath(\"//*[contains(@text,'添加成功')]\").text\r\n        assert '添加成功' == add\r\n","sub_path":"pratice_wework_add_contactor_appium/test_wework_add_contactor.py","file_name":"test_wework_add_contactor.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"528634768","text":"import xml.etree.ElementTree as ET\nimport sys\nfrom sys import argv\nimport math\nimport os\nfrom shutil import copyfile\n\n#e.g. 
'python frageta.py benzene.cml benzene 3'\nscript, first, second, third = argv\n\n\n\nclass atom:\n def __init__ (self, name):\n self.n = name\n\n#Fundamental output will be given as\n\nclass PIEpair:\n def __init__(self, fragment, coefficient):\n self.frag = fragment\n self.factor = coefficient\n\n\n#Hydrogen check\ndef ish(atm):\n if \"H\" not in str(atm.n):\n return False\n else:\n return True\n\ndef hardcon(a,b):\n if bmat[a+1][b+1]==0:\n return False\n if bmat[a+1][b+1]==1:\n if ish(alist[a]) or ish(alist[b]):\n return True\n else:\n return False\n if bmat[a+1][b+1]>=2:\n return True\n return False\n\ndef softcon(a,b):\n if bmat[a+1][b+1]==0:\n return False\n else:\n return True\n\ndef isoverlap(a,fraglist, numlist):\n for i in fraglist:\n for j in i:\n if alist[a].n==j:\n return True\n return False\n\n\n#Intersection of Two Things. Induces readily to n variables.\n\ndef intersect(a,b):\n intab = []\n a = int(a)\n for i in range(0, len(numlist[a])):\n #numlist[a]=numlist[a].sort()\n if numlist[a][i] in b:\n if numlist[a][i] not in intab:\n intab.append(numlist[a][i])\n return intab\n\ndef redundant(pie, pietemp):\n for r in range(0, len(pie)):\n pietemp.frag.sort()\n pie[r].frag.sort()\n if pietemp.frag==pie[r].frag and pietemp.factor==pie[r].factor:\n return True\n return False\n\ndef recurse(cur, pie, ord, ind):\n plac = []\n cursort = []\n for k in range(ind,len(numlist)):\n plac = cur\n pietemp = PIEpair(intersect(k,cur),ord)\n cur.sort()\n cursort = intersect(k,cur)\n cursort.sort()\n if cursort!=[] and redundant(pie, pietemp)==False and cursort!=cur:\n cur = intersect(k,cur)\n pietemp = PIEpair(cur,ord)\n if redundant(pie,pietemp)==False:\n pie.append(pietemp)\n recurse(cur, pielist, ord+1, ind+1)\n cur = plac\n return pie\n\ndef nums2frag(nums):\n frag = []\n for i in nums:\n frag.append(alist[i].n)\n return frag\n\ndef writecml(frag,dest):\n targetn = open(dest,\"w+\")\n copyfile(first,dest)\n treen = ET.parse(dest)\n rootn = treen.getroot()\n for j in range(0,acount):\n if root[0][j].attrib not in frag:\n rootn[0][j].attrib[\"elementType\"]=\"X\"\n targetn.write(ET.tostring(rootn))\n return 0\n\n#Parses a list of atoms from cml file\n\ntree = ET.ElementTree(file = first)\nroot = tree.getroot()\nacount = 0\nalist = []\n\ndeparted = []\nfor gc in root[0]:\n acount = acount+1\nfor i in range(0, acount):\n atmp = atom(root[0][i].attrib)\n alist.append(atmp)\n departed.append(root[0][i].attrib[\"elementType\"])\n\nbmat = [[0 for i in range(acount+1)] for j in range(acount+1)]\n\n#Parses a list of bonds from cml file and puts them in bmat\n\nbcount=0\nfor gc in root[1]:\n bcount = bcount+1\nfor i in range(0, bcount):\n btmp = str(root[1][i].attrib)\n btmp = btmp.replace(\"atomRefs2\",'')\n btmp = btmp.replace(\"a\",'')\n btmp = btmp.replace(\"order\",'')\n btmp = btmp.replace(\"\\'\",'')\n btmp = btmp.replace(\":\",'')\n btmp = btmp.replace(\"{\",'')\n btmp = btmp.replace(\"}\",'')\n btmp = btmp.replace(\", \",'')\n btmp = btmp.split()\n ab = int(btmp[0])\n ba = int(btmp[1])\n ord = int(btmp[2])\n bmat[ab][ba]=ord\n bmat[ba][ab]=ord\n\n#Fragment Sorter\n\nfraglist = []\nfragpail = []\nnumpail = []\nnumlist = []\neta = int(third)\nfullalist = []\n\nfor i in range(0,acount):\n fullalist.append(i)\n\nfor i in range(0,acount):\n if i==0 or isoverlap(i,fraglist,numlist)==False:\n fragpail.append(alist[i].n)\n numpail.append(i)\n done=False\n while (done==False):\n done=True\n for j in range(0,len(fragpail)):\n q=numpail[j]\n for k in range(q+1,acount):\n if hardcon(q,k) and alist[k].n 
not in fragpail:\n fragpail.append(alist[k].n)\n numpail.append(k)\n done=False\n fraglist.append(fragpail)\n numlist.append(numpail)\n fragpail=[]\n numpail=[]\n\nfor i in range(0,eta):\n for l in range(0,len(fraglist)):\n for k in range(0, len(fraglist[l])):\n for j in range(0,acount):\n if softcon(numlist[l][k],j) and (j not in numlist[l]):\n (fraglist[l]).append(alist[j].n)\n (numlist[l]).append(j)\n for m in range(0,acount):\n if hardcon(j,m) and m not in numlist[l]:\n (fraglist[l]).append(alist[m].n)\n (numlist[l]).append(m)\n\npreen = []\nfor i in range(0,len(numlist)):\n for j in range(0,len(numlist)):\n if set(numlist[i]).issubset(numlist[j]) == True and set(numlist[i])!=set(numlist[j]) and i not in preen:\n preen.append(i)\n\npreen.sort()\nfor i in range(len(preen)-1,-1,-1):\n del(numlist[preen[i]])\n del(fraglist[preen[i]])\n\npreen=[]\nfor i in (0,len(numlist)):\n for j in range(i+1,len(numlist)):\n if set(numlist[i])==set(numlist[j]) and i not in preen:\n preen.append(i)\n\npreen.sort()\nfor i in range(len(preen)-1,-1,-1):\n del(numlist[preen[i]])\n del(fraglist[preen[i]])\n\ndir = \"scratch\"\nif not os.path.exists(dir):\n os.makedirs(dir)\n\nnlist=[]\nfor i in range(0,acount):\n nlist.append(i)\n\npielist = []\nfor i in range(0,len(numlist)):\n recurse(fullalist, pielist, 1, i)\nfor i in pielist:\n i.frag.sort()\n\ndeadpool = []\nfor l in range(0,len(pielist)):\n for m in range(l+1,len(pielist)):\n if pielist[l].frag==pielist[m].frag and pielist[l].factor==pielist[m].factor:\n deadpool.append(m)\ndeadpool.sort()\n\nfor i in range(len(deadpool)-1,-1,-1):\n del(pielist[deadpool[i]])\n\n\nfor i in range(0,len(pielist)):\n targetfile = \"scratch/\"+str(second)+\"_Order=\"+str(pielist[i].factor)+\"Index=\"+str(i)+\"eta=\"+str(third)+\".cml\"\n target = open(targetfile,\"w+\")\n copyfile(first,targetfile)\n tree1 = ET.parse(targetfile)\n root1 = tree1.getroot()\n for j in range(0,acount):\n if j not in pielist[i].frag:\n root1[0][j].attrib[\"elementType\"]=\"X\"\n #text_file = open(\"Output.txt\", \"w\")\n target.write(ET.tostring(root1, encoding=\"unicode\"))\n #text_file.close()\ntargetfile2 = \"scratch/\" + str(second) + \"_atomlist.txt\"\ntarget2 = open(targetfile2,\"w+\")\ntarget2.write(str(departed))\n\ntargetfileb = \"scratch/\"+str(second)+\"pielist\"\ntargetb = open(targetfileb,\"w+\")\nfor i in pielist:\n targetb.write(str(i.factor))\n targetb.write(\"\\n\")\n targetb.write(str(i.frag))\n targetb.write(\"\\n\")\n","sub_path":"frageta.py","file_name":"frageta.py","file_ext":"py","file_size_in_byte":6851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"181930665","text":"# -*- coding: utf-8 -*-\r\n\r\nimport requests\r\nimport random\r\nimport time\r\nimport os\r\nimport csv\r\nimport sys\r\nimport json\r\nfrom bs4 import BeautifulSoup\r\nimport importlib\r\nimportlib.reload(sys)\r\n\r\n\r\nip_pool = [\r\n '103.212.92.254',\r\n '182.253.189.244',\r\n '190.103.85.37',\r\n '176.56.107.198',\r\n '175.42.122.245',\r\n '123.163.115.126',\r\n '113.124.93.151',\r\n '175.42.122.245',\r\n '49.93.27.56',\r\n '27.38.154.143',\r\n '218.250.205.57',\r\n '121.227.123.244',\r\n '58.20.230.246',\r\n '113.59.99.138',\r\n '125.108.103.169',\r\n '37.221.204.206'\r\n ]\r\n\r\nua_pool = [\r\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',\r\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201',\r\n 
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',\r\n    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:82.0) Gecko/20100101 Firefox/82.0'\r\n\r\n    ]\r\n\r\n\r\n\r\ndef get_random_ip(ip_pool):\r\n    proxy_list = []\r\n    for ip in ip_pool:\r\n        proxy_list.append('http://' + ip)\r\n    proxy_ip = random.choice(proxy_list)\r\n    proxies = {'http': proxy_ip}\r\n    return proxies\r\n\r\n\r\ndef get_random_headers(ua_pool):\r\n    ua_list = []\r\n    for ua in ua_pool:\r\n        ua_list.append(ua)\r\n    ua = random.choice(ua_list)\r\n    headers = {\r\n        'method': 'get',\r\n        'Cookie': 'please enter your cookie',\r\n        'Referer': 'https://m.weibo.cn/status/IqtUKo5xH?type=comment',\r\n        'User-Agent': ua,\r\n        'X-Requested-With': 'XMLHttpRequest'\r\n    }\r\n    return headers\r\n\r\ndef get_page(max_id, id_type):\r\n    params = {\r\n        'max_id': max_id,\r\n        'max_id_type': id_type\r\n    }\r\n\r\n    headers = get_random_headers(ua_pool)\r\n    proxies = get_random_ip(ip_pool)\r\n    print(proxies)\r\n\r\n    try:\r\n        r = requests.get(url, params=params, headers=headers, proxies=proxies)\r\n        if r.status_code == 200:\r\n            return r.json()\r\n    except requests.ConnectionError as e:\r\n        print('error', e.args)\r\n\r\n\r\ndef parse_page(jsondata):\r\n    if jsondata:\r\n        items = jsondata.get('data')\r\n        item_max_id = {}\r\n        item_max_id['max_id'] = items['max_id']\r\n        #print(item_max_id)\r\n        item_max_id['max_id_type'] = items['max_id_type']\r\n        return item_max_id\r\n\r\ndef write_csv(jsondata):\r\n    datas = jsondata.get('data').get('data')\r\n    for data in datas:\r\n        created_at = data.get(\"created_at\")\r\n        userid = data.get(\"user\").get(\"id\")\r\n        comment_id_url = data.get('user').get('profile_url')\r\n        like_count = data.get(\"like_count\")\r\n        source = data.get(\"source\")\r\n        floor_number = data.get(\"floor_number\")\r\n        username = data.get(\"user\").get(\"screen_name\")\r\n        comment = data.get(\"text\")\r\n        comment = BeautifulSoup(comment, 'lxml').get_text()\r\n        writer.writerow([username, userid,comment_id_url,created_at, like_count, floor_number, source,\r\n                         json.dumps(comment, ensure_ascii=False)])\r\n\r\n# starting url of the hot comments to crawl\r\nurl = 'please enter the url'\r\n\r\n\r\n# save as csv\r\npath = os.getcwd() + \"./292.csv\"\r\ncsvfile = open(path, 'w',encoding = 'utf-8',newline='')\r\nwriter = csv.writer(csvfile)\r\nwriter.writerow(['username', 'user account ID','user profile url','comment time', 'like count', 'floor number', 'source', 'comment text'])\r\n\r\nmaxpage = 请输入爬取页数 #number of pages to crawl (fill in)\r\n\r\nm_id = 0\r\nid_type = 0\r\nfor page in range(0, maxpage):\r\n    print('crawling page '+str(page+1))\r\n    jsondata = get_page(m_id, id_type)\r\n    try:\r\n        write_csv(jsondata)\r\n        results = parse_page(jsondata)\r\n        time.sleep(random.randint(0,1))\r\n        m_id = results['max_id']\r\n        id_type = results['max_id_type']\r\n    except:\r\n        time.sleep(5)\r\n        jsondata = get_page(m_id, id_type)\r\n        try:\r\n            write_csv(jsondata)\r\n            results = parse_page(jsondata)\r\n            time.sleep(random.randint(2,5))\r\n            m_id = results['max_id']\r\n            id_type = results['max_id_type']\r\n        except:\r\n            time.sleep(5)\r\n            jsondata = get_page(m_id, id_type)\r\n            write_csv(jsondata)\r\n            results = parse_page(jsondata)\r\n            time.sleep(random.randint(2,5))\r\n            m_id = results['max_id']\r\n            id_type = results['max_id_type']\r\n\r\n","sub_path":"手机端评论无cookie(代理池).py","file_name":"手机端评论无cookie(代理池).py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"272827755","text":"import siftgpu\nimport imageio as io\nimport 
numpy as np\nfrom PIL import Image, ImageDraw\n\nx = io.imread('IMG_2759.JPG')\n\nb = siftgpu.Bitmap(x[:, :, ::-1])\n\nprint(b.is_grey())\n\nb.write('out.jpg')\n\nb.clone_as_grey().write('out_g.jpg')\n\noptions = siftgpu.make_extreme_options()\n\ndescriptors, keypoints = siftgpu.extract_covariant_sift(options, b.clone_as_grey())\n\nfor keypoint in keypoints:\n print(keypoint.x)\n print(keypoint.y)\n print(keypoint.compute_scale())\n print(keypoint.compute_orientation())\n\nprint(descriptors)\nprint(descriptors.shape)\n\nimage = Image.fromarray(x.copy())\ndraw = ImageDraw.Draw(image)\n\n\nfor keypoint in keypoints:\n point_x = keypoint.x\n point_y = keypoint.y\n\n draw.line((point_x - 4, point_y - 4, point_x + 4, point_y + 4), fill=(255, 50, 50), width=2)\n draw.line((point_x - 4, point_y + 4, point_x + 4, point_y - 4), fill=(255, 50, 50), width=2)\n\nio.imwrite('sift.jpg', image)\n\nimage = Image.fromarray(x.copy())\ndraw = ImageDraw.Draw(image)\n\ndescriptors_new = siftgpu.extract_covariant_sift_given_keypoints(options, b.clone_as_grey(), keypoints[:100])\n\nfor keypoint in keypoints:\n point_x = keypoint.x\n point_y = keypoint.y\n\n draw.line((point_x - 4, point_y - 4, point_x + 4, point_y + 4), fill=(255, 50, 50), width=2)\n draw.line((point_x - 4, point_y + 4, point_x + 4, point_y - 4), fill=(255, 50, 50), width=2)\n\nassert descriptors_new.shape[0] == 100\n\nio.imwrite('sift_from_keypoints.jpg', image)\n\nprint(descriptors[:100])\nprint(descriptors_new)\n\nassert np.all(np.equal(descriptors[:100], descriptors_new))\n","sub_path":"checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"453037770","text":"from bs4 import BeautifulSoup as soup\r\nfrom urllib.request import urlopen as uReq\r\nimport pandas as pd\r\n\r\nsearch = input('Enter the search term')\r\nurl = 'https://www.flipkart.com/search?q='+search\r\n# url ='https://www.flipkart.com/search?q=iphone'\r\nprint(url)\r\n\r\nuClient = uReq(url)\r\npage = uClient.read()\r\nuClient.close()\r\n\r\npage_h =soup(page,'html.parser')\r\npage_hp = soup.prettify(page_h)\r\n# print(page_hp)\r\nartics = page_h.findAll(\"div\", {\"class\": \"_2pi5LC col-12-12\"})\r\n\r\n# artic =artics[0]\r\nprint(\"Length\" ,len(artics))\r\n# artic = soup.prettify(artics[6])\r\nNAME= []; RATING =[]; PRICE =[]; LINK = []\r\nfor i in range(len(artics)):\r\n try:\r\n try:\r\n artic = artics[i]\r\n name = artic.div.div.img[\"alt\"]\r\n NAME.append(name)\r\n # print(name)\r\n except:\r\n name = 'null'\r\n NAME.append(name)\r\n try:\r\n price = artic.findAll(\"div\", {\"class\": \"col col-5-12 nlI3QM\"})\r\n Price = (price[0].text)\r\n # print(Price[0:7])\r\n PRICE.append(Price[0:7])\r\n except:\r\n Price = 'null'\r\n PRICE.append(Price)\r\n try:\r\n rating = artic.findAll(\"div\", {\"class\": \"_3LWZlK\"})\r\n # print(rating[0].text)\r\n RATING.append(rating[0].text)\r\n except:\r\n rating = 'null'\r\n RATING.append(rating)\r\n # articsp = soup.prettify(artics[6])\r\n # print(articsp)\r\n try:\r\n pdturl = artic.div.div.div.a['href']\r\n pdturlf = \"https://www.flipkart.com\" + pdturl\r\n # print(pdturlf)\r\n LINK.append(pdturlf)\r\n except:\r\n pdturlf = 'null'\r\n RATING.append(pdturlf)\r\n # print(\"An error occured\")\r\n except:\r\n print(\"An error occured\")\r\n\r\ncountnam = NAME.count('null')\r\nfor i in range(1,countnam+1):\r\n NAME.remove('null')\r\n\r\n\r\ncountrat = RATING.count('null')\r\nfor i in range(1,countrat+1):\r\n 
RATING.remove('null')\r\n\r\ncountpri = PRICE.count('null')\r\nfor i in range(1,countpri+1):\r\n PRICE.remove('null')\r\n\r\ndiff = [len(NAME),len(RATING),len(PRICE)]\r\ndiffi =[\"NAME\",\"RATING\",\"PRICE\"]\r\n# print(min(diff))\r\nmaxval =max(diff)\r\nmaxval_pos = []\r\nfor i in range(len(diff)):\r\n if diff[i] == maxval:\r\n maxval_pos.append(i)\r\n# print(maxval_pos)\r\nif len(maxval_pos)==1:\r\n diff1 = sorted(diff)\r\n for i in range(len(diff1)-1):\r\n red = diff1[i+1]-diff1[0]\r\n if red >>0:\r\n # print(red)\r\n for j in range(1,red+1,1):\r\n print(j)\r\n ind = diffi[diff.index(diff1[i+1])]\r\n # print(ind)\r\n vars()[ind].pop(0)\r\n print(vars()[ind])\r\nk=0;\r\nif len(maxval_pos)!=1:\r\n diff1 = sorted(diff)\r\n for i in range(len(diff1)-1):\r\n red = diff1[i+1]-diff1[0]\r\n if red >>0:\r\n # print(red)\r\n for j in range(1,red+1,1):\r\n print(j)\r\n ind = diffi[maxval_pos[k]]\r\n # print(ind)\r\n vars()[ind].pop(0)\r\n print(vars()[ind])\r\n k=k+1;\r\n\r\n\r\n\r\nDict = {\"Name\":NAME,\"Rating\":RATING,\"Price\":PRICE}\r\n# print(Dict)\r\nDictp =pd.DataFrame(Dict)\r\nprint(Dictp)\r\n\r\n\r\nprefer = int(input(\"Enter the Product no you need further details\"))\r\nprint(prefer)\r\nprefer_pdt_url = LINK[prefer]\r\nprint(prefer_pdt_url)\r\n\r\nuClient = uReq(prefer_pdt_url)\r\npdt_page = uClient.read()\r\nuClient.close()\r\n\r\npdt_page_h =soup(pdt_page,'html.parser')\r\npage_hp = soup.prettify(page_h)\r\n# print(page_hp)\r\nreview = pdt_page_h.findAll(\"div\", {\"class\": \"t-ZTKy\"})\r\nprint(len(review))\r\nREVIEW=[]\r\nPDTRATING =[]\r\nUSER = []\r\nREVIEW_TITLE = []\r\nfor i in range(len(review)):\r\n try:\r\n REVIEW.append(review[i].text)\r\n # print(REVIEW)\r\n except:\r\n REVIEW.append(\"null\")\r\n try:\r\n pdt_rating = pdt_page_h.findAll(\"div\", {\"class\": \"_3LWZlK _1BLPMq\"})\r\n PDTRATING.append(pdt_rating[i].text)\r\n # print(PDTRATING)\r\n except:\r\n rating = 'null'\r\n PDTRATING.append(rating)\r\n\r\n\r\n\r\n try:\r\n user_name = pdt_page_h.findAll(\"div\", {\"class\": \"row\"})\r\n user_name = pdt_page_h.findAll('p')\r\n # print(len(user_name))\r\n # print(user_name)\r\n User = user_name[2].text\r\n Review_Title = user_name[1].text\r\n USER.append(User)\r\n REVIEW_TITLE.append(Review_Title)\r\n except:\r\n USER.append(\"null\")\r\n REVIEW_TITLE.append(\"null\")\r\n # print(user_name)\r\n # print(USER)\r\n # print(REVIEW_TITLE)\r\n\r\n\r\nDict1 = {\"_id\": NAME[prefer],\"Product Rating\":PDTRATING,\"Review\":REVIEW}\r\n\r\nDict1p =pd.DataFrame(Dict1)\r\nprint(Dict1p)\r\n\r\n\r\n\r\nimport pymongo\r\n\r\nDEFAULT_CONNECTION_URL = \"mongodb://localhost:27017/\"\r\nDB_NAME = \"REVIEW\"\r\n\r\n# Establish a connection with mongoDB\r\nclient = pymongo.MongoClient(DEFAULT_CONNECTION_URL)\r\n\r\n# Create a DB\r\ndataBase = client[DB_NAME]\r\n\r\nCOLLECTION_NAME = \"Mobile\"\r\ncollection = dataBase[COLLECTION_NAME]\r\n\r\ncollection.insert_one(Dict1)","sub_path":"Review Scrapper/scrapperedu.py","file_name":"scrapperedu.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"649251049","text":"# import numpy as np\n\n# data = np.load('img.npz')\n# lst = data.files\n\n# for item in lst:\n# print(item)\n# print(np.shape(data[item]))\n\n\n# import numpy as np\n\n# data = np.load('img.npz')\n# for key, value in data.items():\n# np.savetxt(\"somepath\" + key + \".csv\", value)\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import 
LinearSVC,SVC\nfrom mlxtend.plotting import plot_decision_regions\nfrom matplotlib import pyplot as plt\n\n\ndata = np.load('img.npz')\nX= data['arr_0']\ny=data['arr_1']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state = 0)\nlinear = SVC(kernel='linear', C=1, decision_function_shape='ovo').fit(X_train, y_train)\nrbf = SVC(kernel='rbf', gamma=1, C=1, decision_function_shape='ovo').fit(X_train, y_train)\npoly = SVC(kernel='poly', degree=3, C=1, decision_function_shape='ovo').fit(X_train, y_train)\nsig = SVC(kernel='sigmoid', C=1, decision_function_shape='ovo').fit(X_train, y_train)\n#stepsize in the mesh, it alters the accuracy of the plotprint\n#to better understand it, just play with the value, change it and print it\nh = .01\n#create the mesh\nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h),np.arange(y_min, y_max, h))\n\n# create the title that will be shown on the plot\ntitles = ['Linear kernel','RBF kernel','Polynomial kernel','Sigmoid kernel']\nfor i, clf in enumerate((linear, rbf, poly, sig)):\n #defines how many plots: 2 rows, 2columns=> leading to 4 plots\n plt.subplot(2, 2, i + 1) #i+1 is the index\n #space between plots\n plt.subplots_adjust(wspace=0.4, hspace=0.4) \n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=plt.cm.PuBuGn, alpha=0.7)\n # Plot also the training points\n plt.scatter(X[:, 0], X[:, 1], cmap=plt.cm.PuBuGn, edgecolors='grey')\n plt.xlabel('Sepal length')\n plt.ylabel('Sepal width')\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n plt.xticks(())\n plt.yticks(())\n plt.title(titles[i])\n plt.show()\n \n# a_byte_array = bytearray(r)\n# byte_list = []\n\n# for byte in a_byte_array:\n# binary_representation = bin(byte)\n# byte_list.append(binary_representation)\n# y=np.array(byte_list)\n\n# Training a classifier\n\n# Plot data points and color using their class\n# color = ['black' if c == 0 else 'blue' for c in y]\n# plt.scatter(X[:,0], X[:,1], c=color)\n\n# # Create the hyperplane\n# w = svm.coef_[0]\n# a = -w[0] / w[1]\n# xx = np.linspace(-2.5, 2.5)\n# yy = a * xx - (svm.intercept_[0]) / w[1]\n\n# # Plot the hyperplane\n# plt.plot(xx, yy)\n# plt.axis(\"off\"), plt.show()\n\n","sub_path":"file1.py","file_name":"file1.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"73432588","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.10.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# +\n# %load_ext autoreload\n# %autoreload 2\n\n# %matplotlib inline\n\nimport sys\nsys.path.append(\"../\")\n\nfrom sprintdl.main import *\nfrom sprintdl.models.efficientnet import *\nfrom sprintdl.models.xresnet import *\n\ndevice = torch.device('cuda',0)\nfrom torch.nn import init\nimport torch\nimport math\n# -\n\n# # Preprocess\n\nimport pandas as pd\nimport shutil\nfrom PIL import Image\nfpath = Path(\"/media/hdd/Datasets/faceKeypoint/\")\nt_path = fpath/\"training.csv\"\nid_l = pd.read_csv(t_path)\n\nid_l.head(3)\n\nfor c in id_l.columns:\n if(id_l[c].dtype!='object'):\n id_l[c]=id_l[c].fillna(id_l[c].median())\n\nimport torchvision\nimport tqdm\n\n\ndef save_str_img(strimg,w,h,flpath):\n 
px=255-np.array(strimg.split(),dtype=float)\n if(len(px)==w*h and len(px)%w==0 and len(px)%h==0):\n cpx = list(px.reshape(w,h))\n img = torchvision.transforms.functional.to_pil_image(tensor([cpx,cpx,cpx]))\n img.save(flpath)\n return img\n else:\n raise Exception(\"Invalid height and width\")\n\n\n# +\ntrain_im_path = fpath/\"trainImages\"\n\ntrain_im_path.mkdir(exist_ok=True)\n# -\n\nid_l.shape\n\nfor index, train_row in tqdm.tqdm(id_l.iterrows(), total = id_l.shape[0]):\n save_str_img(train_row.Image,96,96,train_im_path/(str(index)+'.jpg'))\n\n\nclass PreProcessor():\n \"Basic class for a processor that will be applied to items at the end of the data block API.\"\n def __init__(self, ds:Collection=None): self.ref_ds = ds\n def process_one(self, item:Any): return item\n def process(self, ds:Collection): ds.items = array([self.process_one(item) for item in ds.items])\n\n\n\n# +\nclass PointsProcessor(PreProcessor):\n \"`PreProcessor` that stores the number of targets for point regression.\"\n def __init__(self, ds:ItemList): self.c = len(ds.items[0].reshape(-1))\n def process(self, ds:ItemList): ds.c = self.c\n\nclass PointsLabelList(ItemList):\n \"`ItemList` for points.\"\n _processor = PointsProcessor\n def __init__(self, items:Iterator, **kwargs):\n super().__init__(items, **kwargs)\n self.loss_func = MSELossFlat()\n\n def get(self, i):\n o = super().get(i)\n return ImagePoints(FlowField(_get_size(self.x,i), o), scale=True)\n\n def analyze_pred(self, pred, thresh:float=0.5): return pred.view(-1,2)\n def reconstruct(self, t, x): return ImagePoints(FlowField(x.size, t), scale=False)\n\nclass PointsItemList(ImageList):\n \"`ItemList` for `Image` to `ImagePoints` tasks.\"\n _label_cls,_square_show_res = PointsLabelList,False\n\n\n# -\n\n# # Define required\n\ndef pilToTensor(item):\n return torchvision.transforms.functional.pil_to_tensor(test)\n\n\n# tfms = [make_rgb, to_byte_tensor, to_float_tensor, pilToTensor]\ntfms = [make_rgb, pilToTensor]\nbs = 128\n\n\ndef mloss(y_true, y_pred):\n y_true=y_true.view(-1,15,2)\n \n y_true[:,:,0]=y_true[:,:,0].clone()-y_pred[:,:,0]\n y_true[:,:,1]=y_true[:,:,1].clone()-y_pred[:,:,1]\n \n y_true[:,:,0]=y_true[:,:,0].clone()**2\n y_true[:,:,1]=y_true[:,:,1].clone()**2\n \n return y_true.sum(dim=2).sum(dim=1).sum()\n\n\n\n# +\nlr = 1e-2\npct_start = 0.5\nphases = create_phases(pct_start)\nsched_lr = combine_scheds(phases, cos_1cycle_anneal(lr/10., lr, lr/1e5))\nsched_mom = combine_scheds(phases, cos_1cycle_anneal(0.95, 0.85, 0.95))\n\ncbfs = [\n partial(AvgStatsCallback,accuracy),\n partial(ParamScheduler, 'lr', sched_lr),\n partial(ParamScheduler, 'mom', sched_mom),\n partial(BatchTransformXCallback, norm_imagenette),\n ProgressCallback,\n Recorder,\n partial(CudaCallback, device)]\n\nloss_func=mloss\nlr = .001\nopt_func = adam_opt(mom=0.9, mom_sqr=0.99, eps=1e-6, wd=1e-2)\n# -\n\ndata = PointsItemList.from_files(train_im_path, tfms = tfms)\n\ndata\n\n# +\n# il = ImageList.from_files(\"/media/hdd/Datasets/imagewoof2-160/\", tfms = tfms)\n\n# +\n# il\n# -\n\nsd = SplitData.split_by_func(data, partial(random_splitter,p_valid = .2))\n\nsd\n\n\ndef get_locs(flname):\n index = int(flname.name[:-4])\n plist=[]\n coords=list(id_l.loc[index])\n for i in range(len(coords)//2):\n plist.append([coords[i*2+1],coords[i*2]])\n return tensor(plist)\n\n\nget_locs(Path(\"/media/hdd/Datasets/faceKeypoint/trainImages/2246.jpg\"))\n\n# +\n# sd.train.items\n# -\n\nll = label_by_func(sd, get_locs)\n\nll.train.x\n\ndata = ll.to_databunch(bs, c_in=3, c_out=10)\n\n\ndef 
show_image(im, ax=None, figsize=(3, 3)):\n \"\"\"\n Show single image\n \"\"\"\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=figsize)\n ax.axis(\"off\")\n ax.imshow(im.permute(1, 2, 0))\n\n\ndef show_batch(data, n=4, c=4, r=None, figsize=None):\n \"\"\"\n Show a batch of n images from the train dataloader\n \"\"\"\n x = data.train_ds.x[:n]\n if r is None:\n r = int(math.ceil(n / c))\n if figsize is None:\n figsize = (c * 3, r * 3)\n fig, axes = plt.subplots(r, c, figsize=figsize)\n for xi, ax in zip(x, axes.flat):\n# xi = torchvision.transforms.functional.pil_to_tensor(xi)\n show_image(xi, ax)\n\n\nshow_batch(data)\n\nlearn = Learner(get_vision_model('resnet34',1), data, loss_func, lr=lr, cb_funcs=cbfs, opt_func=opt_func)\n\nlearn.fit(1)\n\n\n\n\n","sub_path":"demos/keypointRegressionDemo(WIP).py","file_name":"keypointRegressionDemo(WIP).py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"366707656","text":"from py2gmsh import (Mesh, Entity, Field)\r\nimport axifem\r\nimport os\r\nfrom scipy.optimize import minimize,NonlinearConstraint\r\nimport numpy as np\r\nimport time\r\nh_innan = 1200\r\nT_inf_innan = 95\r\nT_inf_utan = 18\r\nh_utan = 45\r\nk = 16\r\nt_veggur = 0.003\r\nd = 0.003\r\na = 0.01\r\nL = 0.01\r\nd1 = 0.0005\r\nd2 = 0.0005\r\nV_sk = np.pi*(a/2)**2*t_veggur+np.pi*(d/2)**2*(L)\r\nprint('initial volume: {}'.format(V_sk))\r\n\r\ntolerance = 6e-11 #tolerance for volume\r\n# create Mesh class instance\r\nmy_mesh = Mesh()\r\ni = 0\r\n\r\ndef objective(x, sign=-1.0):\r\n time.sleep(0.1)\r\n my_mesh = Mesh()\r\n filename = 'my_mesh'\r\n d = x[0]\r\n a = x[1]\r\n L = x[2]\r\n s1 = x[3]\r\n s2 = x[4]\r\n s3 = x[5]\r\n \r\n # create points\r\n p1 = Entity.Point([0., -a/2, 0.,d1]) #fyrsti punktur neðri vinstri\r\n\r\n p2 = Entity.Point([0.,a-a/2, 0.,d1])#2. punktur efri vinstri\r\n\r\n p3 = Entity.Point([t_veggur, a-a/2, 0.,d1])#3. punktur efri hægri\r\n\r\n\r\n p4 = Entity.Point([t_veggur, a-s1-a/2, 0.,d1])#fyrsti ribbup ef\r\n\r\n\r\n\r\n p5 = Entity.Point([t_veggur+L/2,a-s2-a/2,0.,d1])# 2 ribbup ef \r\n\r\n\r\n p6 = Entity.Point([t_veggur+L,s3-a/2,0.,d1])#síðast ribbup ef\r\n\r\n\r\n p7 = Entity.Point([t_veggur+L,s3-a/2,0.,d1])#síðast ribbup ne\r\n\r\n p8 = Entity.Point([t_veggur+L/2,s2-a/2,0,d1])#2. ribbup ne\r\n\r\n\r\n p9 = Entity.Point([t_veggur, s1-a/2, 0.,d1])#1. ribbup ef\r\n \r\n \r\n p10 = Entity.Point([t_veggur,0.-a/2,0.,d1])#síðasti punktur neðri hægri\r\n\r\n my_mesh.addEntities([p1,p2,p3,p4,p5,p6,p7,p8,p9,p10])\r\n # create curves\r\n l1 = Entity.Curve([p1, p2]) #innri bein lína upp\r\n l2 = Entity.Curve([p2, p3]) # efri hlið einangrun\r\n l3 = Entity.Curve([p3, p4]) # ytri bein lína upp\r\n l4 = Entity.Curve([p4, p5]) #ribba 1. 2. p e\r\n \r\n l5 = Entity.Curve([p5, p6]) #ribba 2. 3. p e\r\n l6 = Entity.Curve([p6, p7]) #ribba endi\r\n l7 = Entity.Curve([p7, p8]) #ribba 3. 2. n\r\n l8 = Entity.Curve([p8, p9]) #ribba 2. 1. 
n\r\n    l9 = Entity.Curve([p9,p10]) #lower straight line up \r\n    l10 = Entity.Curve([p10,p1]) #insulation, bottom\r\n\r\n    my_mesh.addEntities([l1, l2, l3, l4, l5, l6, l7, l8,l9,l10])\r\n\r\n\r\n    ll1 = Entity.CurveLoop([l1, l2, l3, l4, l5, l6, l7, l8,l9,l10], mesh=my_mesh)\r\n\r\n\r\n\r\n    s1 = Entity.PlaneSurface([ll1], mesh=my_mesh)\r\n\r\n\r\n\r\n\r\n    g1 = Entity.PhysicalGroup(name='innri')\r\n    g2 = Entity.PhysicalGroup(name='ytri')\r\n    g3 = Entity.PhysicalGroup(name='ribba')\r\n    g4 = Entity.PhysicalGroup(name='einangrun')\r\n    my_mesh.addEntities([g1, g2, g3, g4])\r\n    g1.addEntities([l1])\r\n    g2.addEntities([l3,l4,l5,l6,l7,l8,l9])\r\n    g4.addEntities([l2,l10])\r\n    g3.addEntities([s1])\r\n    # set max element size\r\n    #my_mesh.Options.Mesh.CharacteristicLengthMax = 0.1\r\n    # adding Coherence option\r\n    my_mesh.Coherence = True\r\n    # write the geofile\r\n    #os.system('rm .geo')\r\n    my_mesh.writeGeo('{}.geo'.format(filename))\r\n    os.system('gmsh {}.geo -2 -o {}.msh'.format(filename,filename))\r\n    #os.system('gmsh my_mesh.geo')\r\n    try:\r\n        xu, y, tri, T, V, q = axifem.axiHeatCond('{}.msh'.format(filename), \\\r\n        {'ribba':k}, {'ytri':(h_utan,-h_utan*T_inf_utan),'innri':(h_innan,-h_innan*T_inf_innan),'einangrun':(0,0)})\r\n        print(sign*q['ytri'][1])\r\n    except:\r\n        return 0\r\n    return sign*q['ytri'][1]\r\n\r\ndef volume(x):\r\n    \r\n    d = x[0]\r\n    a = x[1]\r\n    L = x[2]\r\n    s1 = x[3]\r\n    s2 = x[4]\r\n    s3 = x[5]\r\n    v=np.pi*(a/2)**2*t_veggur#base\r\n    v=v+1/3*np.pi*( ((a-2*s1)/2)**2+(a-2*s1)/2*(a-2*s2)/2+((a-2*s2)/2)**2 )*L/2#1. conic section\r\n    v=v+1/3*np.pi*( ((a-2*s2)/2)**2+(a-2*s2)/2*(a-2*s3)/2+((a-2*s3)/2)**2 )*L/2#2. conic section\r\n    \r\n    print(v)\r\n    return v\r\n\r\n\r\ndef constraint1(x):\r\n    x_tmp = [*x]\r\n    v = volume(x_tmp)\r\n    return v-V_sk+tolerance\r\n\r\ndef constraint2(x):\r\n    x_tmp = [*x]\r\n    v = volume(x_tmp)\r\n    return V_sk-v+tolerance\r\n\r\ndef constraint3(x): #d not less than 0.25mm\r\n    d = x[0]\r\n\r\n    return d-0.00025\r\n\r\ndef constraint4(x): #a not less than 2*d\r\n\r\n    a = x[1]\r\n    s1 = x[3]\r\n\r\n    return a-2*s1\r\n\r\ndef constraint5(x): #L greater than 0\r\n\r\n    a = x[1]\r\n    s2 = x[4]\r\n\r\n    return a-2*s2\r\n\r\ndef constraint6(x): #L greater than 0\r\n\r\n    a = x[1]\r\n\r\n    s3 = x[5]\r\n    return a-2*s3\r\n\r\ndef constraint7(x):\r\n\r\n    L = x[2]\r\n    return L\r\n\r\ndef constraint8(x):\r\n    a = x[1]\r\n    return a\r\n\r\n\r\nnlc = NonlinearConstraint(volume,V_sk-tolerance,V_sk+tolerance)\r\n\r\n\r\n\r\ncons=({'type': 'ineq',\r\n       'fun': constraint1},\r\n      {'type': 'ineq',\r\n       'fun': constraint2},\r\n      {'type': 'ineq',\r\n       'fun': constraint3},\r\n      {'type': 'ineq',\r\n       'fun': constraint4},\r\n      {'type': 'ineq',\r\n       'fun': constraint5},\r\n      {'type': 'ineq',\r\n       'fun': constraint6},\r\n      {'type': 'ineq',\r\n       'fun': constraint7},\r\n      {'type': 'ineq',\r\n       'fun': constraint8})\r\nx0 = [0.003,0.01,0.01,0.001, 0.001,0.001]\r\n\r\n#vte = volume(x0)\r\n#print(vte)\r\nsol = minimize(objective,x0,method='SLSQP', constraints = cons)\r\nprint(sol)\r\n\r\nprint('initial volume: {}'.format(V_sk))\r\nx, y, tri, T, V, q = axifem.axiHeatCond('my_mesh.msh', \\\r\n    {'ribba':16}, {'ytri':(h_utan,-h_utan*T_inf_utan),'innri':(h_innan,-h_innan*T_inf_innan),'einangrun':(0,0)})\r\n\r\nfrom matplotlib.pyplot import *\r\nprint('Volume {}'.format(V['ribba']))\r\nprint('Heat flux: {:g}'.format(q['ytri'][1]))\r\nprint('Maximum temperature: {:g}'.format(max(T)))\r\nprint('Minimum temperature: 
{:g}'.format(min(T)))\r\nfigure(figsize=(16,3))\r\ntricontourf(x,y,tri,T,20)\r\ncolorbar()\r\naxis('equal')\r\nshow()\r\n\r\n\r\n","sub_path":"varmaflutningsfraedi/tolulegt_verkefni/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"1029236","text":"import subprocess\nimport cv2\nimport dlib\nimport h5py\nimport numpy as np\nimport glob \nimport os\nimport copy\nfrom moviepy.editor import VideoFileClip\nvalidation = [\"1001\",\"1002\",\"1003\",\"1004\",\"1005\"]\ntest = [\"1006\",\"1007\",\"1008\",\"1009\",\"1010\"]\nemo_dict = { \"ANG\": \"A\" , \"DIS\":\"D\", \"FEA\":\"F\" , \"HAP\":\"H\", \"NEU\":\"N\", \"SAD\":\"S\"}\ndef getLength(filename):\n\tclip = VideoFileClip(filename)\n\treturn clip.duration\nmin_frames =50\ncount = []\nfor filename in glob.iglob('./VideoFlash_30fps2/*.mp4'):\n\t# print(filename)\n\tvideo_number = str(filename[20:24])\n\tif video_number in validation:\n\t\tvideo_type = \"val_30\"\n\telif video_number in test:\n\t\tvideo_type = \"test_30\"\n\telse:\n\t\tvideo_type = \"train_30\"\n\temotion = str(emo_dict[filename[29:32]])\n\tdir_name = \"./\"+video_type+\"/\"+str(emotion)+\"/\"+str(filename[20:-4])\n\n\t# list = os.listdir(dir_name) # dir is your directory path\n\t# number_files = len(list)\n\t# seconds = getLength(filename)\n\t# if seconds*30-number_files > 30:\n\t# \tprint number_files,seconds\n\t# \tprint(filename)\n\t# \tif number_files >= 0:\n\tpath_to_video = filename\n\tvc = cv2.VideoCapture(path_to_video)\n\tc = 0\n\tdetector = dlib.get_frontal_face_detector()\n\tface_img = []\n\twhile vc.isOpened():\n\t try:\n\t _, img_frame = vc.read()\n\t rects = detector(img_frame, 0)\n\t except:\n\t break\n\t if len(rects) > 0:\n\t try:\n\t face_img = img_frame[rects[0].top():rects[0].bottom(), rects[0].left():rects[0].right()]\n\t face_img = cv2.resize(face_img, None, fx=224 / float(face_img.shape[1]), fy=224 / float(face_img.shape[0]),\n\t interpolation=cv2.INTER_CUBIC)\n\t except:\n\t face_img = copy.copy(prev_image)\n\t pass\n\t c += 1\n\t # if c%100 == 0:\n\t # \tprint(str(c))\n\t if(len(face_img) == 0):\n\t \tcontinue\n\t cv2.imwrite(dir_name+'_frame_' + str(c) + '.jpg', face_img)\n\t # print(dir_name+'_frame_' + str(c) + '.jpg')\n\t prev_image = copy.copy(face_img)\n\t # print(\"c = \" + c)\n\t if cv2.waitKey(1) & 0xFF == ord('q'):\n\t break\n\n\tvc.release()\n\tcv2.destroyAllWindows()\n\tif c 0 and bn_decay > 0:\n hidden_layers.append(nn.BatchNorm1d(layer_size, momentum=(1 - bn_decay)))\n hidden_layers.append(hidden_activation)\n previous_layer_size = layer_size\n\n if len(hidden_layers) > 0:\n self.hidden_layers = nn.Sequential(*hidden_layers)\n else:\n self.hidden_layers = None\n\n if type(output_size) is int:\n self.output = SingleOutput(previous_layer_size, output_size, nn.Sigmoid())\n elif type(output_size) is list:\n self.output = MultiCategorical(previous_layer_size, output_size)\n else:\n raise Exception(\"Invalid output size.\")\n\n def forward(self, noise, training=False, temperature=None):\n if self.hidden_layers is None:\n hidden = noise\n else:\n hidden = self.hidden_layers(noise)\n\n return self.output(hidden, training=training, temperature=temperature)\n\n def evaluate(self, ds, k):\n real = ds.features\n n = real.shape[0]\n with torch.no_grad():\n noise = Variable(torch.FloatTensor(n, self.noise_size).normal_())\n out = self(noise).numpy()\n generated = np.round(out).astype(int)\n err = 
k_way_marginals(pd.DataFrame(real), pd.DataFrame(generated), k=k)\n return err\n","sub_path":"modules/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"222891220","text":"from reportlab.platypus import SimpleDocTemplate\nfrom reportlab.lib.pagesizes import A4\nfrom reportlab.platypus import Table\nfrom reportlab.platypus import TableStyle\nfrom reportlab.lib import colors\nimport mysql.connector;\n\nconnection1 = mysql.connector.connect( host = \"localhost\", database = \"mydb\", user= \"root\", password=\"root\" )\n\ncursor = connection1.cursor()\ncursor.execute('select * from Global Where Emp_id = \"ID10102\"')\n#cursor.execute('select * from Global')\nrows = cursor.fetchone()\n#rows = cursor.fetchall()\n#print(rows)\ncursor.close()\nconnection1.close()\n\ndata = [\n ['EMP ID ', 'EMP Name ', 'EMP Address', 'EMP Mobile' , 'EMP Country', 'EMP Status'],\n ]\n#for row in rows:\ndata.append(rows)\n \nprint(data)\n \n \nfileName = 'Employee Table.pdf'\npdf = SimpleDocTemplate( fileName, pagesize= A4 )\ntable = Table(data)\nelems = []\nelems.append(table)\nts = TableStyle(\n [\n ('BOX',(0,0),(-1,-1),2,colors.black),\n\n ('LINEBEFORE',(2,1),(2,-1),2,colors.red),\n ('LINEABOVE',(0,2),(-1,2),2,colors.green),\n\n ('GRID',(0,1),(-1,-1),2,colors.black),\n ]\n)\ntable.setStyle(ts)\nprint(\"PDF Generated to file Location\")\npdf.build(elems)\n","sub_path":"python_pdf.py","file_name":"python_pdf.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"508822265","text":"\"\"\"\r\nYou are given a nested dictionary. You don't know how deeply nested it is. Your goal is to flatten all the keys\r\n\r\n\r\n{\r\n \"a\": {\r\n \"b\": {\r\n \"c\": 5,\r\n \"d\": 6\r\n }\r\n \"e\": 10\r\n },\r\n \"f\": 10,\r\n \"g\": {\r\n \"h\":11\r\n }\r\n}\r\n\r\n\r\n{\r\n \"a.b.c\": 5,\r\n \"a.b.d\": 6,\r\n \"a.e\": 10,\r\n \"f\": 10,\r\n \"g.h\": 11\r\n}\r\n\r\n\r\n\"\"\"\r\n\r\n\r\ndef helper(k, val, answer):\r\n if not val:\r\n answer[k] = val\r\n return answer\r\n\r\n if isinstance(val, int):\r\n answer[k] = val\r\n return answer\r\n\r\n else:\r\n for i in val.items():\r\n k += '.' 
+ i[0]\r\n            helper(k, i[1], answer)\r\n            # backtracking: drop the '.' + key appended above\r\n            k = k[:-(len(i[0]) + 1)]\r\n\r\n\r\ndef flatten(dict):\r\n    answer = {}\r\n    for i in dict.items():\r\n        new_key = i[0]\r\n        helper(new_key, i[1], answer)\r\n\r\n    return answer\r\n\r\n\r\nnested_dict = {\r\n    \"a\": {\r\n        \"b\": {\r\n            \"c\": 5,\r\n            \"d\": 6\r\n        },\r\n        \"e\": 10\r\n    },\r\n    \"f\": 10,\r\n    \"g\": {\r\n        \"h\":11\r\n    }\r\n}\r\n\r\ntest = {}\r\ntest1 = {'a': {}}\r\n\r\nprint(flatten(nested_dict))\r\nprint(flatten(test1))\r\nprint(flatten(test))\r\n","sub_path":"flatten-keys.py","file_name":"flatten-keys.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"247489251","text":"import os\n\npath = \"\"\n\nos.chdir(path)\n\nb = 0\n\nfor i in os.listdir():\n    with open(i, \"r\") as read:\n        with open(\"Duplicate{}.txt\".format(i), \"w\") as write:\n            for line in read:\n                write.write(line)\n    \n    names = [\"Name1.ext\", \"Name2.ext\", \"Name3.ext\", \"Name4.ext\", \"Name5.ext\", \"Name6.ext\"]\n    os.rename(i, names[b])\n    b = b + 1\n","sub_path":"file_duplication_rename.py","file_name":"file_duplication_rename.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"114275877","text":"from flask import Flask, render_template, jsonify\nfrom pictures_data import Pictures\n\n# create Flask app\napp = Flask(__name__)\n\n# --- API Routes ---\n@app.route('/api/pictures')\ndef pictures():\n    return jsonify(Pictures)\n\n@app.route('/api/pictures/<int:id>')\n#by putting int before our variable id, tell route to expect integer\ndef picture_id(id):\n    picture = next(picture for picture in Pictures if picture['id'] == id)\n    return jsonify(picture)\n\n@app.route('/api/pictures/<country>')\ndef picture_country(country):\n    picture = next(picture for picture in Pictures if picture['country'].lower()\\\n        == country.lower())\n    return jsonify(picture)\n\n# --- HTML Routes ---\n@app.route('/pictures')\ndef pictures_general():\n    return render_template('pictures_index.html', pictures = Pictures)\n\n@app.route('/pictures/<int:id>')\ndef pictures_show(id):\n    for pic in Pictures:\n        if pic['id'] == id:\n            return render_template('picture_show.html', picture=pic)\n    return "
<h1>Sorry, no there is no picture with that id!</h1>
\"\n\n@app.route('/pictures/')\ndef pictures_list_by_country(country):\n pictures = [picture for picture in Pictures if \\\n picture['country'].lower() == country.lower()]\n return render_template('pictures_index.html', pictures=pictures)\n\n# run our Flask app\nif __name__ == '__main__':\n app.run(debug = True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"351002625","text":"#!/usr/bin/env python\n\"\"\"\nUsage: {prog} [OPTION] FILE1 FILE2\n\nCompare two XML files, ignoring element and attribute order.\n\nAny extra options are passed to the `diff' command.\n\nCopyright (c) 2017, Johannes H. Jensen.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n* Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n* The names of the contributors may not be used to endorse or promote \n products derived from this software without specific prior written\n permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\nimport sys\nimport os\nimport io\nimport xml.etree.ElementTree as ET\nfrom tempfile import NamedTemporaryFile\nimport subprocess\n\ndef attr_str(k, v):\n return \"{}=\\\"{}\\\"\".format(k,v)\n\ndef node_str(n):\n attrs = sorted(n.attrib.items())\n astr = \" \".join(attr_str(k,v) for k,v in attrs)\n s = n.tag\n if astr:\n s += \" \" + astr\n return s\n\ndef node_key(n):\n return node_str(n)\n\ndef indent(s, level):\n return \" \" * level + s\n\ndef write_sorted(stream, node, level=0):\n children = node.getchildren()\n text = (node.text or \"\").strip()\n tail = (node.tail or \"\").strip()\n\n if children or text:\n children.sort(key=node_key)\n\n stream.write(indent(\"<\" + node_str(node) + \">\\n\", level))\n\n if text:\n stream.write(indent(text + \"\\n\", level))\n\n for child in children:\n write_sorted(stream, child, level + 1)\n\n stream.write(indent(\"\\n\", level))\n else:\n stream.write(indent(\"<\" + node_str(node) + \"/>\\n\", level))\n\n if tail:\n stream.write(indent(tail + \"\\n\", level))\n\nif sys.version_info < (3, 0):\n # Python 2\n import codecs\n def unicode_writer(fp):\n return codecs.getwriter('utf-8')(fp)\nelse:\n # Python 3\n def unicode_writer(fp):\n return fp\n\ndef xmldiffs(file1, file2, diffargs=[\"-u\"]):\n tree = ET.parse(file1)\n tmp1 = 
unicode_writer(NamedTemporaryFile('w'))\n write_sorted(tmp1, tree.getroot())\n tmp1.flush()\n\n tree = ET.parse(file2)\n tmp2 = unicode_writer(NamedTemporaryFile('w'))\n write_sorted(tmp2, tree.getroot())\n tmp2.flush()\n\n args = [ \"colordiff\" ]\n args += diffargs\n args += [ \"--label\", file1, \"--label\", file2 ]\n args += [ tmp1.name, tmp2.name ]\n\n try:\n return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]\n except OSError:\n args[0] = \"diff\"\n return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]\n\ndef print_usage(prog):\n print(__doc__.format(prog=prog).strip())\n\nif __name__ == '__main__':\n args = sys.argv\n prog = os.path.basename(args.pop(0))\n\n if '-h' in args or '--help' in args:\n print_usage(prog)\n exit(0)\n\n if len(args) < 2:\n print_usage(prog)\n exit(1)\n\n file2 = args.pop(-1)\n file1 = args.pop(-1)\n diffargs = args if args else [\"-u\"]\n\n xmldiffs(file1, file2, diffargs)\n","sub_path":"Test/testGBEXPORTFOLDER/classes/xmldiffs.py","file_name":"xmldiffs.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"413969978","text":"from typing import NewType, TypeVar\n\n\nContractName = NewType(\"ContractName\", str)\nFunctionName = NewType(\"FunctionName\", str)\nEvent = NewType(\"Event\", str)\nParam = NewType(\"Param\", str)\nSVar = NewType(\"SVar\", str)\nCtMod = NewType(\"CtMod\", str)\nFnMod = NewType(\"FnMod\", str)\nSouffleListItem = TypeVar(\"SouffleListItem\")","sub_path":"Code2Schema/datalog/newtypes.py","file_name":"newtypes.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"107098022","text":"from __future__ import absolute_import\n\nfrom .logger import Logger\n\nimport requests\nimport json\nfrom datetime import datetime, timedelta\n\n\nclass DataPlatformAPI:\n\n def __init__(self, cf_client):\n assert cf_client is not None\n self.cf_client = cf_client\n self.log = Logger().get_logger(self.__class__.__name__)\n\n def _request_headers(self):\n iam_token = self.cf_client.get_oidc_token()['access_token']\n headers = { 'Authorization': 'Bearer {}'.format(iam_token) }\n return headers\n\n def _request(self, url, http_method='get', data=None, description='', create_auth_headers=True):\n if create_auth_headers:\n headers = self._request_headers()\n else:\n headers = {}\n try:\n if http_method == 'get':\n response = requests.get(url, headers=headers)\n elif http_method == 'post':\n response = requests.post(url, headers=headers, data=json.dumps(data))\n elif http_method == 'delete':\n response = requests.delete(url, headers=headers)\n\n response.raise_for_status()\n except requests.exceptions.RequestException as e:\n self.log.error('{} : {} {} : {} {}'.format(description, http_method, url, response.status_code, response.text))\n raise\n\n try:\n self.log.debug('{} : {} {} : {} {}'.format(description, http_method, url, response.status_code, json.dumps(response.json())))\n except ValueError:\n self.log.debug('{} : {} {} : {} {}'.format(description, http_method, url, response.status_code, response.text))\n\n return response\n\n def status(self, vcap):\n api_url = vcap['cluster_management']['api_url'] + '/state'\n response = self._request(url=api_url, http_method='get', description='status')\n return 
response.json()\n","sub_path":"ibm_analytics_engine/dataplatform_api.py","file_name":"dataplatform_api.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"647952070","text":"# factions\nGRINEER = 0\nCORPUS = 1\nINFESTED = 2\nOROKIN = 3\n\n# flesh types\nCLONED_FLESH = 0\n\n# armor types\nFERRITE_ARMOR = 0\n\nCLONED_FLESH_STATS = {'Impact': -0.25,\n\t\t\t\t'Puncture' : 0,\t\n\t\t\t\t'Slash': 0.25,\n\t\t\t\t'Cold': 0,\n\t\t\t\t'Electricity': 0,\t\n\t\t\t\t'Heat': 0.25,\n\t\t\t\t'Toxin': 0,\n\t\t\t\t'Blast': 0,\n\t\t\t\t'Corrosive': 0,\n\t\t\t\t'Gas': -0.5,\n\t\t\t\t'Magnetic': 0,\n\t\t\t\t'Radiation': 0,\n\t\t\t\t'Viral': 0.75,\n\t\t\t\t'TrueDamage': 0,\n\t\t\t\t'Void': -0.50\n}\n\n\nFERRITE_ARMOR = {'Impact': 0,\n\t\t\t\t'Puncture' : 0.5,\t\n\t\t\t\t'Slash': -0.15,\n\t\t\t\t'Cold': 0,\n\t\t\t\t'Electricity': 0,\t\n\t\t\t\t'Heat': 0,\n\t\t\t\t'Toxin': 0.25,\n\t\t\t\t'Blast': -0.25,\n\t\t\t\t'Corrosive': 0.75,\n\t\t\t\t'Gas': 0,\n\t\t\t\t'Magnetic': 0,\n\t\t\t\t'Radiation': 0,\n\t\t\t\t'Viral': 0,\n\t\t\t\t'TrueDamage': 0,\n\t\t\t\t'Void': 0\n}\n\n\nclass Enemy():\n\tdef __init__(self, faction = None, base_level = 0, level = 0, base_health = 0, base_shield = 0, base_armor = 0, base_affinity = 0, flesh_type=None, armor_type=None):\n\t\t\n\t\tself.level = base_level if level==0 else level\n\t\tself.faction = faction\n\t\tself.flesh_type = flesh_type\n\t\tself.armor_type = armor_type\n\n\t\tself.health_stats = None\n\t\tself.armor_stats = None\n\n\t\tif flesh_type == CLONED_FLESH:\n\t\t\tself.health_stats = CLONED_FLESH_STATS\n\n\n\n\t\tif armor_type == FERRITE_ARMOR:\n\t\t\tself.armor_type = FERRITE_ARMOR\n\n\t\t\n\t\tself.health = base_health * ( 1 + pow(level - base_level, 2) * 0.015)\n\t\tself.shield = base_shield * ( 1 + pow(level - base_level, 2) * 0.0075)\n\t\tself.armor = base_armor * ( 1 + pow(level - base_level, 1.75) * 0.005)\n\t\tself.affinity = base_affinity * (1 + pow(level, 0.5) * 0.1425)\n\t\tself.damage_reduction = self.armor / (self.armor + 300)\n\t\tself.effective_health = self.health / (1- self.damage_reduction)\n\tdef __repr__(self):\n\t\treturn f'Faction: {self.faction}\\nHP: {self.health}\\nShield: {self.shield}\\nArmor: {self.armor}\\nAffinity: {self.affinity}\\nDamage Reduction: {self.damage_reduction}\\nEffective HP: {self.effective_health}'\n\n\nHeavyGunnerS = {'base_level': 8,\n\t\t\t\t'base_health': 300,\n\t\t\t\t'base_armor': 500,\n\t\t\t\t'faction': GRINEER,\n\t\t\t\t'flesh_type': CLONED_FLESH,\n\t\t\t\t'armor_type': FERRITE_ARMOR,\n\t\t\t\t#REMOVE LATER\n\t\t\t\t'level': 165}\n\nHeavyGunner = Enemy(**HeavyGunnerS)\nprint(HeavyGunner)","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"221166954","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 17 09:12:22 2017\r\n\r\n@author: lihepeng\r\n\"\"\"\r\n\r\ndef ev_model(SoC_old, p_ev):\r\n \r\n delta_t = 1/12\r\n \r\n Emax = 21.6\r\n \r\n Eebmin = 3.24\r\n\r\n lam = 1\r\n \r\n if p_ev >= 0:\r\n \r\n SoC_new = SoC_old + delta_t * p_ev * lam / Emax\r\n \r\n else:\r\n \r\n SoC_new = SoC_old + delta_t * (p_ev / lam) / Emax\r\n \r\n return SoC_new","sub_path":"data_process/ev_model.py","file_name":"ev_model.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"362048035","text":"# 
import time\nfrom utils import getLocalServerTime\nfrom Products.CMFCore.utils import getToolByName\nfrom DateTime import DateTime\n\n\ndef ObjectInitializedEventHandler(instance, event):\n \"\"\"called an object is created\n \"\"\"\n if instance.portal_type == 'BoxMovement':\n membership = getToolByName(instance, 'portal_membership')\n if membership.isAnonymousUser():\n member = 'anonymous'\n else:\n member = membership.getAuthenticatedMember().getUserName()\n\n instance.getField('ChangeUserName').set(instance, member)\n instance.getField('ChangeDateTime').set(instance, DateTime())\n\n boxMove(instance)\n updateLocalServerTime(instance)\n\ndef ObjectModifiedEventHandler(instance, event):\n \"\"\" Called if the object is modified\n \"\"\"\n if instance.portal_type == 'BoxMovement':\n membership = getToolByName(instance, 'portal_membership')\n if membership.isAnonymousUser():\n member = 'anonymous'\n else:\n member = membership.getAuthenticatedMember().getUserName()\n\n instance.getField('ChangeUserName').set(instance, member)\n instance.getField('ChangeDateTime').set(instance, DateTime())\n\n boxMove(instance)\n updateLocalServerTime(instance)\n \ndef boxMove(instance):\n wf = getToolByName(instance.getStorageLocation(), 'portal_workflow')\n\n old_loc = instance.getStorageLocation()\n new_loc = instance.getNewLocation()\n\n box_samples = old_loc.only_items_of_portal_type('Sample')\n free_positions = new_loc.get_free_positions()\n\n if len(box_samples) <= len(free_positions):\n for i, sample in enumerate(box_samples):\n loc_id = int(sample.getStorageLocation().id) - 1\n liberateBox(box_samples[i])\n sample.setStorageLocation(free_positions[loc_id])\n wf.doActionFor(free_positions[loc_id], 'occupy')\n\n old_loc.reindexObject()\n new_loc.reindexObject()\n\ndef liberateBox(instance):\n wf = getToolByName(instance.getStorageLocation(), 'portal_workflow')\n wf.doActionFor(instance.getStorageLocation(), 'liberate')\n instance.setStorageLocation(None)\n\ndef updateLocalServerTime(instance):\n instance.getField('DateCreated').set(instance, getLocalServerTime(instance.getField('DateCreated').get(instance)))\n","sub_path":"baobab/lims/subscribers/boxmovement.py","file_name":"boxmovement.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"625162075","text":"from listaExercicio.uteis.util import Util\n\n\ndef main():\n Util().enunciado('FAÇA UM PROGRAMA PARA CONTAR A QUANTIDADE DE NÚMEROS PARES ENTRE DOIS NÚMEROS QUAISQUER')\n\n primeiroValor = int(input('Digite o primeiro valor: '))\n segundoValor = int(input('Digite o segundo valor: '))\n quantidade = 0\n\n if primeiroValor > segundoValor:\n temp = primeiroValor\n primeiroValor = segundoValor\n segundoValor = temp\n\n for valores in range(primeiroValor, segundoValor+1):\n if valores % 2 == 0:\n quantidade += 1\n\n try:\n print('Existem {} numeros pares de {} a {}'.format(quantidade, temp, primeiroValor))\n except:\n print('Existem {} numeros pares de {} a {}'.format(quantidade, primeiroValor, segundoValor))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"exercicio43/exercicio43.py","file_name":"exercicio43.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"302596477","text":"\r\nimport socket\r\n\r\n\r\nHOST = socket.gethostbyname(socket.gethostname()) #\" \"\r\nPORT = 65432\r\n\r\nwith socket.socket(socket.AF_INET,socket.SOCK_STREAM) as socket:\r\n 
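# note: 'as socket' rebinds the imported module name to the listening socket inside this block\r\n    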
socket.bind((HOST,PORT))\r\n \r\n socket.listen()\r\n conn , addr = socket.accept()\r\n \r\n with conn:\r\n print(\"connected by\",addr)\r\n while True:\r\n data = conn.recv(1024)\r\n if not data:\r\n break\r\n conn.sendall(data) #echo back\r\n\r\n","sub_path":"echoserver.py","file_name":"echoserver.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"503358373","text":"import pandas as pd\nfrom datetime import datetime\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\ndef build_selenium():\n options = Options()\n options.add_argument('--headless') # suppresses the browser from opening\n options.add_argument('--disable-gpu')\n options.add_argument(\"--log-level=3\") # suppresses console errors\n\n driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)\n return driver\n\n\n# get texas schools from the NYTG_schools json object\ndef get_data(driver):\n driver.get('https://www.nytimes.com/interactive/2020/us/covid-college-cases-tracker.html')\n texas_schools = driver.execute_script(\"\"\"const texas_schools = (NYTG_schools.filter((s) => \n s['state'] === 'Texas'));\n return texas_schools\"\"\")\n\n nyt_date = driver.find_element_by_xpath('/html/body/div[1]/main/article/header/div[2]/p/time').text\n\n driver.quit()\n return [texas_schools, nyt_date]\n\n\n# parse json into df and output to csv\ndef parse_data(nyt_data):\n texas_schools = nyt_data[0]\n nyt_date = nyt_data[1]\n\n date_formatted = nyt_date.replace('Updated ', '').replace('Sept', 'Sep') # temp fix to coerce September into standard format\n date_out = datetime.strptime(date_formatted, '%b. %d, %Y')\n\n df = pd.DataFrame(texas_schools)[['nytname', 'city', 'county', 'death', 'infected']]\n df['Date'] = date_out\n \n df.to_csv('original-sources/historical/schools/nyt_schools_' + date_out.strftime('%Y-%m-%d') + '.csv',\n index=False)\n\n\ndriver = build_selenium()\nparse_data(get_data(driver))\n","sub_path":"nyt-scraping.py","file_name":"nyt-scraping.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"228544403","text":"import weakref\n\nfrom typing import Optional, Union, List\n\nfrom .http import Route, HTTPClient\nfrom .token import AccessTokenResponse\nfrom .user import User\n\n\n__all__: tuple = (\n \"OAuth2Client\",\n)\n\n\nclass OAuth2Client:\n \"\"\"\n A class representing a client interacting with the discord OAuth2 API.\n \"\"\"\n def __init__(\n self,\n *,\n client_id: int,\n client_secret: str,\n redirect_uri: str,\n scopes: Optional[List[str]] = None\n ):\n \"\"\"A class representing a client interacting with the discord OAuth2 API.\n\n :param client_id: The OAuth application's client_id\n :type client_id: int\n :param client_secret: The OAuth application's client_secret\n :type client_secret: str\n :param redirect_uri: The OAuth application's redirect_uri. 
Must be one of the redirect URIs configured on the developer portal\n :type redirect_uri: str\n :param scopes: A list of OAuth2 scopes, defaults to None\n :type scopes: Optional[List[str]], optional\n \"\"\"\n self._id = client_id\n self._auth = client_secret\n self._redirect = redirect_uri\n self._scopes = \" \".join(scopes) if scopes is not None else None\n\n self.http = HTTPClient()\n self.http._state_info.update(\n {\n \"client_id\": self._id,\n \"client_secret\": self._auth,\n \"redirect_uri\": self._redirect,\n \"scopes\": self._scopes,\n }\n )\n\n self._user_cache = weakref.WeakValueDictionary()\n\n async def exchange_code(self, code: str) -> AccessTokenResponse:\n \"\"\"Exchanges the code you receive from the OAuth2 redirect.\n\n :param code: The code you've received from the OAuth2 redirect\n :type code: str\n :return: A response class containing information about the access token\n :rtype: AccessTokenResponse\n \"\"\"\n route = Route(\"POST\", \"/oauth2/token\")\n post_data = {\n \"client_id\": self._id,\n \"client_secret\": self._auth,\n \"grant_type\": \"authorization_code\",\n \"code\": code,\n \"redirect_uri\": self._redirect,\n }\n if self._scopes is not None:\n post_data[\"scope\"] = self._scopes\n request_data = await self.http.request(route, data=post_data)\n token_resp = AccessTokenResponse(data=request_data)\n return token_resp\n\n async def refresh_token(self, refresh_token: Union[str, AccessTokenResponse]) -> AccessTokenResponse:\n \"\"\"Refreshes an access token. Takes either a string or an AccessTokenResponse.\n\n :param refresh_token: The refresh token you received when exchanging a redirect code\n :type refresh_token: Union[str, AccessTokenResponse]\n :return: A new access token response containing information about the refreshed access token\n :rtype: AccessTokenResponse\n \"\"\"\n refresh_token = (\n refresh_token if isinstance(refresh_token, str) else refresh_token.token\n )\n route = Route(\"POST\", \"/oauth2/token\")\n post_data = {\n \"client_id\": self._id,\n \"client_secret\": self._auth,\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token,\n }\n request_data = await self.http.request(route, data=post_data)\n token_resp = AccessTokenResponse(data=request_data)\n return token_resp\n\n async def fetch_user(self, access_token_response: AccessTokenResponse) -> User:\n \"\"\"Makes an API call to fetch a user using their access token.\n\n :param access_token_response: A class holding information about an access token\n :type access_token_response: AccessTokenResponse\n :return: Returns a User object holding information about the selected user\n :rtype: User\n \"\"\"\n access_token = access_token_response.token\n route = Route(\"GET\", \"/users/@me\")\n headers = {\"Authorization\": \"Bearer {}\".format(access_token)}\n resp = await self.http.request(route, headers=headers)\n user = User(http=self.http, data=resp, acr=access_token_response)\n self._user_cache.update({user.id: user})\n return user\n\n def get_user(self, id: int) -> Optional[User]:\n \"\"\"Gets a user from the cache. The cache is a WeakValueDictionary, so objects may be removed without notice.\n\n :param id: The id of the user you want to get\n :type id: int\n :return: A possible user object. 
Returns None if no User is found in cache.\n :rtype: Optional[User]\n \"\"\"\n user = self._user_cache.get(id)\n return user\n\n async def close(self):\n \"\"\"Closes and performs cleanup operations on the client, such as clearing its cache.\n \"\"\"\n self._user_cache.clear()\n await self.http.close()\n","sub_path":"discord/ext/oauth/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"572562148","text":"# File: Sun.py\n\n# Description: Draws recursively a sun like figure\n\nimport math, turtle\n\ndef drawArcR (ttl, size, degrees):\n for iter in range (degrees):\n ttl.forward (size)\n ttl.right (1)\n\ndef drawArcL (ttl, size, degrees):\n for iter in range (degrees):\n ttl.forward (size)\n ttl.left (1)\n\ndef drawRay (ttl, size):\n for iter in range (2):\n drawArcR (ttl, size, 90)\n drawArcL (ttl, size, 90)\n\ndef drawSun (ttl, size, color):\n ttl.fillcolor (color)\n ttl.begin_fill ()\n for iter in range (9):\n drawRay (ttl, size)\n ttl.right (160)\n ttl.end_fill ()\n\ndef main():\n # put label on top of page\n turtle.title ('Sun Figure')\n\n # setup screen size\n turtle.setup (1000, 1000, 0, 0)\n\n # create a turtle object\n ttl = turtle.Turtle()\n\n # draw the sun figure\n drawSun (ttl, 1, 'red')\n\n # persist drawing\n turtle.done()\n\nmain()\n","sub_path":"turtle/sun.py","file_name":"sun.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"329722607","text":"from apps.databaseaccess.NBAStatisticsDBManager import NBAStatisticsDBManager\nfrom apps.databaseaccess.PlayerDAOImpl import PlayerDAOImpl\nfrom apps.data.NBASeasonEnum import NBASeasonEnum\n\nclass PlayerDAO(NBAStatisticsDBManager):\n\n def __init__(self, service_factory):\n super(PlayerDAO, self).__init__(service_factory)\n print('In PlayerDAO')\n\n def insert_players_game_statistics_for_season(self, season_player_statistics_array, scraping_history_request):\n connection = self.connection_manager.get_connection()\n cur = connection.cursor()\n\n PlayerDAOImpl.getInstance().insert_players_game_statistics_for_season(season_player_statistics_array,\n scraping_history_request,\n cur)\n\n connection.commit()\n self.connection_manager.disconnect(connection)\n\n def insert_players_game_statistics(self, date_of_games, players_game_statistics_obj, scraping_history_request):\n connection = self.connection_manager.get_connection()\n cur = connection.cursor()\n\n PlayerDAOImpl.getInstance().insert_players_game_statistics(date_of_games,\n players_game_statistics_obj,\n scraping_history_request,\n cur)\n\n connection.commit()\n self.connection_manager.disconnect(connection)\n\n def insert_players(self, players, scraping_history_request):\n connection = self.connection_manager.get_connection()\n cur = connection.cursor()\n\n PlayerDAOImpl.getInstance().insert_players(players, scraping_history_request, cur)\n\n connection.commit()\n self.connection_manager.disconnect(connection)\n\n def insert_nba_draft(self, draft_data, scraping_history_request):\n connection = self.connection_manager.get_connection()\n cur = connection.cursor()\n\n PlayerDAOImpl.getInstance().insert_nba_draft(draft_data, scraping_history_request, cur)\n\n connection.commit()\n self.connection_manager.disconnect(connection)\n\n def fetch_all_player_slug_ids(self):\n connection = self.connection_manager.get_connection()\n cur = connection.cursor()\n all_slug_ids 
= PlayerDAOImpl.getInstance().fetch_all_player_slug_ids(cur)\n self.connection_manager.disconnect(connection)\n return all_slug_ids\n","sub_path":"apps/databaseaccess/PlayerDAO.py","file_name":"PlayerDAO.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"545378751","text":"import xml.etree.ElementTree as ET\nimport os\n\n\nclass Grism(object):\n def __init__(self,element):\n attrib=element.attrib\n self.grism=attrib['grism']\n self.config=element.text\n self.blocking=attrib.get('blocking',None)\n \n \nclass Header(object):\n def __init__(self):\n self.data={}\n def from_xml(self,element):\n attrib=element.attrib\n self[attrib['keyword']]=(element.text,attrib['comment'])\n\n def __setitem__(self,k,v):\n # parse the input\n if isinstance(v,tuple):\n c=v[1]\n v=v[0]\n else:\n c=''\n\n # try to retype it\n try:\n v=int(v)\n except:\n try:\n v=float(v)\n except:\n pass\n\n # record the keyword,(value,comment) pair\n self.data[k]=(v,c)\n def __len__(self):\n return len(self.data)\n\n \n def items(self):\n return self.data.items()\n \n def __str__(self):\n out='Header Values:'\n if len(self)==0:\n out+='\\nis empty'\n else:\n for k,(v,c) in self.items():\n if isinstance(v,str):\n v=\"'\"+v+\"'\"\n out+='\\n{:8}= {} / {}'.format(k,v,c)\n return out\n \nclass Module(object):\n def __init__(self,module,path):\n self.name=module.attrib['name']\n\n self.path=os.path.join(path,self.name)\n \n # load some grisms\n self.grisms={}\n for g in module.findall('grism'):\n grism=Grism(g)\n self.grisms[(grism.grism,grism.blocking)]=grism\n\n # load the SIAF data\n self.siaf=module.findall('siaf')[0].attrib\n header=module.findall('header')\n\n # load some header keywords\n self.header=Header()\n for h in module.findall('header'):\n self.header.from_xml(h)\n\n # load the fits extensions\n exten=module.findall('extensions')[0].attrib\n self.exten={k:int(v) for k,v in exten.items()}\n\n\n def load_config(self,grism,blocking):\n k=(grism,blocking)\n if k in self.grisms:\n filename=os.path.join(self.path,grism,self.grisms[k].config)\n print(\"LOADING {}\".format(filename))\n conf=True\n\n else:\n conf=None\n \n return conf\n \n def __str__(self):\n return \"Grism module {}\".format(self.name)\n\n def __getitem__(self,k):\n return self.grisms[k]\n\n def __contains__(self,k):\n return k in self.grisms\n\n \nclass Detector(object):\n def __init__(self,telescope,instrument,detector):\n attrib=detector.attrib\n self.name=attrib['name']\n\n\n path=os.path.join(telescope.name,instrument.name,self.name)\n\n \n self.header=Header()\n self.header['TELESCOP']=(telescope.name,'telescope used to acquire data')\n self.header['INSTRUME']=(instrument.name,'identifier for instrument used to acquire data')\n for h in detector.findall('header'):\n self.header.from_xml(h)\n\n self.modules=[Module(el,path) for el in detector.findall('module')]\n \n def __str__(self):\n return \"Detector configuration for {}\".format(self.name)\n\n def __iter__(self):\n yield from self.modules\n\n \n \nclass Instrument(object):\n def __init__(self,module):\n self.name=module.attrib['name']\n\nclass Telescope(object):\n def __init__(self,module):\n self.name=module.attrib['name']\n\n\n \nclass Config(dict):\n def __init__(self):\n tree=ET.parse('instruments.xml')\n conf=tree.getroot()\n\n for telescope in conf:\n t=Telescope(telescope)\n for instrument in telescope:\n i=Instrument(instrument)\n for detector in instrument:\n d=Detector(t,i,detector)\n 
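# register each detector under its (telescope, instrument, detector) name tuple\n                    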
self[(t.name,i.name,d.name)]=d\n #i[d.name]=d\n #t[i.name]=i\n #self[t.name]=t\n \n def get(self,telescope,instrument,detector):\n try:\n modules=self[(telescope,instrument,detector)]\n except:\n raise KeyError(\"Invalid ({},{},{}) tuple.\".format(telescope,instrument,detector))\n\n return modules\n\n\nif __name__=='__main__':\n\n\n\n tree=ET.parse('instruments.xml')\n conf=tree.getroot()\n\n\n t=[tel.attrib['name'] for tel in conf]\n tel=conf[t.index('HST')]\n\n i=[ins.attrib['name'] for ins in tel]\n ins=tel[i.index('WFC3')]\n\n d=[det.attrib['name'] for det in ins]\n det=Detector(Telescope(tel),Instrument(ins),ins[d.index('IR')])\n\n\n\n\n x=Config()\n\n detector=x.get('HST','WFC3','IR')\n\n\n\n print(det.name,detector.name)\n print(type(detector),type(det))\n\n kjf\n\n\n print(\"STUFF WITH PHDU\") \n for m in detector:\n print(\"PROCESS EACH DETECTOR\")\n conf=m.load_config('g102',None)\n\n detector=x.get('JWST','NIRCAM','LONG')\n\n print(\"STUFF WITH PHDU\") \n for m in detector:\n print(\"PROCESS EACH DETECTOR\")\n conf=m.load_config('column','F480M')\n\n\n #detector=x.get('HST','WFC3','IR','g102')\n #for m in detector:\n # print(m.grisms)\n#\n","sub_path":"pylinear/grism/OLD/instruments_old.py","file_name":"instruments_old.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"364091125","text":"from datetime import datetime\n\nimport scrapy\nfrom choose_stocks.items import IndustryPerformance\n\nclass industry_performance(scrapy.Spider):\n\tname = \"industry_performance\"\n\tallowed_domains = [\"finviz.com\"]\n\tstart_urls = [\"http://www.finviz.com/groups.ashx?g=industry&v=140&o=name\"]\n\n\tdef parse(self, response):\n\t\treturn self.industry_performance(response)\n\n\tdef industry_performance(self, response):\n\t\tsectors = []\n\t\tnow = datetime.now()\n\t\tfor row in response.xpath('//tr[@class=\"table-dark-row-cp\"] | //tr[@class=\"table-light-row-cp\"]'):\n\t\t\tdata = IndustryPerformance()\n\t\t\tcols = row.xpath('td//text()').extract()\n\t\t\tdata['date'] = now.strftime('%Y-%m-%d')\n\t\t\tdata['industry'] = cols[1]\n\t\t\tdata['perf_week'] = cols[2]\n\t\t\tdata['perf_month'] =cols[3]\n\t\t\tdata['perf_quarter'] =cols[4]\n\t\t\tdata['perf_y'] =cols[5]\n\t\t\tdata['perf_ytd'] =cols[6]\n\t\t\tsectors.append(data)\n\t\treturn sectors","sub_path":"choose_stocks/spiders/industry_performance.py","file_name":"industry_performance.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"132102533","text":"import io\nimport json\nimport os\nimport neural_network as nn\nfrom time import time\n\n\ndef process(data, index):\n result = {'success' : True}\n if data['action'] == 'create':\n create_index(data, index)\n elif data['action'] == 'add' or data['action'] == 'update':\n globals()['index'][data['key']] = data['value']\n elif data['action'] == 'delete':\n globals()['index'].pop(data['key'])\n else:\n result['results'] = get(index['data'], data['key'])\n printjson(result, os.path.curdir + '/json/out.index.json')\n\n\ndef get(network, input_vector):\n return nn.feed_forward(network, input_vector)[-1]\n\n\ndef create_index(data, index):\n neural_network = data['data']\n index['data'] = nn.train(neural_network, data['tf'], data['idf'])\n printjson(index, index['current'])\n\n\ndef printjson(data, output):\n data['time'] = time()\n text = json.dumps(data,\n indent=4, sort_keys=True,\n separators=(',', ': 
'), ensure_ascii=False)\n with io.open(output, 'w', encoding='utf8') as outfile:\n outfile.write(text)\n\nif __name__ == '__main__':\n index = {}\n try:\n with io.open(os.path.curdir + '/index/index.json', 'r', encoding='utf8') as model:\n index = json.load(model)\n except:\n pass\n index['current'] = os.path.curdir + '/index/index.json'\n try:\n data = {}\n with io.open(os.path.curdir + '/json/in.index.json', 'r', encoding='utf8') as data_file:\n data = json.load(data_file)\n process(data, index)\n except Exception as e:\n print(e)\n pass","sub_path":"index/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"560824782","text":"import sys\nimport tweepy\nimport time\nfrom keys import key\n\nAPI_KEY = key['api_key']\nAPI_SECRET = key['api_secret']\nACCESS_TOKEN = key['access_token']\nACCESS_TOKEN_SECRET = key['access_token_secret']\ntarget_ac = \"WeAreNetflix\"\n\ntry:\n auth = tweepy.OAuthHandler(API_KEY,API_SECRET)\n auth.set_access_token(ACCESS_TOKEN,ACCESS_TOKEN_SECRET)\n\n api = tweepy.API(auth,wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)\n\n follower_ids = []\n for page in tweepy.Cursor(api.followers_ids, screen_name=target_ac).pages():\n follower_ids.extend(page)\n #print (follower_ids, len(follower_ids))\n with open('follower_list.txt','w') as file1:\n for id in follower_ids:\n file1.write(\"%s\\n\" % id)\n\n followee_ids = []\n for page in tweepy.Cursor(api.friends_ids, screen_name=target_ac).pages():\n followee_ids.extend(page)\n #print(followee_ids, len(followee_ids))\n with open('followee_list.txt','w') as file2:\n for id in followee_ids:\n file2.write(\"%s\\n\" % id)\n\nexcept tweepy.TweepError:\n print (\"tweepy.TweepError=\", tweepy.TweepError)\nexcept:\n e = sys.exc_info()[0]\n print (\"Error: %s\" % e)\n\n","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"271746067","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport pathlib\n\nfrom metadata.config.common import ConfigModel\nfrom metadata.ingestion.api.common import Record, WorkflowContext\nfrom metadata.ingestion.api.sink import Sink, SinkStatus\nfrom metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig\n\nlogger = logging.getLogger(__name__)\n\n\nclass FileSinkConfig(ConfigModel):\n filename: str\n\n\nclass FileSink(Sink):\n config: FileSinkConfig\n report: SinkStatus\n\n def __init__(\n self,\n ctx: WorkflowContext,\n config: FileSinkConfig,\n metadata_config: MetadataServerConfig,\n ):\n super().__init__(ctx)\n self.config = config\n self.metadata_config = metadata_config\n self.report = SinkStatus()\n\n fpath = pathlib.Path(self.config.filename)\n self.file = fpath.open(\"w\")\n self.file.write(\"[\\n\")\n self.wrote_something = False\n\n @classmethod\n def create(\n cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext\n ):\n config = FileSinkConfig.parse_obj(config_dict)\n metadata_config = MetadataServerConfig.parse_obj(metadata_config_dict)\n return cls(ctx, config, metadata_config)\n\n def write_record(self, record: Record) -> None:\n\n if self.wrote_something:\n self.file.write(\",\\n\")\n\n self.file.write(record.json())\n self.wrote_something = True\n self.report.records_written(record)\n\n def get_status(self):\n return self.report\n\n def close(self):\n self.file.write(\"\\n]\")\n self.file.close()\n","sub_path":"ingestion/src/metadata/ingestion/sink/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"290416095","text":"#\n# @lc app=leetcode.cn id=15 lang=python3\n#\n# [15] 三数之和\n#\n\n# @lc code=start\nfrom typing import List\nclass Solution:\n def __init__(self):\n self.res = []\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n \n if (not nums or len(nums)<3): return []\n nums.sort()\n print(nums)\n for i,v in enumerate(nums):\n if (v > 0):\n return self.res\n if (i>0 and v ==nums[i-1]):\n continue\n L=i+1\n R=len(nums)-1\n while(L<R):\n if (nums[i]+nums[L]+nums[R]==0):\n self.res.append([nums[i],nums[L],nums[R]])\n while (L<R and nums[L]==nums[L+1]):\n L=L+1\n while (L<R and nums[R]==nums[R-1]):\n R=R-1\n L=L+1\n R=R-1\n elif (nums[i]+nums[L]+nums[R]>0):\n R=R-1\n else:\n L=L+1\n return self.res\n\n# @lc code=end\nif __name__ == \"__main__\":\n test = Solution()\n print(test.threeSum([-1, 0, 1, 2, -1, -4]))","sub_path":"15.三数之和.py","file_name":"15.三数之和.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"428394080","text":"#!/usr/bin/env python\n\"\"\" lib/cloudtrax.py\n\n CloudTrax class for CloudScraper\n\n Copyright (c) 2013 The Goulburn Group. 
All Rights Reserved.\n\n http://www.goulburngroup.com.au\n\n Written by Alex Ferrara \n\n\"\"\"\n\nfrom BeautifulSoup import BeautifulSoup\nfrom lib.node import Node\nfrom lib.user import User\nimport cStringIO\nimport logging\nimport requests\nimport texttable\nimport pygal\nimport Image\n\n\n#\n# Helper functions\n#\n\ndef draw_table(entity_type, entities):\n \"\"\"Draws a text table representation of the data supplied\"\"\"\n\n header = {'gateway': ['Name\\n(mac)',\n 'Users',\n 'DL MB\\n(UL MB)',\n 'GWDL MB\\n(GWUL MB)',\n 'Up\\n(Down)',\n 'IP Address\\n(Firmware)'],\n 'relay': ['Name\\n(mac)',\n 'Users',\n 'DL MB\\n(UL MB)',\n 'Gateway\\n(Firmware)',\n 'Up\\n(Down)',\n 'Latency\\n(Hops)'],\n 'spare': ['Name\\n(mac)',\n 'Users',\n 'DL MB\\n(UL MB)',\n 'Up\\n(Down)',\n 'IP Address\\n(Firmware)']}\n\n table = texttable.Texttable()\n table.header(header[entity_type])\n\n for entity in entities:\n if entities[entity].get_type() == entity_type:\n table.add_row(entities[entity].get_table_row())\n\n return table.draw()\n\n\ndef distill_html(content, element, identifier):\n \"\"\"Accept some HTML and return the filtered output\"\"\"\n distilled_text = []\n\n trimed_content = BeautifulSoup(content).find(element, identifier)\n\n if element == 'table':\n\n try:\n for row in trimed_content.findAll('tr'):\n raw_values = []\n\n for cell in row.findAll('td'):\n raw_values.append(cell.findAll(text=True))\n\n # Watch out for blank rows\n if len(raw_values) > 0:\n # Create a new node object for each node in the network\n distilled_text.append(raw_values)\n\n except AttributeError:\n pass\n\n if element == 'select':\n\n try:\n for row in trimed_content.findAll('option', text=True):\n if len(row) > 0:\n distilled_text.append(row)\n\n except AttributeError:\n pass\n\n return distilled_text\n\n\ndef percentage(value, max_value):\n \"\"\"Returns a float representing the percentage that\n value is of max_value\"\"\"\n\n return (float(value) * 100) / max_value\n\n\nclass CloudTrax:\n \"\"\"CloudTrax connector class\"\"\"\n\n def __init__(self, config):\n \"\"\"Constructor\"\"\"\n self.nodes = dict()\n self.users = dict()\n self.usage = [0, 0]\n self.alerting = []\n\n self.session = requests.session()\n\n logging.info('Verbose output is turned on')\n\n self.config = config\n self.url = self.config.get_url()\n self.network = self.config.get_network()\n\n self.login()\n\n self.collect_nodes()\n self.collect_users()\n\n\n def login(self):\n \"\"\"Method to login and create a web session\"\"\"\n\n logging.info('Logging in to CloudTrax Dashboard')\n\n parameters = {'login': self.network['username'],\n 'login-pw': self.network['password'],\n 'status': 'View Status'}\n\n try:\n request = self.session.post(self.url['login'], data=parameters)\n request.raise_for_status()\n\n except requests.exceptions.HTTPError:\n logging.error('There was a HTTP error')\n exit(1)\n except requests.exceptions.ConnectionError:\n logging.error('There was a connection error')\n exit(1)\n\n # If the login referes to a master network with recursion,\n # we need to iterate through them to get our stats\n if self.network['recurse']:\n networks = distill_html(request.content, \n 'select',\n {'name': 'networks'})\n\n for network in networks:\n\n network = str(network).split(' ', 1)[0]\n\n if network not in self.network['networks']:\n self.network['networks'].append(network)\n\n return self.session\n\n def get_alerting(self):\n \"\"\"Return a list of alerting nodes\"\"\"\n return self.alerting\n\n def get_checkin_data(self, node_mac):\n 
\"\"\"Scrape checkin information on the current node\"\"\"\n\n parameters = {'mac': node_mac,\n 'legend': '0'}\n\n logging.info('Requesting node checkin status for %s', node_mac)\n\n request = self.session.get(self.url['checkin'], params=parameters)\n\n colour_counter = {'cccccc': 0, '1faa5f': 0, '4fdd8f': 0}\n\n checkin_img = Image.open(cStringIO.StringIO(request.content))\n\n row = 1\n\n pixelmap = checkin_img.load()\n\n for col in range(0, checkin_img.size[0]):\n pixel_colour = str(\"%x%x%x\" % (pixelmap[col, row][0],\n pixelmap[col, row][1],\n pixelmap[col, row][2]))\n\n if pixel_colour in colour_counter.keys():\n colour_counter[pixel_colour] += 1\n else:\n colour_counter[pixel_colour] = 1\n\n # Convert number of pixels into a percent\n time_as_gw = percentage(colour_counter['1faa5f'],\n checkin_img.size[0] - 2)\n time_as_relay = percentage(colour_counter['4fdd8f'],\n checkin_img.size[0] - 2)\n time_offline = percentage(colour_counter['cccccc'],\n checkin_img.size[0] - 2)\n time_online = time_as_gw + time_as_relay\n\n return (time_as_gw, time_as_relay, time_offline, time_online)\n\n def get_session(self):\n \"\"\"Return session id\"\"\"\n return self.session\n\n def get_sub_networks(self):\n \"\"\"Return a list of networks associated with this login\"\"\"\n return self.sub_networks\n\n def get_nodes(self):\n \"\"\"Return a list of node objects\"\"\"\n return self.nodes\n\n def get_users(self):\n \"\"\"Return a list of user objects\"\"\"\n return self.users\n\n def get_usage(self):\n \"\"\"Return network usage\"\"\"\n return self.usage\n\n def collect_nodes(self):\n \"\"\"Return network information scraped from CloudTrax\"\"\"\n\n for network in self.network['networks']:\n parameters = {'id': network,\n 'showall': '1',\n 'details': '1'}\n \n logging.info('Requesting network status') \n\n request = self.session.get(self.url['data'], params=parameters)\n\n logging.info('Received network status ok') \n\n if request.status_code == 200:\n for raw_values in distill_html(request.content, 'table',\n {'id': 'mytable'}):\n\n node = Node(raw_values,\n self.get_checkin_data(raw_values[2][0]),\n network)\n\n if node.is_alerting():\n logging.info('%s is alerting' % (node))\n self.alerting.append(node)\n\n self.nodes[node.get_mac()] = node\n\n else:\n logging.error('Request failed') \n exit(request.status_code)\n\n return self.nodes\n\n def collect_users(self):\n \"\"\"Return a list of wifi user statistics scraped from CloudTrax\"\"\"\n\n for network in self.network['networks']:\n parameters = {'id': network}\n \n logging.info('Requesting user statistics') \n\n request = self.session.get(self.url['user'], params=parameters)\n\n logging.info('Received user statistics ok') \n\n\n if request.status_code == 200:\n for raw_values in distill_html(request.content, 'table',\n {'class': 'inline sortable'}):\n\n user = User(raw_values)\n usage_dl = user.get_dl()\n usage_ul = user.get_ul()\n user_mac = user.get_mac()\n node_mac = user.get_node_mac()\n\n if user_mac in self.users.keys():\n self.users[user_mac].add_usage(usage_dl, usage_ul)\n else:\n self.users[user_mac] = user\n\n gateway = self.nodes[node_mac].add_usage(usage_dl, \n usage_ul)\n\n if gateway != 'self' and gateway != 'not reported':\n self.nodes[node_mac].add_gw_usage(usage_dl, usage_ul)\n\n self.usage[0] += usage_dl\n self.usage[1] += usage_ul\n\n else:\n logging.error('Request failed') \n exit(request.status_code)\n\n return self.users\n\n def graph(self, graph_type, title, arg, img_format='svg'):\n \"\"\"Return a rendered graph\"\"\"\n \n if 
graph_type == 'node':\n graph = self.graph_node_usage(arg)\n elif graph_type == 'user':\n graph = self.graph_user_usage()\n else:\n logging.error('Unknown graph type')\n exit(1)\n\n graph.title = title\n\n if img_format == 'png':\n return graph.render_to_png()\n\n return graph.render()\n\n def graph_node_usage(self, gw_only=False):\n \"\"\"Return a node graph\"\"\"\n\n graph_object = pygal.Pie()\n\n for node in self.nodes:\n if gw_only:\n if self.nodes[node].is_gateway():\n graph_object.add(self.nodes[node].get_name(), self.nodes[node].get_gw_usage())\n else:\n graph_object.add(self.nodes[node].get_name(), self.nodes[node].get_usage())\n\n return graph_object\n\n def graph_user_usage(self, gw_only=False):\n \"\"\"Return a user graph\"\"\"\n\n graph_object = pygal.XY(stroke=False)\n\n for user in self.users:\n graph_object.add(user, [(self.users[user].get_dl(),\n self.users[user].get_ul())])\n\n return graph_object\n\n def report_summary(self):\n \"\"\"Return a string containing a pretty summary report\"\"\"\n report = 'Summary statistics for the last 24 hours\\n'\n report += '----------------------------------------\\n\\n'\n if len(self.alerting) > 0:\n report += \"*** Warning - %s nodes are alerting ***\\n\\n\" % (len(self.alerting))\n\n report += \"Total users: %d\\n\" % len(self.users)\n\n report += \"Total downloads (MB): %.2f\\n\" % (float(self.usage[0]) / 1000)\n report += \"Total uploads (MB): %.2f\\n\" % (float(self.usage[1]) / 1000)\n report += '\\n\\n'\n\n return report\n\n def report_nodes(self):\n \"\"\"Return a string containing a pretty nodes report\"\"\"\n report = 'Node statistics for the last 24 hours\\n'\n report += '-------------------------------------\\n\\n'\n\n report += 'Gateway nodes\\n'\n report += draw_table('gateway', self.nodes)\n report += '\\n\\n'\n report += 'Relay nodes\\n'\n report += draw_table('relay', self.nodes)\n report += '\\n\\n'\n report += 'Spare nodes\\n'\n report += draw_table('spare', self.nodes)\n report += '\\n\\n'\n\n return report\n\n def report_users(self):\n \"\"\"Return a string containing a pretty user report\"\"\"\n report = 'User statistics for the last 24 hours\\n'\n report += '-------------------------------------\\n\\n'\n report += 'Users\\n'\n\n table = texttable.Texttable()\n table.header(['Name\\n(mac)',\n 'Last seen on',\n 'Blocked',\n 'DL MB',\n 'UL MB'])\n\n self.get_users()\n\n for user in self.users:\n table.add_row(self.users[user].get_table_row())\n\n report += table.draw()\n report += '\\n\\n'\n\n return report\n","sub_path":"lib/cloudtrax.py","file_name":"cloudtrax.py","file_ext":"py","file_size_in_byte":12290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"603853646","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nfrom .forms import DepositForm, WithdrawalForm\nfrom .models import Withdrawal, Deposit\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages, auth\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n\n\n\n# create a user\ndef createUser(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Account created succesfully')\n return redirect('login')\n else:\n form = UserCreationForm()\n return render(request, 'app/index.html', {'form': form})\n\n\n# log in to your 
account\ndef logIn(request):\n if request.user.is_authenticated:\n return redirect('user_page')\n\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n\n if user is not None:\n # correct username and password login the user\n auth.login(request, user)\n return redirect('user_page')\n else:\n messages.error(request, 'Error wrong username/password')\n\n return render(request, 'app/login.html')\n\n\n# log out of your account\ndef logout(request):\n auth.logout(request)\n return redirect('logout')\n\n\n# direct to admin page\ndef user_page(request):\n if not request.user.is_authenticated:\n return redirect('login')\n else:\n return render(request, 'app/user_page.html')\n\n\n@login_required()\ndef diposit_view(request):\n if not request.user.is_authenticated:\n raise get_object_or_404\n else:\n title = \"Deposit\"\n form = DepositForm(request.POST or None)\n\n if form.is_valid():\n deposit = form.save(commit=False)\n deposit.user = request.user\n # adds users deposit to balance.\n deposit.user.balance += deposit.amount\n deposit.user.save()\n deposit.save()\n messages.success(request, 'You Have Deposited {} $.'\n .format(deposit.amount))\n return redirect(\"user_page\")\n\n context = {\n \"title\": title,\n \"form\": form\n }\n return render(request, \"app/user_page.html\", context)\n\n\n@login_required()\ndef withdrawal_view(request):\n if not request.user.is_authenticated:\n raise get_object_or_404\n else:\n title = \"Withdraw\"\n form = WithdrawalForm(request.POST or None)\n\n if form.is_valid():\n withdrawal = form.save(commit=False)\n withdrawal.user = request.user\n\n # checks if user is tring Withdraw more than his balance.\n if withdrawal.user.balance >= withdrawal.amount:\n # substracts the users withdrawal from balance\n withdrawal.user.balance -= withdrawal.amount\n withdrawal.user.save()\n withdrawal.save()\n messages.error(request, 'You Have Withdrawn {} $.'\n .format(withdrawal.amount))\n return redirect(\"user_page\")\n\n else:\n messages.error(\n request,\n 'You Can Not Withdraw More Than Your Balance.'\n )\n\n context = {\n \"title\": title,\n \"form\": form\n }\n return render(request, \"app/user_page.html\", context)\n\n","sub_path":"Project-3/project/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"586762040","text":"# Text class is used to create text element or object\n# \n# You can visit my repository at\n# https://github.com/nutchun/PyCalculator\n# \n# This project is a part of Software Development Practice 1 course\n#\n# Developed by Nuttakan Chuntra\n# Computer Engineering student at KMUTNB\n# Student ID: 5901012630032\n# Bangkok, Thailand\n# Email: nut.ch40@gmail.com\n\nimport pygame\n\nclass Text:\n \"\"\"Draw text element\"\"\"\n\n def __init__(self, surface, text, fontName=\"Consolas\", fontSize=25, fontColor=(0, 0, 0), align=\"left\", pos=(0, 0), textArea=-1):\n self.surface = surface\n self.text = text\n self.fontName = fontName\n self.fontSize = fontSize\n self.fontColor = fontColor\n self.align = align\n self.pos = pos\n self.textArea = textArea\n self.bold = 0\n self.italic = 0\n self.labelSurf = None\n self.labelRect = None\n\n def __len__(self):\n \"\"\"Return the length of text\"\"\"\n return len(self.text)\n \n def setFontSize(self, size):\n self.fontSize = size\n\n def getRect(self):\n return 
self.labelRect\n \n def textDecor(self, bold=0, italic=0):\n self.bold = bold\n self.italic = italic\n\n def draw(self):\n self.label = pygame.font.SysFont(self.fontName, self.fontSize, self.bold, self.italic)\n self.labelSurf = self.label.render(self.text, True, self.fontColor)\n self.labelRect = self.labelSurf.get_rect()\n \n height = self.labelRect.h\n if self.align == \"left\":\n self.labelRect.top = self.pos[1] - height / 2\n self.labelRect.left = self.pos[0]\n elif self.align == \"center\":\n self.labelRect.center = (self.pos[0], self.pos[1])\n elif self.align == \"right\":\n self.labelRect.top = self.pos[1] - height / 2\n self.labelRect.right = self.pos[0]\n if self.textArea != -1:\n self.labelRect = self.textArea\n self.surface.blit(self.labelSurf, self.labelRect)\n","sub_path":"Text.py","file_name":"Text.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"510647722","text":"import sys\nsys.path.insert(0, '.')\n\nfrom runners.experiment_utils import load_experiment, save_experiment\nfrom cookiecutter_repo import dataset, test, model\nfrom cookiecutter_repo.utils import loaders\nimport logging\nfrom runners.utils import build_parser_for_yml_script, load_yaml\nfrom argparse import ArgumentParser\nimport os\n\ndef main(path_to_yml_file):\n config, exp, path_to_yml_file = load_experiment(path_to_yml_file)\n\n if 'test' not in config['datasets']:\n logging.error('Test dataset must be specified!')\n \n test_classes = config['test_config']['testers']\n testers = []\n for key in test_classes:\n TestClass = getattr(test, key)\n args = test_classes[key]\n testers.append((TestClass, args))\n\n _datasets = {}\n\n for key in ['test']:\n if key in config['datasets']:\n _datasets[key] = loaders.load_dataset(\n config['datasets'][key]['class'],\n config['datasets'][key]['folder'],\n config['dataset_config']\n )\n else:\n _datasets[key] = None\n\n _tester = test.EvaluationRunner(\n testers,\n config['algorithm_config'],\n _datasets['test'],\n config['info']['output_folder'],\n max_workers=config['test_config']['num_workers'],\n use_blocking_executor=config['test_config']['use_blocking_executor']\n )\n _tester.run()\n\nif __name__ == '__main__':\n parser = build_parser_for_yml_script()\n args = vars(parser.parse_args())\n main(args['spec'])","sub_path":"{{cookiecutter.repo_name}}/scripts/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"216638143","text":"from puzzlesolver.solver.Solver import Solver\nfrom puzzlesolver.util import *\nfrom puzzlesolver.puzzles.Hanoi import Hanoi\nfrom puzzlesolver.PuzzlePlayer import PuzzlePlayer\nimport queue as q\n\nclass GeneralSolver(Solver):\n def __init__(self, *args, **kwargs):\n self.values = {}\n self.remoteness = {}\n\n def getRemoteness(self, puzzle):\n self.solve(puzzle)\n if hash(puzzle) in self.remoteness: return self.remoteness[hash(puzzle)]\n return PuzzleValue.UNSOLVABLE\n\n def solve(self, puzzle):\n if hash(puzzle) in self.values: return self.values[hash(puzzle)]\n def helper(self, puzzle):\n queue = q.Queue()\n queue.put(puzzle)\n while not queue.empty():\n puzzle = queue.get()\n for move in puzzle.generateMoves():\n nextPuzzle = puzzle.doMove(move)\n if hash(nextPuzzle) not in self.remoteness:\n self.values[hash(nextPuzzle)] = PuzzleValue.SOLVABLE\n self.remoteness[hash(nextPuzzle)] = self.remoteness[hash(puzzle)] + 1\n                        
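# newly discovered position: enqueue it so its successors get remoteness + 1\n                        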
queue.put(nextPuzzle)\n\n ends = puzzle.generateSolutions()\n for end in ends: \n self.values[hash(end)] = PuzzleValue.SOLVABLE\n self.remoteness[hash(end)] = 0\n helper(self, end)\n if hash(puzzle) not in self.values: self.values[hash(puzzle)] = PuzzleValue.UNSOLVABLE\n return self.values[hash(puzzle)]\n\ndef testTutorial():\n puzzle = Hanoi()\n solver = GeneralSolver()\n solver.solve(puzzle)\n assert solver.getRemoteness(puzzle) == 7","sub_path":"tests/testSolverTutorial.py","file_name":"testSolverTutorial.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"139009614","text":"import pytest\nimport torch\nfrom torch.utils.data import DataLoader\n\nimport torchvision\nfrom recogners.models import dropout_rbm\n\n\ndef test_rbm_properties():\n new_dropout_rbm = dropout_rbm.DropoutRBM()\n\n assert new_dropout_rbm.p == 0.5\n\n\ndef test_rbm_hidden_sampling():\n new_dropout_rbm = dropout_rbm.DropoutRBM()\n\n v = torch.ones(1, 128)\n\n probs, states = new_dropout_rbm.hidden_sampling(v)\n\n assert probs.size(1) == 128\n assert states.size(1) == 128\n\n\ndef test_rbm_reconstruct():\n test = torchvision.datasets.MNIST(\n root='./data', train=False, download=True, transform=torchvision.transforms.ToTensor())\n\n test_batches = DataLoader(test, batch_size=10000,\n shuffle=True, num_workers=1)\n\n new_dropout_rbm = dropout_rbm.DropoutRBM(n_visible=784, n_hidden=128, steps=1,\n learning_rate=0.1, momentum=0, decay=0, temperature=1, dropout=0.5)\n\n e, v = new_dropout_rbm.reconstruct(test_batches)\n\n assert e >= 0\n assert v.size(1) == 784\n","sub_path":"tests/recogners/models/test_dropout_rbm.py","file_name":"test_dropout_rbm.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"270034401","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef lineFromPoints(point1, point2):\n k = (point2[1] - point1[1]) / (point2[0] - point1[0])\n b = - point1[0] * k + point1[1]\n return [k, b]\n\n\ndef distanceToLine(xPoint, xLine):\n temp1 = xLine[0] * xPoint[0] - xPoint[1] + xLine[1]\n temp2 = math.sqrt(xLine[0] * xLine[0] + 1)\n return temp1 / temp2\n\n\n# 输出列表中在直线上方最远的点的下标\n# 如果所有的点都在直线的下方,则输出-1\ndef getMaxPoint(xPointList, xline):\n tempDistance = []\n tempPointList = []\n for iPoint in xPointList:\n tempDis = distanceToLine(iPoint, xline)\n if tempDis < 0:\n tempDistance.append(tempDis)\n tempPointList.append(iPoint)\n if tempDistance:\n maxIndex = tempDistance.index(min(tempDistance))\n tempIndex = xPointList.index(tempPointList[maxIndex])\n return tempPointList[maxIndex], tempIndex\n else:\n return -1, -1\n\n\ndef listSplit(xList, xIndex):\n return xList[0:xIndex], xList[xIndex+1:]\n\n\ndef testWork(xStartPoint, xEndPoint, xPointList):\n beginLine = lineFromPoints(xStartPoint, xEndPoint)\n maxPoint, maxIndex = getMaxPoint(xPointList, beginLine)\n if maxIndex is not -1:\n subList1, subList2 = listSplit(xPointList, maxIndex)\n print(maxPoint)\n if len(subList1) > 0:\n subStartPoint = xStartPoint\n subEndPoint = xPointList[maxIndex]\n testWork(subStartPoint, subEndPoint, subList1)\n if len(subList2) > 0:\n subStartPoint = xPointList[maxIndex]\n subEndPoint = xEndPoint\n testWork(subStartPoint, subEndPoint, subList2)\n\n\n# 定义起始点和终止点\nstartPoint = (-1, np.random.uniform(0, 1))\nendPoint = (20, np.random.uniform(0, 1))\n\n# 定义两点之间的随机边缘\nx = list(range(20))\ny = 
list(np.random.rand(20))\nplt.scatter(*startPoint, color='black')\nplt.scatter(*endPoint, color='black')\nplt.scatter(x, y, c='r', marker='*')\nplt.plot(x, y, 'b--')\nplt.show()\nallPoint = list(zip(x, y))\nprint('***********************************************')\ntestWork(startPoint, endPoint, allPoint)\n","sub_path":"test/testPointLine.py","file_name":"testPointLine.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"590704269","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 19 17:14:29 2018\n\n@author: GTayl\n\"\"\"\n################################## Set-up ##########################################\n\n# Import the required packages\nimport pandas as pd\nimport time\nimport os\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\n\n# Change the working directory\nos.chdir(\"C:\\\\Users\\\\GTayl\\\\Desktop\\\\Finance Modeling\\\\Wikipedia\")\ncwd = os.getcwd()\nfrom Wikipedia_Page_Features import Get_Wiki_Page_Data \n\n################################## Data-Prep ##########################################\n\n# Read in Seed List\nseed_file = \"Machine_Learning_Seed_List.xlsx\"\nseed_import = pd.read_excel(cwd+\"\\\\Seeds\\\\\"+seed_file,names=['Page','Tag'],header=None)\n\n# Dedupe Seeds and Define Tags\n\n# Obtain deduped seed list\nseed_list = pd.DataFrame(seed_import['Page']).drop_duplicates()\n\ndef seed_tag_collector(seed_import, seed):\n    \n    # Subset Dataframe for Seed Entry only\n    temp = seed_import[seed_import['Page']==seed]\n    \n    # Get a list of all the tags that apply to that seed and convert to a Kumu compliant text string\n    tag_list = list(temp['Tag'])\n    tag_string = \"\"\n    for tag in tag_list:\n        tag_string = tag_string+str(tag)+\"|\"\n    tag_string = tag_string[:-1]\n    return(tag_string)\n\n# Generate Seed List with Corresponding Tags\nseed_list['Tags'] = \"\"\nseed_list['Tags'] = seed_list.apply(lambda row: seed_tag_collector(seed_import, row['Page']),axis=1)\n\n# test = seed_list.head(100)\n# seed_list = test\n\n################################## Wikipedia API Call ##########################################\n\n# Initialize Master Lists\nMaster_Node_List = pd.DataFrame(columns=['Label','Tags','Description','Average_pg_views'])\nMaster_Direct_Edge_List = pd.DataFrame(columns=['To','From','Strength','Tag'])\nMaster_Implied_Edge_List = pd.DataFrame(columns=['To','From','Strength','Tag'])\n\n# API Call for Seed Set\nfor index, row in seed_list.iterrows():\n    print(\"Collecting data for: \"+str(row['Page']))\n    page = Get_Wiki_Page_Data(str(row['Page']))\n\n    # Append node features\n    Master_Node_List = Master_Node_List.append({'Label':page['title'], 'Tags':row['Tags'],'Description':page['description'], 'Average_pg_views':page['avg_page_views']}, ignore_index=True)\n    \n    # Append edge features\n    for e in page['explicit_links']:\n        Master_Direct_Edge_List = Master_Direct_Edge_List.append({\"To\":str(e), \"From\":page['title'], \"Strength\":1, \"Tag\":\"Direct\"}, ignore_index=True)\n    \n    for e in page['implied_links']:\n        Master_Implied_Edge_List = Master_Implied_Edge_List.append({\"To\":str(e), \"From\":page['title'], \"Strength\":1, \"Tag\":\"Implied\"}, ignore_index=True)\n    \n    time.sleep(1.5)\n    \n# Cleaning Direct Edge List\n# Cleaned_Edges = Master_Edge_List[Master_Edge_List['Tag']=='Direct']\n\n################################## Wikipedia API Call - Secondary Links ##########################################\n    \nCleaned_Edges = pd.DataFrame(Master_Direct_Edge_List['To']).drop_duplicates()\n#Cleaned_Edges = Cleaned_Edges.rename(index=str, columns={'To':'Label'})\n#s1 = pd.merge(Master_Node_List, Cleaned_Edges, how='outer', on=['Label'])\n\n# Unique Nodes that were not in the original seed set\nUnique_New_Nodes = list(set(Cleaned_Edges.To).difference(Master_Node_List.Label))\n\nSecondary_Node_List = pd.DataFrame(columns=['Label','Tags','Description','Average_pg_views'])\nSecondary_Direct_Edge_List = pd.DataFrame(columns=['To','From','Strength','Tag'])\nSecondary_Implied_Edge_List = pd.DataFrame(columns=['To','From','Strength','Tag'])\n\n# API Call for Secondary Set\nfor Page in Unique_New_Nodes:\n    print(\"Collecting data for: \"+str(Page))\n    page = Get_Wiki_Page_Data(str(Page))\n\n    # Append node features\n    Secondary_Node_List = Secondary_Node_List.append({'Label':page['title'], 'Tags':\"\",'Description':page['description'], 'Average_pg_views':page['avg_page_views']}, ignore_index=True)\n\n    # Append edge features\n    for e in page['explicit_links']:\n        Secondary_Direct_Edge_List = Secondary_Direct_Edge_List.append({\"To\":str(e), \"From\":page['title'], \"Strength\":1, \"Tag\":\"Direct\"}, ignore_index=True)\n    \n    #for e in page['implied_links']:\n        #Secondary_Implied_Edge_List = Secondary_Implied_Edge_List.append({\"To\":str(e), \"From\":page['title'], \"Strength\":1, \"Tag\":\"Implied\"}, ignore_index=True)\n    \n    time.sleep(0.5)\n\n################################## Exporting Data ##########################################\n    \n# Export Edges and Nodes Lists\nMaster_Node_List.to_excel(cwd+\"\\\\Seeds\\\\\"+'ML_Node_List_Master_Final.xlsx',index=False)\nMaster_Direct_Edge_List.to_excel(cwd+\"\\\\Seeds\\\\\"+'ML_Direct_Edge_List_Master_Final.xlsx',index=False) \nSecondary_Node_List.to_excel(cwd+\"\\\\Seeds\\\\\"+'ML_Node_List_Secondary_Final.xlsx',index=False)\nSecondary_Direct_Edge_List.to_excel(cwd+\"\\\\Seeds\\\\\"+'ML_Direct_Edge_List_Secondary_Final.xlsx',index=False)\n\n################################## Exploring Data ##########################################\n\n# EDA of Edges\n# Master Edge Counts\nMaster_Edge_Counts = pd.DataFrame(Master_Direct_Edge_List['To'])\nMaster_Edge_Counts = Master_Edge_Counts['To'].value_counts()\n\n# Secondary Edge Counts (only for nodes in the Master or Secondary Lists)\nComplete_Node_List = Unique_New_Nodes + list(Master_Node_List.Label)\nSubset_Edges_List = Secondary_Direct_Edge_List[Secondary_Direct_Edge_List['To'].isin(Complete_Node_List)]\n\n\nJoint_Edge_Counts = pd.DataFrame(Master_Direct_Edge_List['To'])\nJoint_Edge_Counts = Joint_Edge_Counts.append(Subset_Edges_List, ignore_index=True)\nJoint_Edge_Counts = Joint_Edge_Counts['To'].value_counts()\n\nJoint_Edge_Counts.to_excel(cwd+\"\\\\Seeds\\\\\"+'ML_Joint_Edge_Counts_Final.xlsx',index=False)\n\n","sub_path":"Wikipedia/Wikipedia_Node_Edge_Extractor.py","file_name":"Wikipedia_Node_Edge_Extractor.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"201183125","text":"# put your python code here\n\ndef neighbours(i, j):\n    return [i-1, j], [i+1, j], [i, j-1], [i, j+1]\n\n\ndef label_area(m, i, j, label):\n    m[i][j] = label\n    for ni, nj in neighbours(i, j):\n        if m[ni][nj] == 1:\n            label_area(m, ni, nj, label)\n\nh, w = map(int, input().split())\nm = []\n\nfor i in range(h):\n    line = map(int, input().split())\n    m.append(list(line))\n\nlabel = 2\nfor i in range(h):\n    for j in range(w):\n        if m[i][j] == 1:\n            label_area(m, i, j, label)\n            label += 
1\n\nareas = {}\n\nfor i in range(h):\n    for j in range(w):\n        value = m[i][j]\n        if value != 0:\n            if value not in areas:\n                areas[value] = 1\n            else:\n                areas[value] += 1\n\nfor area in sorted(areas.values()):\n    print(area)\n\n\n","sub_path":"final/subject_tour/inf1503_ar/task_02/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"336198256","text":"#!/usr/bin/env python3\n\n# Script for generating a lexicon from the speakers corpus\n\nimport argparse\nimport collections\nimport glob\nfrom lxml import etree\nimport os\nimport re\nimport sys\nimport unicodedata\n\n# load the utterances and their text\ndef utterances(speaker_dir):\n    labdir = os.path.join(speaker_dir, 'hts_labels')\n    for txtfn in glob.iglob(speaker_dir + '/txt/*.txt'):\n        speaker = os.path.basename(txtfn).split('_')[0]\n        group = os.path.basename(txtfn).split('_')[1]\n        with open(txtfn, encoding='utf8') as txt:\n            for line in txt:\n                number, utterance_text = line.split(None,1)\n                utterance_name = f\"{speaker}_{group}_{number.strip().strip('.')}\"\n                labelfn = os.path.join(labdir, group, utterance_name + '.lab')\n                if not os.path.isfile(labelfn):\n                    sys.stderr.write(f\"WARN: cannot find label file for {utterance_name}\\n\")\n                    continue\n                utt = {\n                    'name' : utterance_name,\n                    'text' : utterance_text.strip(),\n                    'labelfn' : labelfn\n                }\n                yield(utt)\n    return\n\n# loads a word indexed file\ndef load_pronunciations(utt):\n    utt['pron'] = {}\n    word_to_idx = {}\n    # named groups restored (p1..p5); p3, the centre phone, is the one read below\n    phones_pat = re.compile(\"^(?P<p1>.*?)~(?P<p2>.*?)-(?P<p3>.*?)\\\+(?P<p4>.*?)=(?P<p5>[^:]*)\")\n    with open(utt['labelfn'], encoding='utf8') as lab:\n        for phonelab in lab:\n            phonelab = phonelab.strip()\n            _, _, label = phonelab.split(None, 3)\n            label = label.split('/')\n            label = dict(zip(['P']+label[1::2], label[0::2]))\n            phone_mobj = phones_pat.match(label['P'])\n            phone = phone_mobj.group('p3')\n            vowel = label['B'].split('|')[-1]\n            if phone == vowel:\n                phone += label['B'][0]\n            word_in_phrase = label['E'].split('+',1)[1].split(':',1)[1].split('+')[0]\n            phrase_in_utterance = label['H'].split('=',1)[1].split(':')[1].split('=')[0]\n            try:\n                word_in_phrase = int(word_in_phrase)\n                phrase_in_utterance = int(phrase_in_utterance)\n            except:\n                continue\n            wid = (word_in_phrase, phrase_in_utterance)\n            widx = len(word_to_idx)\n            if wid not in word_to_idx:\n                word_to_idx[wid] = widx\n            word = word_to_idx[wid]\n            if word not in utt['pron']:\n                utt['pron'][word] = ''\n            else:\n                utt['pron'][word] += ' '\n            utt['pron'][word] += phone\n    return\n\n\ndef align_words_to_pron(utt):\n    txt = re.sub(r'[()\\[\\]{}\"\\'!?.,;:|]', '', utt['text'])\n    words = txt.split()\n    if len(words) != len(utt['pron']):\n        return False\n    utt['lex'] = {}\n    for widx, pron in utt['pron'].items():\n        utt['lex'][words[widx].lower()] = pron\n    return True\n\n\ndef fix_entry(word, pron):\n    \n    # put 0 on missing stress characters\n    def _fix_missing_stress(pron):\n        stress_characters = ['@', 'a', 'a@', 'e', 'i', 'o', 'u']\n        phones = pron.split()\n        pron = []\n        for p in phones:\n            if p in stress_characters:\n                p += '0'\n            pron.append(p)\n        pron = ' '.join(pron)\n        return pron\n\n    if '-' in word:\n        return None, None\n\n    word = unicodedata.normalize('NFD', word) # make sure words are fully decomposed\n    pron = _fix_missing_stress(pron)\n\n    stress_count = pron.count('1')\n    unstress_count = pron.count('0')\n    if stress_count == 1:\n        return word, pron\n    if unstress_count == 1:\n        # switched the only unstressed with a stressed vowel\n        pron = pron.replace('0','1')\n        
return word, pron\n\n # cant fix the stress\n return None, None\n\n\ndef make_lexicon_xml(lexicon):\n lexicon_xml = etree.Element('lexicon')\n for grapheme in sorted(lexicon.keys()):\n for pron_idx, pron in enumerate(lexicon[grapheme]):\n lex = etree.SubElement(lexicon_xml, 'lex')\n lex.set('pron', pron)\n lex.set('entry', 'full' if (pron_idx == 0) else f\"variant_{pron_idx}\")\n lex.set('default', 'true' if (pron_idx == 0) else 'false')\n lex.text = grapheme\n return lexicon_xml\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbose\", action = \"count\", default = 0)\n parser.add_argument(\"speaker_dir\", help = \"base directory of the speaker in the corpus\")\n parser.add_argument(\"output\", type = argparse.FileType('wb'), help = \"output lexicon\")\n args = parser.parse_args()\n\n lexicon = collections.defaultdict(set)\n \n for utt in utterances(args.speaker_dir):\n if args.verbose:\n print(f\"INFO loading: {utt['name']}\")\n load_pronunciations(utt)\n if not align_words_to_pron(utt):\n print(f\"WARN could not align words to pronunciations for: {utt['name']}\")\n continue\n if args.verbose:\n print(f\"INFO adding {len(utt['lex'])} words to lexicon\")\n for w, p in utt['lex'].items():\n word, pron = fix_entry(w, p)\n if pron is not None:\n lexicon[word].add(pron)\n\n lexicon_xml = make_lexicon_xml(lexicon)\n args.output.write(\n etree.tostring(lexicon_xml, encoding='utf8', pretty_print=True, xml_declaration=True)\n )\n args.output.write(b'\\n')\n \n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"idlak-egs/tts_tangle_swara/s2/make_lexicon.py","file_name":"make_lexicon.py","file_ext":"py","file_size_in_byte":5460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"298350107","text":"# -*- coding: utf-8 -*-\r\nimport tkinter as tk\r\n\r\n\r\n\r\nclass AboutWindow:\r\n def __init__(self,master=None,*args,**kw):\r\n self.master=master\r\n self.window=tk.Toplevel(master)\r\n self.window.title('О программе')\r\n g,x,y=self.master.geometry().split('+')\r\n x,y,w,h=int(x),int(y),int(g.split('x')[0]),int(g.split('x')[1])\r\n w1,h1=300,100\r\n self.window.geometry(newGeometry='%sx%s+%s+%s'%(w1,h1,x+int(w/2-w1/2),y+int(h/2-h1/2)))\r\n self.window.transient(master)\r\n self.window.columnconfigure(0,weight=1)\r\n AboutWindowText='\\nПривет\\nКак дела?\\nЯ думаю тебе подсказки не нужны!'\r\n self.label=tk.Label(self.window,text=AboutWindowText,anchor=tk.CENTER)\r\n self.label.grid(row=0,column=0,sticky=tk.W+tk.E)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n root=tk.Tk()\r\n root.after(100,lambda: AboutWindow(root))\r\n root.mainloop()\r\n","sub_path":"libs/AboutWindow.py","file_name":"AboutWindow.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"162832625","text":"\"\"\"A video player class.\"\"\"\r\nimport pywin32_bootstrap\r\nfrom .video_library import VideoLibrary\r\nfrom random import choice\r\n\r\n\r\nclass VideoPlayer:\r\n \"\"\"A class used to represent a Video Player.\"\"\"\r\n\r\n def __init__(self):\r\n global current_video, paused, playlistDB, playlist_case_map, flag_of_all_videos, error_messages\r\n current_video = False\r\n paused = False\r\n playlistDB = {}\r\n playlist_case_map = {}\r\n flag_of_all_videos = {}\r\n # some of the repeating error phases saved here.\r\n error_messages = {'no_vid': 'Video does not exist',\r\n 'no_vid_playing': \"No video is currently playing\",\r\n 
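# short, reusable message fragments; call sites prepend the context-specific prefix\r\n                          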
'not_paused': 'Video is not paused',\r\n                          'no_playlist': 'Playlist does not exist',\r\n                          'playlist_exist': 'A playlist with the same name already exists',\r\n                          'not_in_playlist': 'Video is not in playlist',\r\n                          'already_flagged': 'Video is currently flagged (reason: %s)',\r\n                          'already_added': 'Video already added',\r\n                          'not_flagged': 'Video is not flagged'\r\n                          }\r\n        self._video_library = VideoLibrary()\r\n\r\n    def is_id_real(self, video_id):\r\n        \"\"\"\r\n        Return the video. If flagged, return the reason, too.\r\n        :param video_id: The video_id in question.\r\n        \"\"\"\r\n        video_in_question = self._video_library.get_video(video_id=video_id)\r\n        if video_in_question is None:\r\n            return False, False\r\n        elif video_in_question in flag_of_all_videos:\r\n            return video_in_question, flag_of_all_videos[video_in_question]\r\n        else:\r\n            return video_in_question, False\r\n\r\n    def get_all_videos_not_flagged(self):\r\n        # list comprehension instead of set arithmetic: get_all_videos() returns a list,\r\n        # and list - dict_keys raises a TypeError\r\n        return [v for v in self._video_library.get_all_videos() if v not in flag_of_all_videos]\r\n\r\n    def video_info_formatting(self, vid):\r\n        \"\"\"\r\n        Show the video info\r\n        :param vid: The video in question\r\n        \"\"\"\r\n        tags = \" \".join(list(vid.tags))\r\n        output = f\"{vid.title} ({vid.video_id}) [{tags}]\"\r\n        if vid in flag_of_all_videos:\r\n            output += f' - FLAGGED (reason: {flag_of_all_videos[vid]})'\r\n        return output\r\n\r\n    def number_of_videos(self):\r\n        num_videos = len(self._video_library.get_all_videos())\r\n        print(f\"{num_videos} videos in the library\")\r\n\r\n    def show_all_videos(self):\r\n        \"\"\"Returns all videos.\"\"\"\r\n        list_videos = self._video_library.get_all_videos()\r\n        list_videos.sort(key=lambda _videos: _videos.title)\r\n        print(\"Here's a list of all available videos:\")\r\n        for vid in list_videos:\r\n            print(self.video_info_formatting(vid))\r\n\r\n    def play_video(self, video_id):\r\n        \"\"\"Plays the respective video.\r\n\r\n        Args:\r\n            video_id: The video_id to be played.\r\n        \"\"\"\r\n        global current_video, paused\r\n        video, flag_reason = self.is_id_real(video_id)\r\n        paused = False\r\n        local_error_message = \"Cannot play video: \"\r\n\r\n        # if the video does not exist\r\n        if video is False:\r\n            print(local_error_message + error_messages['no_vid'])\r\n        # if the video is flagged\r\n        elif flag_reason is not False:\r\n            print(local_error_message + error_messages['already_flagged'] % flag_reason)\r\n        else:\r\n            if current_video is not False:\r\n                print(f\"Stopping video: {current_video.title}\")\r\n            current_video = video\r\n            print(f\"Playing video: {video.title}\")\r\n\r\n    def stop_video(self):\r\n        \"\"\"Stops the current video.\"\"\"\r\n        global current_video\r\n\r\n        if current_video is not False:\r\n            print(f\"Stopping video: {current_video.title}\")\r\n            current_video = False\r\n        else:\r\n            print(\"Cannot stop video: \" + error_messages['no_vid_playing'])\r\n\r\n    def play_random_video(self):\r\n        \"\"\"Plays a random video from the video library.\"\"\"\r\n        list_playable_videos = self.get_all_videos_not_flagged()\r\n        if len(list_playable_videos) == 0:\r\n            print('No videos available')\r\n        else:\r\n            random_video = choice(list_playable_videos)\r\n            self.play_video(random_video.video_id)\r\n\r\n    def pause_video(self):\r\n        \"\"\"Pauses the current video.\"\"\"\r\n        global paused, current_video\r\n        if current_video is False:\r\n            print(\"Cannot pause video: \" + error_messages['no_vid_playing'])\r\n        elif paused:\r\n            print(f\"Video already paused: {current_video.title}\")\r\n        else:\r\n            paused = True\r\n            print(f\"Pausing video: {current_video.title}\")\r\n\r\n    def continue_video(self):\r\n        \"\"\"Resumes playing the current video.\"\"\"\r\n        global paused, current_video\r\n        if current_video is False:\r\n            print(\"Cannot continue video: \" + error_messages['no_vid_playing'])\r\n        elif not paused:\r\n            print(\"Cannot continue video: \" + error_messages['not_paused'])\r\n        else:\r\n            paused = False\r\n            print(f\"Continuing video: {current_video.title}\")\r\n\r\n    def show_playing(self):\r\n        \"\"\"Displays video currently playing.\"\"\"\r\n        global paused, current_video\r\n        if current_video is False:\r\n            print(\"No video is currently playing\")\r\n        else:\r\n            print(\"Currently playing: \" + self.video_info_formatting(current_video) + '%s'\r\n                  % (\" - PAUSED\" if paused else \"\"))\r\n\r\n    def is_playlist_real(self, playlist_name):\r\n        \"\"\"Checks if the playlist already exists.\r\n        If it exists, return the original playlist name;\r\n        if not, return False.\r\n\r\n        Args:\r\n            playlist_name: The playlist name.\r\n        \"\"\"\r\n        if not playlist_name.lower() in playlist_case_map.keys():\r\n            original_playlist_name = False\r\n        else:\r\n            original_playlist_name = playlist_case_map[playlist_name.lower()]\r\n        return original_playlist_name\r\n\r\n    def create_playlist(self, playlist_name):\r\n        \"\"\"Creates a playlist with a given name.\r\n\r\n        Args:\r\n            playlist_name: The playlist name.\r\n        \"\"\"\r\n        if self.is_playlist_real(playlist_name) is False:\r\n            playlist_case_map[playlist_name.lower()] = playlist_name\r\n            playlistDB[playlist_name] = []\r\n            print(f\"Successfully created new playlist: {playlist_name}\")\r\n        else:\r\n            print(\"Cannot create playlist: A playlist with the same name already exists\")\r\n\r\n    def add_to_playlist(self, playlist_name, video_id):\r\n        \"\"\"Adds a video to a playlist with a given name.\r\n\r\n        Args:\r\n            playlist_name: The playlist name.\r\n            video_id: The video_id to be added.\r\n        \"\"\"\r\n        add = False\r\n        video, flag_reason = self.is_id_real(video_id)\r\n        original_playlist_name = self.is_playlist_real(playlist_name)\r\n        local_error_message = f\"Cannot add video to {playlist_name}: \"\r\n\r\n        if original_playlist_name is False:\r\n            print(local_error_message + error_messages['no_playlist'])\r\n\r\n        elif video is False:\r\n            print(local_error_message + error_messages['no_vid'])\r\n\r\n        elif flag_reason is not False:\r\n            print(local_error_message + error_messages['already_flagged'] % flag_reason)\r\n\r\n        elif video in playlistDB[original_playlist_name]:\r\n            print(local_error_message + error_messages['already_added'])\r\n        else:\r\n            add = True\r\n\r\n        if add:\r\n            playlistDB[original_playlist_name].append(video)\r\n            print(f'Added video to {playlist_name}: {video.title}')\r\n\r\n    def show_all_playlists(self):\r\n        \"\"\"Display all playlists.\"\"\"\r\n        playlistlist = sorted(list(playlistDB.keys()), key=str.lower)\r\n\r\n        if len(playlistlist) == 0:\r\n            print('No playlists exist yet')\r\n        else:\r\n            print('Showing all playlists:')\r\n            for playlist in playlistlist:\r\n                print(f' {playlist}')\r\n\r\n    def show_playlist(self, playlist_name):\r\n        \"\"\"Display all videos in a playlist with a given name.\r\n\r\n        Args:\r\n            playlist_name: The playlist name.\r\n        \"\"\"\r\n        original_playlist_name = self.is_playlist_real(playlist_name)\r\n        if original_playlist_name is False:\r\n            print(f'Cannot show playlist {playlist_name}: ' + error_messages['no_playlist'])\r\n        else:\r\n            list_of_videos = playlistDB[original_playlist_name]\r\n\r\n            print(f'Showing playlist: {playlist_name}')\r\n            if len(list_of_videos) == 0:\r\n                print('No videos here yet')\r\n            else:\r\n                for video in list_of_videos:\r\n                    
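# same formatting helper as show_all_videos, so flagged entries show their reason\r\n                    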
print(self.video_info_formatting(video))\r\n\r\n def remove_from_playlist(self, playlist_name, video_id):\r\n \"\"\"Removes a video to a playlist with a given name.\r\n\r\n Args:\r\n playlist_name: The playlist name.\r\n video_id: The video_id to be removed.\r\n \"\"\"\r\n remove = False\r\n video, _ = self.is_id_real(video_id)\r\n original_playlist_name = self.is_playlist_real(playlist_name)\r\n cannot_remove = f\"Cannot remove video from {playlist_name}: \"\r\n\r\n if original_playlist_name is False:\r\n print(cannot_remove + error_messages['no_playlist'])\r\n elif video is False:\r\n print(cannot_remove + error_messages['no_vid'])\r\n elif video not in playlistDB[original_playlist_name]:\r\n print(cannot_remove + error_messages['not_in_playlist'])\r\n else:\r\n remove = True\r\n\r\n if remove:\r\n playlistDB[original_playlist_name].remove(video)\r\n print(f'Removed video from {playlist_name}: {video.title}')\r\n\r\n def clear_playlist(self, playlist_name):\r\n \"\"\"Removes all videos from a playlist with a given name.\r\n\r\n Args:\r\n playlist_name: The playlist name.\r\n \"\"\"\r\n original_playlist_name = self.is_playlist_real(playlist_name)\r\n if original_playlist_name is False:\r\n print(f'Cannot clear playlist {playlist_name}: ' + error_messages['no_playlist'])\r\n else:\r\n playlistDB[original_playlist_name].clear()\r\n print(f'Successfully removed all videos from {playlist_name}')\r\n\r\n def delete_playlist(self, playlist_name):\r\n \"\"\"Deletes a playlist with a given name.\r\n\r\n Args:\r\n playlist_name: The playlist name.\r\n \"\"\"\r\n original_playlist_name = self.is_playlist_real(playlist_name)\r\n if original_playlist_name is False:\r\n print(f'Cannot delete playlist {playlist_name}: ' + error_messages['no_playlist'])\r\n else:\r\n del playlistDB[original_playlist_name]\r\n del playlist_case_map[original_playlist_name.lower()]\r\n print(f'Deleted playlist: {playlist_name}')\r\n\r\n def search_display(self, key_word, match_list):\r\n \"\"\"Display search results using match list\r\n\r\n :param key_word: search term or video tag\r\n :param match_list: list of videos that match with the search\r\n :return:\r\n \"\"\"\r\n if len(match_list) == 0:\r\n print(f'No search results for {key_word}')\r\n else:\r\n match_list.sort(key=lambda _videos: _videos.title)\r\n print(f'Here are the results for {key_word}:')\r\n for index, vid in enumerate(match_list):\r\n print(f'{index + 1}) ' + self.video_info_formatting(vid))\r\n\r\n print('Would you like to play any of the above? 
If yes, specify the number of the video.')\r\n        print(\"If your answer is not a valid number, we will assume it's a no.\")\r\n        index_wanted_raw = input()\r\n        try:\r\n            index_wanted = int(index_wanted_raw) - 1\r\n            self.play_video(match_list[index_wanted].video_id)\r\n        except:\r\n            pass\r\n\r\n    def search_videos(self, search_term):\r\n        \"\"\"Display all the videos whose titles contain the search_term.\r\n\r\n        Args:\r\n            search_term: The query to be used in search.\r\n        \"\"\"\r\n        match_list = [vid for vid in self.get_all_videos_not_flagged() if search_term.lower() in vid.title.lower()]\r\n        self.search_display(search_term, match_list)\r\n\r\n    def search_videos_tag(self, video_tag):\r\n        \"\"\"Display all videos whose tags contain the provided tag.\r\n\r\n        Args:\r\n            video_tag: The video tag to be used in search.\r\n        \"\"\"\r\n        match_list = [vid for vid in self.get_all_videos_not_flagged() if video_tag.lower() in vid.tags]\r\n        self.search_display(video_tag, match_list)\r\n\r\n    def flag_video(self, video_id, flag_reason=\"Not supplied\"):\r\n        \"\"\"Mark a video as flagged.\r\n\r\n        Args:\r\n            video_id: The video_id to be flagged.\r\n            flag_reason: Reason for flagging the video.\r\n        \"\"\"\r\n        global current_video\r\n        video, flag_reason_old = self.is_id_real(video_id)\r\n        if video is False:\r\n            print('Cannot flag video: ' + error_messages['no_vid'])\r\n        elif flag_reason_old is not False:\r\n            print('Cannot flag video: Video is already flagged')\r\n        else:\r\n            flag_of_all_videos[video] = flag_reason\r\n            if current_video == video:\r\n                self.stop_video()\r\n            print(f\"Successfully flagged video: {video.title} (reason: {flag_reason})\")\r\n\r\n    def allow_video(self, video_id):\r\n        \"\"\"Removes a flag from a video.\r\n\r\n        Args:\r\n            video_id: The video_id to be allowed again.\r\n        \"\"\"\r\n        video, flag_reason = self.is_id_real(video_id)\r\n\r\n        if video is False:\r\n            print('Cannot remove flag from video: ' + error_messages['no_vid'])\r\n        elif flag_reason is False:\r\n            print('Cannot remove flag from video: ' + error_messages['not_flagged'])\r\n        else:\r\n            del flag_of_all_videos[video]\r\n            print(f'Successfully removed flag from video: {video.title}')\r\n","sub_path":"google-code-sample/python/src/video_player.py","file_name":"video_player.py","file_ext":"py","file_size_in_byte":14111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"591981801","text":"import json\nimport scrapy\nfrom scrapy import cmdline\nimport csv\nimport os\nfrom config.config import configs\nfrom app import db\n\nconfig = configs()\n# TASK NO. 1\n\n'''\nPurpose: fetch the complete stock price history data using scrapy.\n'''\n'''\n# This block was meant to collect the links for all of the roughly 1,800 stock tickers.\n# The volume is too large, though, so the block is shelved for now and only about 5 tickers are used.\nma_co_phieu = ma_co_phieus.query.all()\nstart_urls1 = []\nfor ma_co_phieu2 in ma_co_phieu:\n    ma_co_phieu1 = ma_co_phieu2.code\n    url_csv_file = config[\"file_csv_url\"][\"lich_su_gia_co_phieu\"] + \"-{0}\".format(ma_co_phieu1)\n    start_urls1.append(config[\"url\"][\"lich_su_gia_co_phieu\"] + \"{0}\".format(ma_co_phieu1))\n'''\n\n\nstart_urls1 = ['https://www.cophieu68.vn/historyprice.php?id=AAT',\n               'https://www.cophieu68.vn/historyprice.php?id=AAV',\n               'https://www.cophieu68.vn/historyprice.php?id=ABB',\n               'https://www.cophieu68.vn/historyprice.php?id=ABI',\n               'https://www.cophieu68.vn/historyprice.php?id=ABR']\nfields = ['STT',\n          'Ma_cty',\n          'Ngay',\n          'Gia_tham_chieu',\n          'Len_xuong',\n          'Phan_tram',\n          'Dong_cua',\n          'Khoi_luong',\n          'Mo_cua',\n          'Cao_nhat',\n          'Thap_nhat',\n          'Giao_dich_thoa_thuan',\n          'Nuoc_ngoai_mua',\n          'Nuoc_ngoai_ban']\n\nfor star_url in start_urls1:\n    page = star_url.split(\"=\")[-1]\n    if os.path.exists(r\"..\\\\..\\\\..\\\\..\\\\file_csv\\\\lich_su_gia_co_phieu_{0}.csv\".format(page)):\n        os.remove(r\"..\\\\..\\\\..\\\\..\\\\file_csv\\\\lich_su_gia_co_phieu_{0}.csv\".format(page))\n    with open(r\"..\\\\..\\\\..\\\\..\\\\file_csv\\\\lich_su_gia_co_phieu_{0}.csv\".format(page), 'a') as f:\n        write = csv.writer(f)\n        write.writerow(fields)\n\nclass CpSpider(scrapy.Spider):\n    name = 'cp_item'\n    start_urls = start_urls1\n    #for i in range(1, 72):\n    #    start_urls.append('https://www.cophieu68.vn/historyprice.php?currentPage={0}&id={1}'.format(i, Ten_cty))\n    def parse(self, response):\n        rows = []\n        page = response.url.split(\"=\")[-1]\n        for table in response.css('table.stock'):\n            for j in range (2, 120):\n                record = [ table.xpath('tr[{0}]/td[1]/text()'.format(j)).get(),\n                           page,\n                           table.xpath('tr[{0}]/td[2]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[3]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[4]/span[1]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[5]/span[1]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[6]/span[1]/strong[1]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[7]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[8]/span[1]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[9]/span[1]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[10]/span[1]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[11]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[12]/text()'.format(j)).get(),\n                           table.xpath('tr[{0}]/td[13]/text()'.format(j)).get()\n                           ]\n                if record[3] is not None:\n                    rows.append(record)\n        Page = response.xpath(\"//ul[@id='navigator']/li[9]/a[1]/@href\").get()\n        if Page is not None:\n            next_page = response.xpath(\"//ul[@id='navigator']/li[9]/a[1]/@href\").get()\n        elif response.xpath(\"//ul[@id='navigator']/li[6]/span[1]/text()\").get() == \"70\":\n            next_page = response.xpath(\"//ul[@id='navigator']/li[7]/a[1]/@href\").get()\n        elif response.xpath(\"//ul[@id='navigator']/li[7]/span[1]/text()\").get() == \"71\":\n            next_page = None\n        else:\n            next_page = response.xpath(\"//ul[@id='navigator']/li[6]/a[1]/@href\").get()\n        if next_page is not None:\n            yield response.follow(next_page, self.parse)\n\n\n\n        with open(r\"..\\\\..\\\\..\\\\..\\\\file_csv\\\\lich_su_gia_co_phieu_{0}.csv\".format(page), 'a') as f:\n            write = csv.writer(f)\n            write.writerows(rows)\n\ncmdline.execute(\"scrapy runspider 
history_stock_scarpy.py\".split())\n\n\n","sub_path":"app/main/service/file_csv_history_code/history_stock_scarpy.py","file_name":"history_stock_scarpy.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"533023134","text":"\"\"\"Your are given an array of integers prices, for which the i-th element is \nthe price of a given stock on day i; and a non-negative integer fee representing \na transaction fee.You may complete as many transactions as you like, but you need \nto pay the transaction fee for each transaction. You may not buy more than 1 share \nof a stock at a time (ie. you must sell the stock share before you buy again.)\nReturn the maximum profit you can make.\"\"\"\n\n\nclass Solution(object):\n def maxProfit(self, prices, fee):\n n = len(prices)\n cash, hold = 0, -prices[0]\n for i in range(1,n):\n cash = max(cash,prices[i] + hold - fee)\n hold = max(hold, cash-prices[i])\n return cash","sub_path":"Dynamic Programming/BestTimeToBuyAndSellStockWithTransactionFee.py","file_name":"BestTimeToBuyAndSellStockWithTransactionFee.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"371113533","text":"import json\nfrom pathlib import Path\n\nfrom bs4 import BeautifulSoup\n\ndir_name = Path(\"test_literal_block_directive\")\nhtml_filename = \"test_json_literal.html\"\n\n\n\n\ndef test_base_name_in_html(app, values):\n app.build()\n html_doc = Path(app.outdir / dir_name / html_filename).read_text()\n text = extract_pre_from_html(html_doc)\n d = json.loads(text)\n assert d == json.loads(values.json_month_lengths)\n\n\ndef extract_pre_from_html(html_doc):\n soup = BeautifulSoup(html_doc, 'html.parser')\n pre = soup.find('pre')\n return pre.get_text()\n\n","sub_path":"tests/test_literal_block_directive.py","file_name":"test_literal_block_directive.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"186982643","text":"#!/usr/bin/env python\nfrom mininet.cli import CLI\nfrom mininet.net import Mininet\nfrom mininet.link import Link,TCLink\n\nif '__main__' == __name__:\n net = Mininet(link=TCLink) # Get a mininet object with TCLink\n # add hosts to net object\n h1 = net.addHost('h1') \n h2 = net.addHost('h2')\n h3 = net.addHost('h3')\n br1 = net.addHost('br1')\n # create link between to hosts\n net.addLink(h1,br1)\n net.addLink(h2,br1)\n net.addLink(h3,br1)\n # build net structure\n net.build()\n # clear ip addr\n h1.cmd(\"ifconfig h1-eth0 0\")\n h2.cmd(\"ifconfig h2-eth0 0\")\n h3.cmd(\"ifconfig h3-eth0 0\")\n br1.cmd(\"ifconfig br1-eth0 0\")\n br1.cmd(\"ifconfig br1-eth1 0\")\n br1.cmd(\"ifconfig br1-eth2 0\")\n # add bridge\n br1.cmd(\"brctl addbr mybr\")\n # add interface to bridge\n br1.cmd(\"brctl addif mybr br1-eth0\")\n br1.cmd(\"brctl addif mybr br1-eth1\")\n br1.cmd(\"brctl addif mybr br1-eth2\")\n # set bridge up\n br1.cmd(\"ifconfig mybr up\")\n # set ip address\n h1.cmd(\"ifconfig h1-eth0 192.168.10.1/24\")\n h2.cmd(\"ifconfig h2-eth0 192.168.10.2/24\")\n h3.cmd(\"ifconfig h3-eth0 192.168.10.3/24\")\n\n # start CommandLine Interface\n CLI(net)\n # release net structure\n net.stop()\n","sub_path":"bridge/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"575877250","text":"import 
selenium.webdriver\n\nimport win32api,win32con\n\n\ndef zhucebiao():\n key = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE,\n 'Software\\Microsoft\\Internet Explorer\\Main\\FeatureControl\\FEATURE_BROWSER_EMULATION',\n 0, win32con.KEY_ALL_ACCESS)\n print(key)\n a=win32api.RegQueryValue(key, '')\n print(a)\n\n\n\ndef Iejierong():\n Ieb = selenium.webdriver.Ie()\n mate = 'meta http-equiv=\"X-UA-Compatible\" content=\"IE=9\"'\n Ieb.get(\"ww.baid.com\")\n Ieb.execute_script(mate)\n\nzhucebiao()","sub_path":"test01/test003.py","file_name":"test003.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"629335983","text":"SIMPLE_CONFIG = {\n \"apiKey\": \"AIzaSyBfGClE3cLOfs0c0rmGnYvZerwIky9rVgg\",\n \"authDomain\": \"pirebase-test.firebaseapp.com\",\n \"databaseURL\": \"https://pirebase-test.firebaseio.com\",\n \"storageBucket\": \"pirebase-test.appspot.com\",\n}\n\nSERVICE_ACCOUNT_PATH = \"../secret.json\"\n\nSERVICE_CONFIG = dict(SIMPLE_CONFIG, serviceAccount=SERVICE_ACCOUNT_PATH)\n","sub_path":"tests/config.template.py","file_name":"config.template.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"173351337","text":"class EmptyLinkedList(Exception):\n def __init__(self):\n super().__init__('Lista vazia')\n\n\nclass Aluno:\n def __init__(self, nome: str, nota: float):\n self.__nome = nome\n self.__nota = nota\n self.__prox = None\n self.__ante = None\n\n def __repr__(self):\n return f'{self.__nome}:{self.__nota}'\n\n @property\n def prox(self):\n return self.__prox\n \n @prox.setter\n def prox(self, prox):\n self.__prox = prox\n\n @property\n def ante(self):\n return self.__ante\n \n @ante.setter\n def ante(self, ante):\n self.__ante = ante\n\nclass ListaCircular:\n def __init__(self):\n self.__inicio = None\n self.__final = None\n\n def append(self, aluno: Aluno):\n \n if self.__inicio == None:\n aluno.prox = aluno\n aluno.ante = aluno\n self.__inicio = aluno\n self.__final = aluno\n print(aluno, aluno.ante, aluno.prox)\n \n else:\n if self.__inicio == self.__final:\n aluno.prox = self.__inicio\n aluno.ante = self.__inicio\n self.__final = aluno\n self.__inicio.prox=aluno\n print(aluno, aluno.ante, aluno.prox)\n\n\n else: \n aluno.prox = self.__inicio\n aluno.ante = self.__final \n self.__final.prox = aluno\n self.__final = aluno\n self.__inicio.ante = aluno\n print(aluno, aluno.ante, aluno.prox, self.__inicio.ante, self.__inicio.prox)\n \n def pop(self, valor):\n aux1 = 1\n atual = self.__inicio\n\n if atual == None:\n raise EmptyLinkedList\n\n while aux1 < valor:\n aux1 +=1\n atual = atual.prox\n\n if aux1 == valor:\n if atual == self.__inicio:\n self.__inicio = self.__inicio.prox\n \n aux = atual.ante\n aux.prox = atual.prox\n aux = atual.prox\n aux.ante = atual.ante\n return atual\n\n def __repr__(self, ):\n saida = ''\n atual = self.__inicio\n\n while ((atual.prox != self.__inicio) ): \n saida += f'{atual}'\n atual = atual.prox\n if atual.prox != self.__inicio.prox and atual != self.__inicio:\n saida += ' -> '\n saida += f'{atual}'\n return f'[{saida}]'\n\nlista = ListaCircular()\nlista.append(Aluno('A1', 9.8))\nlista.append(Aluno('A2', 7.8))\nlista.append(Aluno('A3', 9.8) )\nlista.append(Aluno('A4', 8.8) )\nlista.append(Aluno('A5', 8.8) )\nlista.append(Aluno('A6', 8.8) )\nlista.append(Aluno('A7', 8.8) )\nlista.append(Aluno('A8', 8.8) )\nlista.append(Aluno('A9', 8.8) )\nlista.pop(1)\nprint(lista) 
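\n# expected: the eight remaining nodes print in insertion order once A1 (the head) is popped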
","sub_path":"fila_circular.py","file_name":"fila_circular.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"557058923","text":"import asyncio\r\nfrom discord.ext import commands\r\n\r\n\r\nclient = commands.Bot(command_prefix='!*')\r\n\r\n\r\n@client.event\r\n@asyncio.coroutine\r\ndef on_ready():\r\n print('Logged in as')\r\n print(client.user.name)\r\n print(client.user.id)\r\n print('------')\r\n\r\n\r\n@client.command(name=\"*\")\r\n@commands.has_permissions(administrator=True)\r\n@asyncio.coroutine\r\ndef start(*message2):\r\n if 'message2' not in locals():\r\n message = \"\"\r\n else:\r\n message = \"\"\r\n for x in message2:\r\n message = message + x + \" \"\r\n while True:\r\n yield from asyncio.sleep(43200)\r\n yield from client.say(message)\r\n\r\n\r\nloop = asyncio.get_event_loop()\r\n\r\n\r\n@client.command(name=\"stop\")\r\n@commands.has_permissions(administrator=True)\r\n@asyncio.coroutine\r\ndef stop():\r\n while True:\r\n loop.stop\r\n loop.close\r\n\r\n\r\nclient.run(\"MzI1Njg0NjM5MjUzMzk3NTA3.DCb0_A.LFAXgf-cVt0ZfsZFZwT9KqGKbrc\") #LegionOW\r\n","sub_path":"rem-inderOW.py","file_name":"rem-inderOW.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"18809642","text":"#!/usr/bin/python\nimport pygtk\nimport gtk\nimport os\nWILDCARD = '*'\nfrom common import *\nclass SessionConfigDlg:\n def __init__(self, parent, manager, view):\n self.dialog = gtk.Dialog(\"Configuration Session\",None,gtk.DIALOG_MODAL)\n self.manager = manager\n self.view = view\n okButton = gtk.Button(\"OK\", gtk.STOCK_OK)\n cancelButton = gtk.Button(\"Cancel\", gtk.STOCK_CANCEL)\n\n okButton.connect(\"clicked\", self.onButton, self)\n cancelButton.connect(\"clicked\", self.onCancel, self)\n self.dialog.action_area.pack_start(okButton,False,False)\n self.dialog.action_area.pack_start(cancelButton, False,False)\n table = gtk.Table(6,2)\n table.set_row_spacings(6)\n table.set_col_spacings(6)\n frame = gtk.Frame(\"Configure\")\n frame.add(table)\n \n protocolLab = gtk.Label(\"Protocol :\")\n addressLab = gtk.Label(\"Address :\")\n serviceGroupLab = gtk.Label(\"ServiceGroup :\")\n scaleLab = gtk.Label(\"Scale :\")\n numberLab = gtk.Label(\"Number :\")\n startLab = gtk.Label(\"Start : \") \n \n hbox = gtk.HBox()\n self.singleRadio = gtk.RadioButton(None,\"Individual\")\n self.mutipleRadio = gtk.RadioButton(self.singleRadio,\"Mutil\")\n self.configureFileRadio = gtk.RadioButton(self.mutipleRadio, \"Config File\")\n self.singleRadio.connect(\"toggled\", self.onSingleRadio, self)\n self.mutipleRadio.connect(\"toggled\", self.onMutipleRadio, self)\n self.configureFileRadio.connect(\"toggled\", self.onConfigureFileRadio, self)\n\n self.singleRadio.set_active(True)\n \n hbox.pack_start(self.singleRadio)\n hbox.pack_start(self.mutipleRadio)\n hbox.pack_start(self.configureFileRadio)\n\n table.attach(protocolLab, 0, 1, 0, 1)\n table.attach(hbox,0, 2, 1, 2)\n table.attach(addressLab, 0, 1, 2, 3)\n table.attach(serviceGroupLab, 0, 1, 3, 4)\n table.attach(startLab, 0, 1, 4, 5)\n table.attach(numberLab, 0, 1, 5, 6)\n \n \n self.combox = gtk.combo_box_new_text()\n #TODO should get the supported protocol from a configuration file.\n self.combox.append_text(\"RTSP\")\n self.combox.append_text(\"VLC\")\n self.combox.append_text(\"Demo\")\n self.combox.set_active(0)\n\n self.address = gtk.Entry()\n 
self.address.set_text(\"rtsp://192.168.0.\");\n adjustment1 = gtk.Adjustment(0, 1, 2000, 1, 0, 0)\n adjustment2 = gtk.Adjustment(0, 0, 2000, 1, 0, 0)\n adjustment3 = gtk.Adjustment(0, 0, 2000, 1, 0, 0)\n \n self.number = gtk.SpinButton(adjustment1, 0, 0)\n self.serviceGroup = gtk.SpinButton(adjustment2, 0, 0)\n self.startPoint = gtk.SpinButton(adjustment3, 0, 0)\n\n self.startPoint.set_sensitive(False)\n\n table.attach(self.combox, 1, 2, 0, 1)\n table.attach(self.address, 1, 2, 2, 3)\n table.attach(self.serviceGroup ,1, 2, 3, 4)\n table.attach(self.startPoint, 1, 2, 4, 5)\n table.attach(self.number, 1, 2, 5, 6)\n \n self.dialog.vbox.pack_start(frame)\n self.dialog.show_all()\n\n def onSingleRadio(self, widget, data = None):\n if widget.get_active():\n data.address.set_text(\"rtsp://192.168.0\")\n data.serviceGroup.set_sensitive(True)\n data.number.set_sensitive(True)\n data.startPoint.set_sensitive(False)\n\n def onMutipleRadio(self, widget, data = None):\n if widget.get_active():\n data.address.set_text(\"rtsp://192.168.0\")\n data.serviceGroup.set_sensitive(True)\n data.number.set_sensitive(True)\n data.startPoint.set_sensitive(True)\n\n def onConfigureFileRadio(self, widget, data = None):\n if widget.get_active():\n data.address.set_text(\"./config.cfg\")\n data.serviceGroup.set_sensitive(False)\n data.number.set_sensitive(False)\n data.startPoint.set_sensitive(False)\n \n def onButton(self, widget, data):\n model = data.combox.get_model()\n index = data.combox.get_active()\n protocol = model[index][0]\n address = data.address.get_text()\n number = data.number.get_value_as_int()\n serviceGroup = data.serviceGroup.get_value_as_int()\n startPoint = data.startPoint.get_value_as_int()\n\n if data.mutipleRadio.get_active():\n createSession(protocol, address, serviceGroup, number, data.manager, self.view, startingPoint = startPoint)\n \n if data.singleRadio.get_active():\n if address.find(WILDCARD) >= 0:\n message = \"Your adddress contain a wildcard %s\" % WILDCARD\n messageBox = gtk.MessageDialog(data.dialog, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_NONE, message)\n messageBox.show_all()\n return\n createSession(protocol, address, serviceGroup, number, data.manager, self.view)\n \n if data.configureFileRadio.get_active():\n config = readConfigFromFile(address)\n if config == None:\n m = \"Your configuration file have error or is nonexistent.\"\n messageBox = gtk.MessageDialog(data.dialog, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_NONE, m)\n messageBox.show_all()\n return False\n createSession(config.protocol, config.address, config.param, config.number, data.manager, self.view)\n \n self.dialog.destroy()\n \n def onCancel(self, widget, data):\n self.dialog.destroy()\n\n","sub_path":"ui/SessionConfigDlg.py","file_name":"SessionConfigDlg.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"404077533","text":"#!/usr/bin/env python\n\nfrom flexbe_core import EventState, Logger\nimport rospy\nfrom vizbox.msg import Story\n\n\nclass Set_Story(EventState):\n \"\"\"\n set_story\n -- titre string the title\n -- storyline string[] the steps\n\n <= done what's suppose to be written is written\n \"\"\"\n\n def __init__(self, titre, storyline):\n \"\"\"set the story\"\"\"\n super(Set_Story, self).__init__(outcomes=['done'])\n self.pub = rospy.Publisher(\"/story\", Story)\n\n self.msg = Story()\n self.msg.title = titre\n self.msg.storyline = storyline\n\n def execute(self, userdata):\n 
\"\"\"execute what needs to be executed\"\"\"\n self.pub.publish(self.msg)\n Logger.loginfo('Success')\n return 'done'\n","sub_path":"sara_flexbe_states/src/sara_flexbe_states/story.py","file_name":"story.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"98971955","text":"import logging\nimport base64\nfrom django.utils import simplejson as json\nfrom google.appengine.api import urlfetch\n\nlog = logging.getLogger(__file__)\n\nclass Development:\n APP_KEY = 'yFNGdwOrRXCtxLi99jAIaw'\n APP_SECRET = 'uOkN-EKbYBc6xTPmVNl0K6bkS3A-Q'\n APP_MASTER_SECRET = 'KgtMVYZUQ8m2TXi9-e3kkw'\n\nclass Produciton:\n APP_KEY = 'xEU2Ee1cS_OeJZtqyO7hMw'\n APP_SECRET = 'uOkN-0ydSMWi9hF9UdzC-Q'\n APP_MASTER_SECRET = '5LprI3aFTDmZEQWC2RVQyg'\npush_enabled = False\nprofile = Development\n\ndef send_push_notification(deviceToken, alertMsg, badgeNumber=0, **customDict):\n if not push_enabled:\n return\n log.info(\"sending notification... %r\", alertMsg)\n\n url = 'https://go.urbanairship.com/api/push/'\n\n auth_string = 'Basic ' + base64.encodestring('%s:%s' % (profile.APP_KEY, profile.APP_MASTER_SECRET))[:-1]\n log.info(\"auth string: '%s'\",auth_string)\n msgDict = {\"aps\": {\"badge\": badgeNumber, \"alert\": alertMsg}, \"device_tokens\": [deviceToken]}\n msgDict.update(customDict)\n body = json.dumps(msgDict)\n if (len(body) >= 256):\n log.error(\"Unable to send notification body is to large! (%i > 256)\", len(body))\n return False\n log.info(\"Body %s\", body)\n data = urlfetch.fetch(url, headers={'content-type': 'application/json','authorization' : auth_string}, payload=body, method=urlfetch.POST)\n\n if data.status_code == 200:\n log.info(\"Remote Notification successfully sent to UrbanAirship, status code= %r, content=%r\",data.status_code, data.content )\n else:\n log.error(\"Remote Notification not sent! Status code = %r, content=%r\", data.status_code, data.content)\n return data.status_code == 200\n\ndef register(device_token, secret_key=profile.APP_SECRET):\n if not push_enabled:\n return True\n secret_key = secret_key.strip('\\n')\n device_token = device_token.strip('\\n')\n url = 'https://go.urbanairship.com/api/device_tokens/%s/' % (device_token)\n\n auth_string = 'Basic ' + base64.encodestring('%s:%s' % (profile.APP_KEY, secret_key))[:-1]\n log.info(\"auth string: '%r'\", auth_string)\n log.info(\"Request url = '%s'\", url)\n data = urlfetch.fetch(url, headers={'authorization' : auth_string}, payload=None, method=urlfetch.PUT)\n\n if data.status_code == 200:\n log.info(\"Device registered sucessfuly in UrbanAirship, status code= %r, content=%r\",data.status_code, data.content)\n else:\n log.error(\"Unable to register device in UrbanAirship! 
Status code = %r, content=%r\", data.status_code, data.content)\n return data.status_code == 200\n\ndef set_configuration(conf):\n pass\n","sub_path":"service/src/b2m/urban_airmail.py","file_name":"urban_airmail.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"460647888","text":"from collections import deque\n\nimport numpy as np\nimport pickle\nfrom mujoco_py import MujocoException\n\nfrom baselines.her.util import convert_episode_to_batch_major, store_args\n\n# Global constants repeated in policy_network.py\n# Really bad style\n# Sorry.\nCOLOR_FEATURES = 4\nENV_FEATURES = 10\nBLOCK_BASE_FEATURES = 15\nBLOCK_FEATURES = BLOCK_BASE_FEATURES + COLOR_FEATURES\n\n# Code for the colors used in puzzle solving\nGREY = 0\nRED = 1\nGREEN = 2\nBLUE = 3\nNUM_COLORS = 4\n\ndef get_color(one_hot):\n assert(len(one_hot) == NUM_COLORS)\n return np.argmax(one_hot)\n\nclass RolloutStudent:\n\n @store_args\n def __init__(self, make_env, policy, expert, dims, logger, T, rollout_batch_size=1,\n exploit=False, use_target_net=False, compute_Q=False, noise_eps=0,\n random_eps=0, history_len=100, render=False, gamma=None, \n beta_final=None, annealing_coeff=None, **kwargs):\n \"\"\"Rollout worker generates experience by interacting with one or many environments.\n\n Args:\n make_env (function): a factory function that creates a new instance of the environment\n when called\n policy (object): the policy that is used to act\n dims (dict of ints): the dimensions for observations (o), goals (g), and actions (u)\n logger (object): the logger that is used by the rollout worker\n rollout_batch_size (int): the number of parallel rollouts that should be used\n exploit (boolean): whether or not to exploit, i.e. 
to act optimally according to the\n                current policy without any exploration\n            use_target_net (boolean): whether or not to use the target net for rollouts\n            compute_Q (boolean): whether or not to compute the Q values alongside the actions\n            noise_eps (float): scale of the additive Gaussian noise\n            random_eps (float): probability of selecting a completely random action\n            history_len (int): length of history for statistics smoothing\n            render (boolean): whether or not to render the rollouts\n        \"\"\"\n        self.kwargs = self.policy.kwargs\n\n        self.envs = [make_env() for _ in range(rollout_batch_size)]\n        assert self.T > 0\n\n        self.info_keys = [key.replace('info_', '') for key in dims.keys() if key.startswith('info_')]\n\n        self.success_history = deque(maxlen=history_len)\n        self.Q_history = deque(maxlen=history_len)\n\n        self.n_episodes = 0\n        self.g = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # goals\n        self.initial_o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations\n        self.initial_ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals\n        self.reset_all_rollouts()\n        self.clear_history()\n\n        # ------------\n        self.gamma = gamma\n        self.time = 0.0\n        self.beta_final = beta_final\n        self.annealing_coeff = annealing_coeff\n        self.expert = expert\n        # ------------\n\n    def reset_rollout(self, i, test=False):\n        \"\"\"Resets the `i`-th rollout environment, re-samples a new goal, and updates the `initial_o`\n        and `g` arrays accordingly.\n        \"\"\"\n        obs = self.envs[i].reset()\n        if test:\n            # Set difficulty to maximum\n            obs = self.envs[i].unwrapped.set_test()\n        self.initial_o[i] = obs['observation']\n        self.initial_ag[i] = obs['achieved_goal']\n        self.g[i] = obs['desired_goal']\n\n    def reset_all_rollouts(self, test=False):\n        \"\"\"Resets all `rollout_batch_size` rollout workers.\n        \"\"\"\n        for i in range(self.rollout_batch_size):\n            self.reset_rollout(i, test)\n\n    def increase_difficulty(self):\n        max_level = False\n        for env in self.envs:\n            max_level = env.unwrapped.increase_difficulty()\n        if not max_level:\n            return self.envs[0].unwrapped.get_difficulty()\n        else:\n            return None\n\n    def trim(self, o, g, ag, dimo, dimg, num_objs=4):\n        # No need to trim\n        if o.shape[-1] == dimo:\n            return o, g, ag\n        # If the shapes don't match, it means there are extra blocks\n        # we need to get rid of\n\n        if len(o.shape) == 1:\n            # TODO: Fix this\n            assert(False)\n            if 'Variation' in self.kwargs['info']['env_name']:\n                # The observation includes number of blocks \n                # followed by environment features and block features\n                o_ = o[1:ENV_FEATURES+1]\n                num_blocks = (o.shape[0] - 1 - ENV_FEATURES) // BLOCK_FEATURES\n                for i in range(num_blocks):\n                    start = ENV_FEATURES + 1 + i * BLOCK_FEATURES\n                    o_ = np.concatenate([o_, o[start:start+BLOCK_BASE_FEATURES]])\n            else:\n                o_ = o[:dimo]\n            \n            g_, ag_ = [], []\n            max_num_objs = (int)(len(g) ** 0.5)\n            for i in range(len(g)):\n                if (i // max_num_objs < num_objs and\n                    i % max_num_objs < num_objs):\n                    g_.append(g[i])\n                    ag_.append(ag[i])\n            assert(len(g_) == dimg)\n            g_ = np.asarray(g_)\n            ag_ = np.asarray(ag_)\n        else:\n            batch_size = o.shape[0]\n            g_ = [[] for _ in range(batch_size)]\n            ag_ = [[] for _ in range(batch_size)]\n            max_num_objs = (int)(g.shape[1] ** 0.5)\n            for i in range(g.shape[1]):\n                if (i // max_num_objs < num_objs and\n                    i % max_num_objs < num_objs):\n                    for j in range(batch_size):\n                        g_[j].append(g[j][i])\n                        ag_[j].append(ag[j][i])\n            assert(len(g_[0]) == dimg)\n            g_ = np.asarray(g_)\n            ag_ = np.asarray(ag_)\n            if 'Variation' in 
self.kwargs['info']['env_name']:\n # The observation includes number of blocks\n # followed by environment features and block features\n o_ = o[:,1:ENV_FEATURES+1]\n num_blocks = num_objs - 2\n max_num_blocks = max_num_objs - 2\n # Only picks the colored blocks\n block_features = np.empty([batch_size, BLOCK_BASE_FEATURES*num_blocks])\n for i in range(batch_size):\n temp = np.zeros(0)\n for j in range(max_num_blocks):\n start = ENV_FEATURES + 1 + j * BLOCK_FEATURES\n color = get_color(o[i, start+BLOCK_BASE_FEATURES:\n start+BLOCK_FEATURES])\n if (color == GREEN or color == BLUE): \n temp = np.concatenate([temp, o[i,start:start+BLOCK_BASE_FEATURES]])\n block_features[i] = temp\n o_ = np.concatenate([o_, block_features], axis=1)\n else:\n o_ = o[:,:dimo]\n assert(o_.shape[1] == dimo)\n return o_, g_, ag_\n\n def generate_rollouts(self, render=False, test=False, exploit=False):\n \"\"\"Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current\n policy acting on it accordingly.\n \"\"\"\n self.reset_all_rollouts(test)\n\n # Annealing\n if self.expert != None:\n beta = self.beta()\n else:\n beta = 0\n\n # compute observations\n o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations\n ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals\n o[:] = self.initial_o\n ag[:] = self.initial_ag\n\n # generate episodes\n obs, achieved_goals, acts, goals, successes, returns, sigmas = [], [], [], [], [], [], []\n info_values = [np.empty((self.T, self.rollout_batch_size, self.dims['info_' + key]), np.float32) for key in self.info_keys]\n for t in range(self.T):\n if np.random.rand() < beta:\n # The expert is in charge\n o_, g_, ag_ = self.trim(o, self.g, ag, self.expert.dimo, self.expert.dimg)\n policy_output = self.expert.get_actions(o_, ag_, g_, compute_raw=True)\n u, raw = policy_output\n else:\n policy_output = self.policy.get_actions(\n o, ag, self.g, exploit=exploit)\n u, raw, sigma = policy_output\n # We can't report sigma accurately when we are using the expert\n if self.expert != None:\n sigma = np.zeros((self.rollout_batch_size, self.dims['u']))\n\n if u.ndim == 1:\n # The non-batched case should still have a reasonable shape.\n u = u.reshape(1, -1)\n raw = raw.reshape(1, -1)\n\n o_new = np.empty((self.rollout_batch_size, self.dims['o']))\n ag_new = np.empty((self.rollout_batch_size, self.dims['g']))\n success = np.zeros(self.rollout_batch_size)\n # --------------\n r_new = np.zeros(self.rollout_batch_size)\n # --------------\n # compute new states and observations\n for i in range(self.rollout_batch_size):\n # print(u[i])\n try:\n # We don't ignore reward here \n # because we need to compute the return\n curr_o_new, r, _, info = self.envs[i].step(u[i])\n if 'is_success' in info:\n success[i] = info['is_success']\n o_new[i] = curr_o_new['observation']\n ag_new[i] = curr_o_new['achieved_goal']\n # --------------\n r_new[i] = r\n # --------------\n for idx, key in enumerate(self.info_keys):\n info_values[idx][t, i] = info[key]\n if render:\n self.envs[i].render()\n except MujocoException as e:\n self.logger.info(str(e))\n self.logger.info('Exception thrown by Mujoco. Giving up on life...')\n assert(False)\n return self.generate_rollouts(render, test)\n\n if np.isnan(o_new).any():\n self.logger.info('NaN caught during rollout generation. 
Trying again...')\n                self.reset_all_rollouts(test)\n                return self.generate_rollouts(render, test)\n\n            obs.append(o.copy())\n            achieved_goals.append(ag.copy())\n            successes.append(success.copy())\n            acts.append(raw.copy())\n            goals.append(self.g.copy())\n            sigmas.append(sigma.copy())\n            # ---------\n            returns.append(r_new.copy())\n            for t_ in range(t):\n                r_new = r_new.copy()\n                returns[t_] += self.gamma ** (t - t_) * r_new\n            # ---------\n            o[...] = o_new\n            ag[...] = ag_new\n        obs.append(o.copy())\n        achieved_goals.append(ag.copy())\n        self.initial_o[:] = o\n\n        episode = dict(o=obs,\n                       u=acts,\n                       g=goals,\n                       ag=achieved_goals,\n                       # --------\n                       G=returns,\n                       sigma=sigmas)\n        # --------\n        for key, value in zip(self.info_keys, info_values):\n            episode['info_{}'.format(key)] = value\n\n        # stats\n        successful = np.array(successes)[-1, :]\n        assert successful.shape == (self.rollout_batch_size,)\n        success_rate = np.mean(successful)\n\n        self.success_history.append(success_rate)\n        self.n_episodes += self.rollout_batch_size\n\n        return convert_episode_to_batch_major(episode)\n\n    def beta(self):\n        return (1.0 - self.beta_final) * np.exp(-self.time / self.annealing_coeff) + self.beta_final\n\n    def anneal(self):\n        self.time += 1.0\n        if self.expert != None:\n            self.logger.info(\"Beta = {}\".format(self.beta()))\n\n    def clear_history(self):\n        \"\"\"Clears all histories that are used for statistics\n        \"\"\"\n        self.success_history.clear()\n        self.Q_history.clear()\n\n    def current_success_rate(self):\n        return np.mean(self.success_history)\n\n    def current_mean_Q(self):\n        return np.mean(self.Q_history)\n\n    def save_policy(self, path):\n        \"\"\"Pickles the current policy for later inspection.\n        \"\"\"\n        with open(path, 'wb') as f:\n            pickle.dump(self.policy, f)\n\n    def save_policy_weights(self, path):\n        self.policy.save_weights(path)\n\n    def logs(self, prefix='worker'):\n        \"\"\"Generates a dictionary that contains all collected statistics.\n        \"\"\"\n        logs = []\n        logs += [('success_rate', np.mean(self.success_history))]\n        if self.compute_Q:\n            logs += [('mean_Q', np.mean(self.Q_history))]\n        logs += [('episode', self.n_episodes)]\n\n        if prefix != '' and not prefix.endswith('/'):\n            return [(prefix + '/' + key, val) for key, val in logs]\n        else:\n            return logs\n\n    def seed(self, seed):\n        \"\"\"Seeds each environment with a distinct seed derived from the passed in global seed.\n        \"\"\"\n        for idx, env in enumerate(self.envs):\n            env.seed(seed + 1000 * idx)\n","sub_path":"gym_blocks/policy_gradient/rollout.py","file_name":"rollout.py","file_ext":"py","file_size_in_byte":13491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"377555073","text":"# -*- coding:utf-8-*-\nimport tensorflow as tf\nimport numpy as np\nimport math\nimport scipy.misc\nimport GANs.tempered_gan.ops as ops\nimport GANs.tempered_gan.utils as utl\n\n# modified from ls_gan\n\n\nclass TemperedGAN(object):\n    def __init__(self, da_f, in_s, s, lr, no_s, tr_e, ba_s, di_s):\n        self.data_folder = da_f\n        self.input_size = in_s\n        self.size = s\n        self.learning_rate = lr\n        self.noise_size = no_s\n        self.training_epochs = tr_e\n        self.batch_size = ba_s\n        self.display_step = di_s\n        self.k = 50000\n        self.data = None\n        self.chunk_size = None\n\n    def _load_dataset(self):\n        self.data, self.chunk_size = utl.load_img(self.data_folder, self.input_size, self.batch_size)\n\n    def _get_batches(self, batch_index):\n        batch = self.data[batch_index:batch_index + self.batch_size, :, :, :]\n        return batch\n\n    @staticmethod\n    def _mse_loss(pred, data):\n        
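# least-squares GAN objective: half the mean squared error between logits and targets\n        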
loss_val = tf.reduce_mean(tf.multiply((pred - data), (pred - data))) / 2\n return loss_val\n\n def generate_net(self, noise, train=True, reuse=False):\n \"\"\"\n :param noise: source noise z\n :param train:\n :param reuse:\n :return:\n \"\"\"\n # layer height, width\n s_h, s_w, _ = self.input_size\n s_h2, s_w2 = utl.get_out_size(s_h, 2), utl.get_out_size(s_w, 2)\n s_h4, s_w4 = utl.get_out_size(s_h2, 2), utl.get_out_size(s_w2, 2)\n s_h8, s_w8 = utl.get_out_size(s_h4, 2), utl.get_out_size(s_w4, 2)\n s_h16, s_w16 = utl.get_out_size(s_h8, 2), utl.get_out_size(s_w8, 2)\n with tf.variable_scope('generator', reuse=reuse):\n # AttributeError: 'tuple' object has no attribute 'as_list\n z = ops.full_connect(noise, output_num=self.size * 8 * s_h16 * s_w16, name='g_full', reuse=reuse)\n # reshape [batch_size, h, w, c]\n h0 = tf.reshape(z, [-1, s_h16, s_w16, self.size * 8])\n h0 = ops.batch_normalizer(h0, train=train, name='g_bn0', reuse=reuse)\n h0 = ops.lrelu(h0, name='g_l0')\n\n h1 = ops.deconv2d(h0, output_size=[self.batch_size, s_h8, s_w8, self.size * 4], name='g_h1', reuse=reuse)\n h1 = ops.batch_normalizer(h1, train=train, name='g_bn1', reuse=reuse)\n h1 = ops.lrelu(h1, name='g_l1')\n\n h2 = ops.deconv2d(h1, output_size=[self.batch_size, s_h4, s_w4, self.size * 2], name='g_h2', reuse=reuse)\n h2 = ops.batch_normalizer(h2, train=train, name='g_bn2', reuse=reuse)\n h2 = ops.lrelu(h2, name='g_l2')\n\n h3 = ops.deconv2d(h2, output_size=[self.batch_size, s_h2, s_w2, self.size * 1], name='g_h3', reuse=reuse)\n h3 = ops.batch_normalizer(h3, train=train, name='g_bn3', reuse=reuse)\n h3 = ops.lrelu(h3, name='g_l3')\n\n h4 = ops.deconv2d(h3, output_size=[self.batch_size, ] + self.input_size, name='g_h4', reuse=reuse)\n x_generate = tf.nn.tanh(h4, name='g_t4')\n return x_generate\n\n def discriminator_net(self, lx, reuse=False):\n \"\"\"\n :param lx: the images from lens\n :param reuse:\n :return:\n \"\"\"\n # layer height, width\n s_h, s_w, _ = self.input_size\n s_h2, s_w2 = utl.get_out_size(s_h, 2), utl.get_out_size(s_w, 2)\n s_h4, s_w4 = utl.get_out_size(s_h2, 2), utl.get_out_size(s_w2, 2)\n s_h8, s_w8 = utl.get_out_size(s_h4, 2), utl.get_out_size(s_w4, 2)\n s_h16, s_w16 = utl.get_out_size(s_h8, 2), utl.get_out_size(s_w8, 2)\n with tf.variable_scope('discriminator', reuse=reuse):\n h0 = ops.conv2d(lx, output_num=self.size, name='d_h0', reuse=reuse)\n h0 = ops.lrelu(h0, name='d_l0')\n\n h1 = ops.conv2d(h0, output_num=self.size * 2, name='d_h1', reuse=reuse)\n h1 = ops.batch_normalizer(h1, name='d_bn1', reuse=reuse)\n h1 = ops.lrelu(h1, name='d_l1')\n\n h2 = ops.conv2d(h1, output_num=self.size * 4, name='d_h2', reuse=reuse)\n h2 = ops.batch_normalizer(h2, name='d_bn2', reuse=reuse)\n h2 = ops.lrelu(h2, name='d_l2')\n\n h3 = ops.conv2d(h2, output_num=self.size * 8, name='d_h3', reuse=reuse)\n h3 = ops.batch_normalizer(h3, name='d_bn3', reuse=reuse)\n h3 = ops.lrelu(h3, name='d_l3')\n\n h4 = tf.reshape(h3, [self.batch_size, s_h16 * s_w16 * self.size * 8])\n\n h4 = ops.full_connect(h4, output_num=1, name='d_full', reuse=reuse)\n return h4\n\n def lens_net(self, x, reuse=False):\n \"\"\"\n :param x: input real data x\n :param reuse:\n :return:lens x: lx\n \"\"\"\n with tf.variable_scope('lens', reuse=reuse):\n h0 = ops.conv2d(x, output_num=self.size, stride=1, filter_size=3, name='l_h0')\n h0 = ops.lrelu(h0, name='l_l0')\n\n h1 = ops.res_block3_3(h0, name='l_res_1', reuse=reuse)\n h2 = ops.res_block3_3(h1, name='l_res_2', reuse=reuse)\n\n h3 = ops.conv2d(h2, output_num=3, stride=1, filter_size=3, 
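\n                            # project back to 3 channels so the residual add h3 + x below lines up\n                            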
name='l_h4')\n h3 = ops.lrelu(h3, leak=0.4, name='l_l4')\n h3 = h3 + x\n return h3\n\n def start_train(self):\n self._load_dataset()\n with tf.name_scope('inputs'):\n real_imgs = tf.placeholder(tf.float32, [None, ] + self.input_size, name='real_images')\n noise_imgs = tf.placeholder(tf.float32, [None, self.noise_size], name='noise_images')\n lamda = tf.placeholder(tf.float32, name='lamda')\n\n fake_imgs = self.generate_net(noise_imgs, train=True, reuse=False)\n lens_imgs = self.lens_net(real_imgs, reuse=False)\n\n lens_logits = self.discriminator_net(lens_imgs, reuse=False)\n fake_logits = self.discriminator_net(fake_imgs, reuse=True)\n\n with tf.name_scope('loss'):\n g_loss = self._mse_loss(fake_logits, tf.ones_like(fake_logits))\n l_loss_a = self._mse_loss(lens_logits, tf.zeros_like(lens_logits))\n l_loss_r = self._mse_loss(real_imgs, lens_imgs)\n l_loss = lamda * l_loss_a + l_loss_r\n\n d_fake_loss = self._mse_loss(fake_logits, tf.zeros_like(fake_logits))\n d_lens_loss = self._mse_loss(lens_logits, tf.ones_like(lens_logits))\n d_loss = d_fake_loss + d_lens_loss\n\n tf.summary.scalar('g_loss', g_loss)\n tf.summary.scalar('l_loss', l_loss)\n tf.summary.scalar('l_lamda', lamda)\n tf.summary.scalar('d_fake_loss', d_fake_loss)\n tf.summary.scalar('d_real_loss', d_lens_loss)\n tf.summary.scalar('d_loss', d_loss)\n with tf.name_scope('optimizer'):\n train_vars = tf.trainable_variables()\n gen_vars = [var for var in train_vars if var.name.startswith('generator')]\n dis_vars = [var for var in train_vars if var.name.startswith('discriminator')]\n lens_vars = [var for var in train_vars if var.name.startswith('lens')]\n # global_step = tf.Variable(0, trainable=False)\n # rate = tf.train.exponential_decay(self.learning_rate, global_step, 1024 * 4, 0.80, staircase=True)\n # rate = tf.maximum(rate, 0.00001)\n # d_trainer = tf.train.AdamOptimizer(rate, beta1=0.5).minimize(d_loss, var_list=dis_vars,\n # global_step=global_step)\n # g_trainer = tf.train.AdamOptimizer(rate, beta1=0.5).minimize(g_loss, var_list=gen_vars,\n # global_step=global_step)\n # g_trainer = tf.train.AdamOptimizer(rate, beta1=0.5).minimize(g_loss, var_list=gen_vars,\n # global_step=global_step)\n d_trainer = tf.train.AdamOptimizer(self.learning_rate * 2, beta1=0.0).minimize(d_loss, var_list=dis_vars)\n g_trainer = tf.train.AdamOptimizer(self.learning_rate * 4, beta1=0.0).minimize(g_loss, var_list=gen_vars)\n l_trainer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.0).minimize(l_loss, var_list=lens_vars)\n with tf.Session() as sess:\n saver = tf.train.Saver()\n # merge summary\n merged = tf.summary.merge_all()\n # choose dir\n writer = tf.summary.FileWriter('/home/ziyangcheng/python_save_file/temp_gan/tf_board', sess.graph)\n batch_index = 0 # init index\n cur_lamda = 1\n sess.run(tf.global_variables_initializer())\n for e in range(self.training_epochs):\n for batch_i in range(self.chunk_size):\n batch_data = self._get_batches(batch_index)\n batch_index = (batch_index + self.batch_size) % ((self.chunk_size - 1) * self.batch_size)\n\n # noise\n noise = np.random.uniform(-1.0, 1.0, size=(self.batch_size, self.noise_size)).astype(np.float32)\n\n if (self.chunk_size * e + batch_i) <= self.k:\n cur_lamda = 1 - np.sin(((self.chunk_size * e + batch_i) * math.pi) / (2 * self.k))\n else:\n cur_lamda = 0\n\n # Run optimizers\n sess.run(d_trainer, feed_dict={real_imgs: batch_data, noise_imgs: noise})\n sess.run(g_trainer, feed_dict={noise_imgs: noise})\n check_imgs, whatever, _ = sess.run([fake_imgs, lens_imgs, l_trainer],\n 
feed_dict={noise_imgs: noise, real_imgs: batch_data,\n lamda: cur_lamda})\n\n if (self.chunk_size * e + batch_i) % self.display_step == 0:\n train_loss_d = sess.run(d_loss, feed_dict={real_imgs: batch_data, noise_imgs: noise})\n fake_loss_d = sess.run(d_fake_loss, feed_dict={noise_imgs: noise})\n lens_loss_d = sess.run(d_lens_loss, feed_dict={real_imgs: batch_data})\n # generator loss\n train_loss_g = sess.run(g_loss, feed_dict={noise_imgs: noise})\n # lens loss\n train_loss_l = sess.run(l_loss, feed_dict={real_imgs: batch_data, lamda: cur_lamda})\n\n merge_result = sess.run(merged,\n feed_dict={real_imgs: batch_data, noise_imgs: noise, lamda: cur_lamda})\n writer.add_summary(merge_result, self.chunk_size * e + batch_i)\n\n print(\n \"step {}/of epoch {}/{}...\".format(self.chunk_size * e + batch_i, e, self.training_epochs),\n \"Discriminator Loss: {:.4f}(Real: {:.4f} + Fake: {:.4f})...\".format(\n train_loss_d, lens_loss_d, fake_loss_d),\n \"Generator Loss: {:.4f}\".format(train_loss_g),\n \"Lens Loss: {:.4f}\".format(train_loss_l), \"cur_lamda: {:.4f}\".format(cur_lamda))\n\n # save pic\n scipy.misc.imsave('/home/ziyangcheng/python_save_file/temp_gan/output/train/' +\n str(self.chunk_size * e + batch_i) + '-' + str(0) + 'train.png',\n check_imgs[0])\n\n scipy.misc.imsave('/home/ziyangcheng/python_save_file/temp_gan/output/train/' +\n str(self.chunk_size * e + batch_i) + '-' + str(0) + 'lens.png',\n whatever[0])\n\n print('train done')\n # save sess\n saver.save(sess, '/home/ziyangcheng/python_save_file/temp_gan/save_model/2/temp_gan.ckpt')\n\n def generate(self, num):\n noise_imgs = tf.placeholder(tf.float32, [None, self.noise_size], name='noise_images')\n sample_imgs = self.generate_net(noise_imgs, train=False, reuse=tf.AUTO_REUSE)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.restore(sess, '/home/ziyangcheng/python_save_file/temp_gan/save_model/2/temp_gan.ckpt')\n sample_noise = np.random.uniform(-1.0, 1.0, size=(num, self.noise_size)).astype(np.float32)\n n_batch = num // self.batch_size\n for j in range(n_batch):\n samples = sess.run(sample_imgs,\n feed_dict={noise_imgs: sample_noise[(j * self.batch_size):((j + 1) * self.batch_size)]})\n for i in range(len(samples)):\n print('index', j * self.batch_size + i)\n scipy.misc.imsave(\n '/home/ziyangcheng/python_save_file/temp_gan/output/generate/2/' + str(\n j * self.batch_size + i) + 'generate.png', samples[i])\n print('generate done!')\n\n\nif __name__ == '__main__':\n data_folder = '/home/ziyangcheng/datasets/faces'\n a_temp_gan = TemperedGAN(data_folder, [96, 96, 3], 32, 0.0001, 1024, 45, 32, 512)\n a_temp_gan.start_train()\n # a_temp_gan.generate(100)\n","sub_path":"temp_gan.py","file_name":"temp_gan.py","file_ext":"py","file_size_in_byte":13200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"295787208","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread(\"scene.jpg\")\nimg2 = cv2.imread(\"case.jpg\")\n\norb = cv2.ORB_create()\nkeypoints, des = orb.detectAndCompute(img, None)\nkeypoints2, des2 = orb.detectAndCompute(img2, None)\n\nbf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True)\n\nmatch = bf.match(des, des2)\nmatch = sorted(match, key = lambda x:x.distance)\n\nimg3 = cv2.drawMatches(img, keypoints, img2, keypoints2, match[:100], None, flags = 
2)\nplt.imshow(img3)\nplt.show()\n","sub_path":"featurematch.py","file_name":"featurematch.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"83798513","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\n\nclass SaleOrder(models.Model):\n _inherit = 'sale.order'\n\n fal_title = fields.Char(\"Title\")\n fal_attachment = fields.Binary(\n string='Customer PO Attachment', filestore=True)\n fal_attachment_name = fields.Char(string='Attachment name')\n fal_partner_contact_person_id = fields.Many2one(\n 'res.partner',\n 'Contact Person'\n )\n\n # sale archive\n active = fields.Boolean(\n 'Active', default=True,\n help=\"If unchecked, it will allow you to hide\\\n the Sale Order without removing it.\")\n\n @api.onchange('partner_id', 'company_id')\n def onchange_partner_id(self):\n res = super(SaleOrder, self).\\\n onchange_partner_id()\n partner = self.partner_id\n self.fal_partner_contact_person_id = partner.child_ids and \\\n partner.child_ids[0].id or False\n return res\n","sub_path":"fal_sale_additional_info/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"433505420","text":"import numpy as np\r\nimport sympy as sp\r\nfrom sympy.interactive import printing\r\nprinting.init_printing(use_latex=True)\r\nfrom sympy import Eq,solve_linear_system,Matrix, symbols, solve,dsolve\r\nfrom numpy import linalg\r\n\r\nx, y = symbols('x y')\r\neq1=sp.Function('eq1')\r\neq2=sp.Function('eq2')\r\neq0 = Eq(x + y - 5)\r\neq3 = Eq(x - y + 3)\r\nsol_dict = solve((eq0,eq3), (x, y))\r\n\r\neq1=Eq(2*x-y,-4)\r\neq2=Eq(3*x-1,-2)\r\n\r\nrow1=[2,-1,-4]\r\nrow2=[3,-1,-2]\r\nnrow1=[2,-1]\r\nnrow2=[3,-1]\r\nx=sp.symbols('x')\r\nf=sp.Function('f')(x)\r\ndiffeq=Eq(f.diff(x,x)-5*f,0)\r\n\r\nmat=np.array([nrow1,nrow2])\r\nconst=np.array([-4,-2])\r\nansw=linalg.solve(mat,const)\r\n\r\nsystem=Matrix((row1,row2))\r\na=solve_linear_system(system,x,y)\r\nprint(sol_dict,a)\r\nprint(mat)\r\nprint(answ)\r\nprint(diffeq)\r\n\r\n\r\n\r\n","sub_path":"Bases/Section2.py","file_name":"Section2.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"550582949","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.inicio, name='inicio'),\n path('quienes_somos/', views.quienes_somos, name='quienes_somos'),\n path('galeria/', views.galeria, name='galeria'),\n path('formulario/', views.formulario, name='formulario'),\n path('post_list', views.post_list, name='post_list'),\n]","sub_path":"adoptame/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"560221144","text":"import copy\n\nfrom recipe_transformer.HTMLScraper import HTMLScraper\nfrom recipe_transformer.RecipeParser import RecipeParser\nfrom recipe_transformer.models.Ingredient import Ingredient\nfrom recipe_transformer.models.Method import Method\nfrom recipe_transformer.models.Step import Step\nfrom recipe_transformer.transformers.Transformer import Transformer\n\n\nclass ItalianTransformer(Transformer):\n def __init__(self, recipe):\n super().__init__(recipe)\n\n def apply_transformation(self, direction):\n \"\"\"\n To convert to Italian:\n Replace a spice with oregano\n Use olive oil instead of other oil/butter\n Make fresh pasta, cook it, and use the original recipe as a topping\n \"\"\"\n self.transformed_recipe = copy.deepcopy(self.recipe)\n\n # Replace butter/oil with olive oil\n for ingredient in self.transformed_recipe.ingredients:\n if \"oil\" in ingredient.name:\n for step in self.transformed_recipe.steps:\n if ingredient.name in step.sentence:\n step.sentence = step.sentence.replace(\"oil\", \"olive oil\")\n for descriptor in ingredient.descriptors:\n step.sentence = step.sentence.replace(descriptor, \"\")\n for preparation in ingredient.preparations:\n step.sentence = step.sentence.replace(preparation, \"\")\n\n ingredient.name = \"olive oil\"\n ingredient.descriptors = [\"extra-virgin\"]\n ingredient.preparations = []\n\n # Replace a spice with oregano\n for ingredient in self.transformed_recipe.ingredients:\n if ingredient.category == 'spice':\n for step in self.transformed_recipe.steps:\n direction = step.sentence\n if ingredient.name in direction:\n # chain the replacements on step.sentence so each substitution keeps the previous one\n step.sentence = direction.replace(ingredient.name, \"dried oregano\")\n for descriptor in ingredient.descriptors:\n step.sentence = step.sentence.replace(descriptor, \"\")\n for preparation in ingredient.preparations:\n step.sentence = step.sentence.replace(preparation, \"\")\n\n ingredient.name = \"oregano\"\n ingredient.descriptors = [\"dried\"]\n ingredient.preparations = []\n break\n\n # Merge in pasta ingredients/tools/methods/steps\n pasta_recipe = self._get_pasta_recipe()\n self.transformed_recipe.add_ingredients(pasta_recipe.ingredients)\n self.transformed_recipe.add_tools(pasta_recipe.tools)\n self.transformed_recipe.add_methods(pasta_recipe.other_methods)\n self.transformed_recipe.add_steps(pasta_recipe.steps)\n\n # Combine the original recipe into the pasta\n combine_recipes = Step([Ingredient(self.transformed_recipe.name, 1, \"recipe\")], [Method(\"combine\")], [], \"\", \"Stir {0} into the pasta.\".format(\n self.transformed_recipe.name))\n self.transformed_recipe.add_step(combine_recipes)\n\n # Top with parmesan\n parmesan = Ingredient(\"parmesan cheese\", 16, \"oz\", [\"fresh\"], [\"grated\"])\n self.transformed_recipe.add_ingredient(parmesan)\n top_with_parmesan = Step([parmesan], [Method(\"top\")], [], \"\", \"Add grated parmesan to taste.\")\n self.transformed_recipe.add_step(top_with_parmesan)\n\n @staticmethod\n def _get_pasta_recipe():\n pasta_scraper = HTMLScraper(\n 
\"https://www.allrecipes.com/recipe/23703/plain-pasta/?internalSource=hub%20recipe&referringContentType=search%20results&clickId=cardslot%202\")\n pasta_parser = RecipeParser(pasta_scraper.html)\n pasta_recipe = pasta_parser.recipe\n pasta_recipe.add_step(Step([Ingredient(\"pasta\", 1, \"recipe\")], [], [Method(\"boil\")], \"3 minutes\", \"Boil the fresh pasta in a pot of salted water until \"\n \"al dente, \"\n \"roughly 3 minutes.\"))\n return pasta_recipe\n","sub_path":"recipe_transformer/transformers/ItalianTransformer.py","file_name":"ItalianTransformer.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"618175263","text":"import tablib\nimport toolz\nimport pytorch_lightning\nimport typing\nimport logging \n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"Tabular\"]\n\nclass Tabular(pytorch_lightning.loggers.base.LightningLoggerBase):\n def __init__(self,\n name: str=\"default\",\n version: int=0,\n ):\n super(Tabular, self).__init__() \n self._version = version\n self.file_name = name\n self.train_logs = tablib.Dataset()\n self.val_logs = tablib.Dataset()\n self.test_logs = tablib.Dataset()\n self.train_headers_written = False\n self.val_headers_written = False\n self.test_headers_written = False\n\n @property\n def name(self) -> str:\n return self.file_name\n\n def _append_train_losses(self,\n metrics: typing.Dict[str, typing.Any],\n epoch: int,\n step: int,\n ) -> None:\n loss = metrics['total_loss']\n train_metrics = toolz.dissoc(metrics, 'train', 'epoch', 'total_loss')\n if self.train_logs.headers is None and not self.train_headers_written:\n self.train_logs.headers = list(toolz.concat([\n [str('epoch'), str('iteration'), str('total_loss')],\n [k for k in train_metrics.keys()]\n ]))\n self.train_logs.append(list(\n toolz.concat([\n [epoch, step, loss],\n train_metrics.values()\n ])\n ))\n \n def _append_val_loss(self, \n metrics: typing.Dict[str, typing.Any],\n epoch: int,\n step: int,\n ) -> None:\n if self.val_logs.headers is None and not self.val_headers_written:\n self.val_logs.headers = list(toolz.concat([\n [str('epoch'), str('iteration')],\n [k for k in metrics.keys()]\n ]))\n self.val_headers = self.val_logs.headers\n self.val_logs.append(list(\n toolz.concat([\n [epoch, step],\n [metrics[k] for k in self.val_headers if k in metrics]\n ])\n ))\n\n def _append_test_metrics(self, \n metrics: typing.Dict[str, typing.Any],\n step: int,\n ) -> None:\n if self.test_logs.headers is None and not self.test_headers_written:\n self.test_logs.headers = list(toolz.concat([\n [str('iteration')],\n [k for k in metrics.keys()]\n ]))\n self.test_headers = self.test_logs.headers\n self.test_logs.append(list(\n toolz.concat([\n [step],\n [metrics[k] for k in self.test_headers if k in metrics]\n ])\n ))\n\n @pytorch_lightning.loggers.base.rank_zero_only\n def log_metrics(self, \n metrics: typing.Dict[str, typing.Any],\n step: int\n ) -> None:\n train_metrics = toolz.keymap(lambda k: k.replace('train_', ''), \n toolz.keyfilter(lambda k: k.startswith('train_'), metrics)\n )\n val_metrics = toolz.keymap(lambda k: k.replace('val_', ''), \n toolz.keyfilter(lambda k: k.startswith('val_'), metrics)\n )\n test_metrics = toolz.keymap(lambda k: k.replace('test_', '').replace('/epoch_0', ''), \n toolz.keyfilter(lambda k: k.startswith('test_'), metrics)\n )\n if train_metrics:\n self._append_train_losses(\n toolz.assoc(train_metrics, 'total_loss', metrics['total_loss']), \n metrics['epoch'],\n step\n )\n 
elif test_metrics: \n self._append_test_metrics(test_metrics, step)\n return\n if val_metrics:\n self._append_val_loss(val_metrics, metrics['epoch'], step)\n\n @pytorch_lightning.loggers.base.rank_zero_only\n def log_hyperparams(self,\n params: typing.Dict[str, typing.Any] #TODO or namespace object ?\n ) -> None:\n \"\"\"Record hyperparameters\n :param params: argparse.Namespace containing the hyperparameters\n \"\"\"\n data = tablib.Dataset()\n data.headers = [k for k in dict(params).keys()]\n data.append([v for v in dict(params).values()])\n with open(self.name + \"_hparams.yaml\", 'w') as f:\n f.write(data.export('yaml'))\n\n @pytorch_lightning.loggers.base.rank_zero_only\n def save(self) -> None:\n if self.train_logs.height:\n \"\"\"Save train log data\"\"\"\n with open(self.name + \"_train.csv\", 'a', newline='') as f:\n f.write(self.train_logs.export('csv'))\n if not self.train_headers_written and self.train_logs.headers is not None:\n self.train_headers_written = True\n self.train_logs.wipe() \n if self.val_logs.height:\n \"\"\"Save val log data\"\"\"\n with open(self.name + \"_val.csv\", 'a', newline='') as f:\n f.write(self.val_logs.export('csv'))\n if not self.val_headers_written and self.val_logs.headers is not None:\n self.val_headers_written = True\n self.val_logs.wipe()\n if self.test_logs.height:\n \"\"\"Save val log data\"\"\"\n with open(self.name + \"_test.csv\", 'a', newline='') as f:\n f.write(self.test_logs.export('csv'))\n if not self.test_headers_written and self.test_logs.headers is not None:\n self.test_headers_written = True\n self.test_logs.wipe()\n\n @pytorch_lightning.loggers.base.rank_zero_only\n def finalize(self, \n status: str\n ) -> None:\n \"\"\"Do any processing that is necessary to finalize an experiment\n :param status: Status that the experiment finished with (e.g. success, failed, aborted)\n \"\"\"\n self.save()\n self.close()\n\n @property\n def rank(self) -> int:\n \"\"\"\n Process rank. In general, metrics should only be logged by the process\n with rank 0\n \"\"\"\n return self._rank\n\n @rank.setter\n def rank(self, value: int) -> None:\n \"\"\"Set the process rank\"\"\"\n self._rank = value\n\n @property\n def version(self) -> int:\n \"\"\"Return the experiment version\"\"\"\n return self._version\n\n @property\n def experiment(self) -> typing.Any:\n return self.name","sub_path":"moai/log/lightning/loggers/tabular.py","file_name":"tabular.py","file_ext":"py","file_size_in_byte":6371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"40439321","text":"'''\nCalculates the sky transmission and emission at the observed lambda\nand computes the ADR if requested\n'''\nimport os\nimport logging\n\nimport numpy as np\n\nfrom config import *\nfrom modules.misc_utils import path_setup\nfrom modules.rebin import *\nfrom modules.adr import apply_adr\n\nimport matplotlib.pylab as plt\n\nbgpath = path_setup('../../' + config_data[\"data_dir\"] + 'sky/')\n\n\ndef sky_background(lambs, air_mass, dit, debug_plots, output_file):\n\t'''Function that generates a sky background curve combining\n\tsky continuum, sky thermal emission and sky emission lines.\n\t\n\tInputs:\n\t\tlambs: array of wavelengths for datacube\n\t\tair_mass: Air mass of the observation\n\n\t\tdit: exposure time [s]. 
This determines how the sky emission\n\t\tline amplitudes vary through the exposure.\n\t\t\n\tOutputs:\n\t\tsky_radiance: array of total sky background for DIT\n\t\t\t[units of photons/m^2/um/arcsec^2]\n\t'''\n\tinbuilt_airmasses = [1.1, 1.3, 1.5, 2.0]\n\tif air_mass not in inbuilt_airmasses:\n\t\traise HSIMError('Error: ' + str(air_mass) + ' is not a valid air_mass. Valid options are: ' + \",\".join([str(_) for _ in inbuilt_airmasses]))\n\n\n\t#determine the closest data to the airmass value given and find its location in the data file\n\tclosest_X = min(inbuilt_airmasses, key=lambda x:abs(x - air_mass))\n\tdata_index = inbuilt_airmasses.index(closest_X) + 1\n\n\t#load the sky emission file, then reduce to the columns required\n\tsky_em_all_X = np.genfromtxt(os.path.join(bgpath, 'radiance.txt'), delimiter=',')\n\t\n\tsky_em_lambda = sky_em_all_X[:,0]\n\tsky_em_flux = sky_em_all_X[:,data_index]\n\n\t# rebin sky emission\n\tsky_radiance = dit*rebin1d(lambs, sky_em_lambda, sky_em_flux)\n\t\t\n\tif debug_plots:\n\t\tplt.clf()\n\t\tmask_plot = (sky_em_lambda > lambs[0])*(sky_em_lambda < lambs[-1])\n\t\tplt.plot(sky_em_lambda[mask_plot], dit*sky_em_flux[mask_plot], label=\"Skycalc 0.15A\")\n\t\tplt.plot(lambs, sky_radiance, label=\"rebin\")\n\t\tplt.legend()\n\t\tplt.xlabel(r\"wavelength [$\\mu$m]\")\n\t\tplt.ylabel(r\"sky emission [photons/m$^2$/$\\mu$m/arcsec$^2$]\")\n\t\tplt.savefig(output_file + \"_sky_em.pdf\")\n\t\tnp.savetxt(output_file + \"_sky_em.txt\", np.c_[lambs, sky_radiance])\n\n\t\n\treturn sky_radiance\n\n\ndef moon_background(lambs, moon, dit, debug_plots, output_file):\n\t'''Function that generates a moon background curve\n\t\n\tInputs:\n\t\tlambs: array of wavelengths for datacube\n\t\tmoon: Fractional moon illumination\n\n\t\tdit: exposure time [s]. This determines how the moon emission\n\t\tscales with the exposure.\n\t\t\n\tOutputs:\n\t\tmoon_radiance: array of total moon background for DIT\n\t\t\t[units of photons/m^2/um/arcsec^2]\n\t'''\n\t\n\tif moon not in [0., 0.5, 1.0]:\n\t\traise HSIMError('Error: ' + str(moon) + ' is not a valid Moon illumination. 
Valid options are: 0, 0.5, 1.0')\n\t\n\tif moon > 0.:\n\t\tinbuilt_moon = [0.5, 1.0]\n\n\t\t#determine the closest data to the moon illumination value given and find its location in the data file\n\t\tclosest_X = min(inbuilt_moon, key=lambda x:abs(x - moon))\n\t\tdata_index = inbuilt_moon.index(closest_X) + 1\n\n\t\t#load the moon emission file, then reduce to the columns required\n\t\tmoon_em_all_X = np.genfromtxt(os.path.join(bgpath, 'moon.txt'), delimiter=',')\n\t\t\n\t\tmoon_em_lambda = moon_em_all_X[:,0]\n\t\tmoon_em_flux = moon_em_all_X[:,data_index]\n\n\t\t# rebin moon emission\n\t\tmoon_radiance = dit*rebin1d(lambs, moon_em_lambda, moon_em_flux)\n\telse:\n\t\tmoon_em_lambda = lambs\n\t\tmoon_em_flux = lambs*0.\n\t\tmoon_radiance = moon_em_flux\n\t\n\tif debug_plots:\n\t\tplt.clf()\n\t\tmask_plot = (moon_em_lambda > lambs[0])*(moon_em_lambda < lambs[-1])\n\t\tplt.plot(moon_em_lambda[mask_plot], dit*moon_em_flux[mask_plot], label=\"Skycalc 0.15A\")\n\t\tplt.plot(lambs, moon_radiance, label=\"rebin\")\n\t\tplt.legend()\n\t\tplt.xlabel(r\"wavelength [$\\mu$m]\")\n\t\tplt.ylabel(r\"moon emission [photons/m$^2$/$\\mu$m/arcsec$^2$]\")\n\t\tplt.savefig(output_file + \"_moon_em.pdf\")\n\t\tnp.savetxt(output_file + \"_moon_em.txt\", np.c_[lambs, moon_radiance])\n\n\treturn moon_radiance\n\n\n#Sky throughput curve generated just using wavelength array.\ndef sky_transmission(lambs, air_mass, debug_plots, output_file):\n\t'''Function that generates a full throughput curve combining\n\tsky transmission & sky extinction.\n\n\tInputs:\n\t\tlambs: array of wavelengths for datacube\n\t\tair_mass: Air mass of the observation\n\n\tOutputs:\n\t\tcube_total_sky_trans: array of total throughput\n\t\t\tfor each wavelength value in lambs\n\t'''\n\t#airmass values available in the pre-computed data files\n\tinbuilt_airmasses = [1.1, 1.3, 1.5, 2.0]\n\tif air_mass not in inbuilt_airmasses:\n\t\traise HSIMError('Error: ' + str(air_mass) + ' is not a valid air_mass. 
Valid options are: ' + \",\".join([str(_) for _ in inbuilt_airmasses]))\n\n\t#determine the closest data to the airmass value given and find its location in the data file\n\tclosest_X = min(inbuilt_airmasses, key=lambda x:abs(x - air_mass))\n\tdata_index = inbuilt_airmasses.index(closest_X) + 1\n\n\t#load the sky transmission file, then reduce to the columns required\n\tsky_trans_all_X = np.genfromtxt(os.path.join(bgpath, 'transmission.txt'), delimiter=',')\n\t\n\tsky_tr_lambda = sky_trans_all_X[:,0]\n\tsky_tr = sky_trans_all_X[:,data_index]\n\n\tfinal_tr = rebin1d(lambs, sky_tr_lambda, sky_tr)\n\n\tif debug_plots:\n\t\tplt.clf()\n\t\tmask_plot = (sky_tr_lambda > lambs[0])*(sky_tr_lambda < lambs[-1])\n\t\tplt.plot(sky_tr_lambda[mask_plot], sky_tr[mask_plot], label=\"Skycalc 0.15A\")\n\t\tplt.plot(lambs, final_tr, label=\"rebin\")\n\t\tplt.legend()\n\t\tplt.xlabel(r\"wavelength [$\\mu$m]\")\n\t\tplt.ylabel(r\"sky transmission\")\n\t\tplt.savefig(output_file + \"_sky_tr.pdf\")\n\t\tnp.savetxt(output_file + \"_sky_tr.txt\", np.c_[lambs, final_tr])\n\n\t\n\treturn final_tr\n\t\n\n\n\ndef sim_sky(cube, back_emission, header, ext_lambs, cube_lamb_mask, DIT, air_mass, moon, site_temp, adr_switch, debug_plots=False, output_file=\"\"):\n\t''' Simulates sky effects\n\tInputs:\n\t\tcube: Input datacube (RA, DEC, lambda)\n\t\tback_emission: Input background emission outside of the FoV\n\t\theader: FITS header\n\t\text_lambs: extended lambda array [um]\n\t\tcube_lamb_mask: mask array to get the lambs of the cube\n\t\tDIT: Exposure time [s]\n\t\tair_mass: Air mass of the observation\n\t\tmoon: Fractional moon illumination\n\t\tsite_temp: Telescope temperature [K]\n\t\tadr_switch: Boolean - turn ADR on or off\n\t\tdebug_plots: Produce debug plots\n\t\toutput_file: File name for debug plots\n\tOutputs:\n\t\tcube: Cube including sky emission, transmission and ADR\n\t\tback_emission: back_emission including sky\n\t'''\n\t\n\t# Get sky transmission\n\tlogging.info(\"Calculating sky transmission\")\n\tsky_trans = sky_transmission(ext_lambs, air_mass, debug_plots, output_file)\n\t\n\t# Get sky emission (lines + continuum)\n\tlogging.info(\"Calculating sky emission\")\n\tsky_emission = sky_background(ext_lambs, air_mass, DIT, debug_plots, output_file)\n\t\n\t# Get moon emission\n\tlogging.info(\"Calculating Moon emission\")\n\tmoon_emission = moon_background(ext_lambs, moon, DIT, debug_plots, output_file)\n\tback_emission = back_emission + sky_emission + moon_emission\n\t\n\n\t# Add sky emission/transmission to the input cube\n\tsky_trans_cube = sky_trans[cube_lamb_mask]\n\tsky_trans_cube.shape = (np.sum(cube_lamb_mask),1,1)\n\tcube *= sky_trans_cube\n\n\tsky_emission_cube = sky_emission[cube_lamb_mask] + moon_emission[cube_lamb_mask]\n\tsky_emission_cube.shape = (np.sum(cube_lamb_mask),1,1)\n\tcube += sky_emission_cube\n\t\n\t# Add atmospheric differential refraction\n\tif adr_switch == \"True\":\n\t\tlogging.info(\"Calculating ADR\")\n\t\tlambs = ext_lambs[cube_lamb_mask]\n\t\tcube = apply_adr(cube, header, lambs, site_temp, air_mass, debug_plots=False, output_file=output_file)\n\t\t\n\t\t\n\treturn cube, back_emission\n\n","sub_path":"hsim/src/sim_sky.py","file_name":"sim_sky.py","file_ext":"py","file_size_in_byte":7429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"348407773","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 29 19:13:45 2021\r\n\r\n@author: HP\r\n\"\"\"\r\n'''\r\nfast solution\r\n\r\nfrom collections import 
Counter\r\n\r\ndef isValid(s):\r\n c = Counter(Counter(s).values())\r\n if len(c)==1:\r\n return \"YES\"\r\n if len(c)>2:\r\n return \"NO\"\r\n if 1 in c.values() and (c[min(c.keys())]==1 or (max(c.keys()) - min(c.keys())==1)):\r\n return \"YES\"\r\n else:\r\n return \"NO\"\r\n\r\n'''\r\n\r\ndef isValid(s):\r\n b = set(s)\r\n lst=[]\r\n if len(b)==len(s):\r\n return \"YES\"\r\n for i in b:\r\n lst.append(s.count(i))\r\n myset= set(lst)\r\n mylst=sorted(myset)\r\n if len(mylst)>2:\r\n return \"NO\"\r\n else:\r\n # guard: all characters occur with the same frequency\r\n if len(mylst)==1:\r\n return \"YES\"\r\n x = mylst[0]\r\n y = mylst[1]\r\n if x+1==y:\r\n if (lst.count(x)==1 or lst.count(y)==1):\r\n return \"YES\"\r\n else:\r\n return \"NO\"\r\n else:\r\n if x==1 and lst.count(x)==1:\r\n return \"YES\"\r\n else:\r\n return \"NO\"\r\n \r\nprint(isValid('aabbccddeefghi'))\r\nprint(isValid('aaaaabc'))\r\nprint(isValid(\"a\"))","sub_path":"sherlockAnd_string_validation.py","file_name":"sherlockAnd_string_validation.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"544162756","text":"'''\n Read data from a BME280 sensor and publish it once to the specified\n local MQTT server (bridge).\n\n'''\nimport sys, os, re\nimport json\n\nimport argparse\n\nimport paho.mqtt.client as mqtt # import the MQTT library\nfrom time import sleep # used for the 3-second wait\n\nimport bme280_sample as BME280 # modified version of the Switch Science sample\n\nSCRIPT_NAME = os.path.basename(__file__)\nCLIENT_ID = os.uname()[1] + \"_\" + SCRIPT_NAME # client ID (note: it must be unique)\nMQTT_HOST = \"\"\nMQTT_PORT = 1883\nKEEP_ALIVE = 60\nTOPIC = \"\"\nQOS = 1\n\n# called when the client has connected to the broker\ndef on_connect(client, userdata, flag, rc):\n print(\"Connected with result code \" + str(rc))\n return\n\n# called when the broker disconnects\ndef on_disconnect(client, userdata, rc):\n if rc != 0:\n print(\"Unexpected disconnection.\")\n else:\n print(\"disconnected.\")\n return\n\n# called when a publish completes\ndef on_publish(client, userdata, mid):\n print(\"publish: {0}\".format(mid))\n return\n\n# main function; called from the if statement at the end of the file\ndef main():\n myBME280 = BME280.BME280()\n myBME280.setup()\n myBME280.get_calib_param()\n\n client = mqtt.Client(clean_session=True) # create an instance of the client class\n client.on_connect = on_connect # register the on-connect callback\n client.on_disconnect = on_disconnect # register the on-disconnect callback\n client.on_publish = on_publish # callback for when a message is sent\n\n client.connect(MQTT_HOST, MQTT_PORT, KEEP_ALIVE)\n\n # start the network loop\n client.loop_start()\n\n # read the data from the sensor\n _d = myBME280.readData()\n _d[\"sensor\"] = \"BME280\"\n # serialize to valid JSON with json.dumps before publishing.\n _jd = json.dumps(_d)\n print(_jd)\n client.publish(TOPIC, _jd, QOS) # publish the message to the chosen topic\n client.disconnect()\n return\n\nif __name__ == '__main__': # call main() only when this file is not imported\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--hostname\", type=str, default=\"localhost\", help=\"hostname or ip\")\n parser.add_argument(\"--port\", type=int, default=1883, help=\"Port number override\")\n parser.add_argument(\"--keepalive\", type=int, default=60, help=\"\")\n parser.add_argument(\"--topic\", type=str, default=\"l2l/test\", help=\"Targeted topic\")\n parser.add_argument(\"--qos\", type=int, default=1, help=\"0,1,2\")\n\n args = parser.parse_args()\n MQTT_HOST = args.hostname\n MQTT_PORT = args.port\n KEEP_ALIVE = args.keepalive\n TOPIC = args.topic\n QOS = args.qos\n\n main() # invoke the main function\n","sub_path":"bme280_local_pub_once.py","file_name":"bme280_local_pub_once.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
+{"seq_id":"645984157","text":"import unittest\nimport os\nfrom knapsack import knapsack\n\ndef read_items(symbols_path,sep=\"\\t\"):\n items = []\n line_number = 0 \n sack_size = None\n with open(symbols_path) as f: \n for line in f: \n if line_number == 0:\n sack_size = int(line.split(sep=sep)[0])\n line_number+=1\n continue\n line_array = line.strip(\"\\n\").split(sep=sep)\n item_value = int(line_array[0])\n item_weight = int(line_array[1])\n items.append((item_value,item_weight))\n line_number+=1\n \n return sack_size,items\n\ndef get_assignment_answer(result_encoding):\n min_len = float(\"inf\")\n max_len = float(\"-inf\")\n for symbol in result_encoding:\n\n symbol_len = len(result_encoding[symbol])\n min_len = len(result_encoding[symbol]) if symbol_len < min_len else min_len \n max_len = len(result_encoding[symbol]) if symbol_len > max_len else max_len \n\n return [max_len,min_len]\n\ndef get_test_inputs(path):\n inputs = []\n for file in os.listdir(path):\n if \"input\" in file:\n inputs.append(os.path.join(path, file))\n inputs.sort()\n return inputs\n\ndef read_output(test_case_path):\n with open(test_case_path.replace(\"input\",\"output\")) as f: \n for answer in f: \n return int(answer) \n\n\nclass TestKnapsack(unittest.TestCase):\n def test_coursera_cases(self):\n test_cases_path = 'greedy_algorithms_mst_dynamic_programming/week4/test_cases'\n test__files = get_test_inputs(test_cases_path)\n for test_input in test__files:\n print(\"Testing \"+ test_input)\n test_case = read_items(test_input,sep=\" \")\n expected = read_output(test_input)\n\n final_answer = knapsack(test_case[0],test_case[1])\n\n self.assertEqual(expected,final_answer)\n\n print(\"Test OK\")\n \n def test_assigment1(self):\n print(\"Testing Assigment 1\")\n test_input = 'greedy_algorithms_mst_dynamic_programming/week4/knapsack/assigment1.txt'\n test_case = read_items(test_input,\" \")\n \n final_answer = knapsack(test_case[0],test_case[1])\n\n \n\n print(\"Final Answer: {}\".format(final_answer))\n\n def test_assigment2(self):\n print(\"Testing Assigment 2\")\n test_input = 'greedy_algorithms_mst_dynamic_programming/week4/knapsack/assigment2.txt'\n test_case = read_items(test_input,\" \")\n \n final_answer = knapsack(test_case[0],test_case[1])\n\n \n\n print(\"Final Answer: {}\".format(final_answer))\n\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"greedy_algorithms_mst_dynamic_programming/week4/knapsack/test_knapsack.py","file_name":"test_knapsack.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"525372312","text":"from wsresource import WsResource\nfrom ..services.jobservice import JobServiceFactory\nfrom ..models.job import Job\n\nclass JobResource(WsResource):\n def __init__(self, root):\n WsResource.__init__(self, root)\n self._service = JobServiceFactory.build()\n\n '''\n schedule a new job\n -- curl \"http://127.0.0.1:6800/jobs?name=abc&version=1&spider=spider2\" -X POST\n '''\n def render_POST(self, txrequest):\n name = txrequest.args[b'name'][0].decode('utf-8')\n version = txrequest.args[b'version'][0].decode('utf-8')\n spider = txrequest.args[b'spider'][0].decode('utf-8')\n\n job = Job(name, version, spider)\n\n job_id = self._service.post(job)\n\n return {'job_id': job_id}\n\n\n '''\n cancel an existing job\n '''\n def render_DELETE(self, txrequest):\n txrequest.setResponseCode(501)\n return None\n\n '''\n list all jobs\n -- curl 
\"http://127.0.0.1:6800/jobs?name=abc&version=1&spider=spider1\" -X GETALL\n '''\n def render_GETALL(self, txrequest):\n status = None\n if b'status' in txrequest.args:\n status = txrequest.args[b'status'][0].decode('utf-8')\n\n data = self._service.getall(status)\n return map(lambda p : self.__to_json(p), data)\n\n\n def __to_json(self, job):\n return {\n 'name': job.project_name,\n 'version': job.project_version,\n 'spider': job.spider,\n 'job_id': job.job_id,\n 'status': job.status,\n 'priority': job.priority,\n 'updated_at': int(job.updated_at.strftime(\"%s\")) * 1000,\n 'created_at': int(job.created_at.strftime(\"%s\")) * 1000\n }","sub_path":"scrapyd/resources/jobresource.py","file_name":"jobresource.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"590160140","text":"# Tutorial example. Doesn't depend on any third party GUI framework.\n# Tested with CEF Python v57.0+\n\nfrom cefpython3 import cefpython as cef\nimport base64\nimport platform\nimport sys\nimport os\nimport threading\nimport traceback\n\n\ndef main():\n check_versions()\n sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error\n\n settings = {\n # \"remote_debugging_port\": 49152,\n }\n\n cef.SetGlobalClientCallback(\"OnAfterCreated\", on_after_create)\n\n cef.Initialize(settings=settings)\n\n browser = cef.CreateBrowserSync(url='file:///rsrc/index.html',\n window_title=\"2. Handlers\")\n clientHandler = ClientHandler()\n browser.SetClientHandler(clientHandler)\n\n bindings = cef.JavascriptBindings()\n testObject = TestObject()\n bindings.SetObject(\"testObject\", testObject)\n browser.SetJavascriptBindings(bindings)\n\n cef.MessageLoop()\n cef.Shutdown()\n\n\ndef on_after_create(browser, **_):\n print(\"Created\")\n pass\n\n\nclass ClientHandler(object):\n def OnLoadingStateChange(self, browser, is_loading, **_):\n print(\"Loading : {0}\".format(is_loading))\n if not is_loading:\n browser.ExecuteFunction(\"test_function\")\n browser.ExecuteFunction(\"call_test_object\")\n\n def OnConsoleMessage(self, browser, message, **_):\n print(\"JS Console Message : {0}\".format(message))\n\n\ndef check_versions():\n ver = cef.GetVersion()\n print(\"[tutorial.py] CEF Python {ver}\".format(ver=ver[\"version\"]))\n print(\"[tutorial.py] Chromium {ver}\".format(ver=ver[\"chrome_version\"]))\n print(\"[tutorial.py] CEF {ver}\".format(ver=ver[\"cef_version\"]))\n print(\"[tutorial.py] Python {ver} {arch}\".format(\n ver=platform.python_version(),\n arch=platform.architecture()[0]))\n assert cef.__version__ >= \"57.0\", \"CEF Python v57.0+ required to run this\"\n\n\ndef jsWrap(function):\n '''Wrapper to handle returns and exceptions'''\n\n def wrappedFunction(callObject, successCallback, exceptionCallback, *args):\n try:\n result = function(args)\n successCallback.Call(result)\n except Exception as exception:\n # Get the traceback info\n exc_tb = sys.exc_info()[2]\n tracebackList = traceback.extract_tb(exc_tb, 5)\n\n formattedTracebackList = []\n for tracebackEntry in tracebackList:\n formattedTracebackList.append({\n \"name\": tracebackEntry.name,\n \"filename\": tracebackEntry.filename,\n \"lineNumber\": tracebackEntry.lineno,\n \"line\": tracebackEntry.line\n })\n\n formattedException = {\n \"type\": type(exception),\n \"args\": exception.args,\n \"message\": str(exception),\n \"traceback\": formattedTracebackList\n }\n\n exceptionCallback.Call(formattedException)\n\n return wrappedFunction\n\n\nclass 
TestObject(object):\n def test_method(self):\n print(\"Test method called on Python TestObject\")\n return [1, 2, 3, 4]\n\n def test_method_throws_exception(self):\n raise (Exception(\"Here's an exception\"))\n\n testMethod = jsWrap(test_method)\n testMethodThrowsException = jsWrap(test_method_throws_exception)\n\n\nif __name__ == '__main__':\n main()","sub_path":"scratchpads/cef/bridged.py","file_name":"bridged.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"411114723","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpRequest\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nimport cx_Oracle\nimport os\nfrom django.views import generic\nfrom django.db import connections\n\n\ndef remainders(request, msisdn):\n cursor = connections['ppcdb'].cursor()\n cur = cursor.connection.cursor()\n refCursor = cursor.connection.cursor()\n\n i_subs_id=None\n i_client_app_type=\"MyTcell_Lite_Web\"\n i_client_app_version=\"v1\"\n i_int_request_id=1\n o_exit_location_id = cur.var(cx_Oracle.STRING)\n o_responce_id = cur.var(cx_Oracle.NUMBER)\n o_result = cur.var(cx_Oracle.NUMBER)\n o_err_msg = cur.var(cx_Oracle.STRING)\n\n cur.callproc('mytcell_lite_pack.get_subs_pack_remainders', (i_subs_id, msisdn, i_client_app_type, i_client_app_version, \n i_int_request_id, refCursor, o_exit_location_id, o_responce_id, o_result, o_err_msg) )\n\n remainders={}\n minutes_simple=[]\n minutes_unlim=[]\n sms_simple=[]\n sms_unlim=[]\n gprs_simple=[]\n gprs_unlim=[]\n columns = [i[0] for i in refCursor.description]\n\n for row in refCursor:\n if row[2]==1:\n minutes_simple.append(dict(zip(columns, row)))\n elif row[2]==2:\n sms_simple.append(dict(zip(columns, row)))\n elif row[2]==3:\n gprs_simple.append(dict(zip(columns, row)))\n elif row[2]==4:\n minutes_unlim.append(dict(zip(columns, row)))\n elif row[2]==5:\n sms_unlim.append(dict(zip(columns, row)))\n elif row[2]==6:\n gprs_unlim.append(dict(zip(columns, row)))\n\n remainders={\n 'minutes': {'minutes_simple': minutes_simple, \n 'minutes_unlim':minutes_unlim\n },\n 'sms': {'sms_simple': sms_simple, \n 'sms_unlim':sms_unlim\n },\n 'gprs': {'gprs_simple': gprs_simple, \n 'gprs_unlim': gprs_unlim\n }\n }\n\n remainders['results']=[{\n 'o_exit_location_id' : o_exit_location_id.getvalue(),\n 'o_responce_id' : int(o_responce_id.getvalue()),\n 'err_code' : int(o_result.getvalue()),\n 'err_msg' : o_err_msg.getvalue()\n }]\n\n cur.close()\n cursor.close()\n\n return render(request, \"mytcell_lite_app/ostatki.html\", context=remainders)\n\n","sub_path":"mytcell_lite_app/remainders_view.py","file_name":"remainders_view.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"634122936","text":"import logging, os, sys, time, requests, json\nfrom datetime import datetime\nfrom multiprocessing import Process, Queue\nfrom urllib.parse import urlparse\nimport pandas as pd\nimport sqlalchemy as s\nfrom sqlalchemy import MetaData\nfrom sqlalchemy.ext.automap import automap_base\nfrom workers.worker_base import Worker\n\nclass RepoInfoWorker(Worker):\n def __init__(self, config):\n \n # Define what this worker can be given and know how to interpret\n given = [['github_url']]\n models = ['repo_info']\n\n # Define the tables needed to insert, 
update, or delete on\n data_tables = ['repo_info']\n operations_tables = ['worker_history', 'worker_job']\n\n # Run the general worker initialization\n super().__init__(config, given, models, data_tables, operations_tables)\n\n # Define data collection info\n self.tool_source = 'Repo Info Worker'\n self.tool_version = '0.0.1'\n self.data_source = 'GitHub API'\n\n def repo_info_model(self, task, repo_id):\n\n github_url = task['given']['github_url']\n\n logging.info(\"Beginning filling the repo_info model for repo: \" + github_url + \"\\n\")\n\n owner, repo = self.get_owner_repo(github_url)\n\n url = 'https://api.github.com/graphql'\n\n query = \"\"\"\n {\n repository(owner:\"%s\", name:\"%s\"){\n updatedAt\n hasIssuesEnabled\n issues(states:OPEN) {\n totalCount\n }\n hasWikiEnabled\n forkCount\n defaultBranchRef {\n name\n }\n watchers {\n totalCount\n }\n id\n licenseInfo {\n name\n url\n }\n stargazers {\n totalCount\n }\n codeOfConduct {\n name\n url\n }\n issue_count: issues {\n totalCount\n }\n issues_closed: issues(states:CLOSED) {\n totalCount\n }\n pr_count: pullRequests {\n totalCount\n }\n pr_open: pullRequests(states: OPEN) {\n totalCount\n }\n pr_closed: pullRequests(states: CLOSED) {\n totalCount\n }\n pr_merged: pullRequests(states: MERGED) {\n totalCount\n }\n ref(qualifiedName: \"master\") {\n target {\n ... on Commit {\n history(first: 0){\n totalCount\n }\n }\n }\n }\n }\n }\n \"\"\" % (owner, repo)\n\n # Hit the graphql endpoint and retry 3 times in case of failure\n num_attempts = 0\n success = False\n while num_attempts < 3:\n logging.info(\"Hitting endpoint: {} ...\\n\".format(url))\n r = requests.post(url, json={'query': query}, headers=self.headers)\n self.update_gh_rate_limit(r)\n\n try:\n data = r.json()\n except ValueError:\n data = json.loads(json.dumps(r.text))\n\n if 'errors' in data:\n logging.info(\"Error!: {}\".format(data['errors']))\n # GraphQL returns 'errors' as a list of error objects\n if data['errors'][0]['message'] == 'API rate limit exceeded':\n self.update_gh_rate_limit(r)\n continue\n\n if 'data' in data:\n success = True\n data = data['data']['repository']\n break\n else:\n logging.info(\"Request returned a non-data dict: {}\\n\".format(data))\n if data['message'] == 'Not Found':\n logging.info(\"Github repo was not found or does not exist for endpoint: {}\\n\".format(url))\n break\n if data['message'] == 'You have triggered an abuse detection mechanism. 
Please wait a few minutes before you try again.':\n self.update_gh_rate_limit(r, temporarily_disable=True)\n continue\n if data['message'] == 'Bad credentials':\n self.update_gh_rate_limit(r, bad_credentials=True)\n continue\n num_attempts += 1\n if not success:\n self.register_task_failure(task, repo_id, \"Failed to hit endpoint: {}\".format(url))\n return\n\n # Get committers count info that requires a separate endpoint\n committers_count = self.query_committers_count(owner, repo)\n\n # Put all data together in format of the table\n logging.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\\n')\n rep_inf = {\n 'repo_id': repo_id,\n 'last_updated': data['updatedAt'] if 'updatedAt' in data else None,\n 'issues_enabled': data['hasIssuesEnabled'] if 'hasIssuesEnabled' in data else None,\n 'open_issues': data['issues']['totalCount'] if data['issues'] else None,\n 'pull_requests_enabled': None,\n 'wiki_enabled': data['hasWikiEnabled'] if 'hasWikiEnabled' in data else None,\n 'pages_enabled': None,\n 'fork_count': data['forkCount'] if 'forkCount' in data else None,\n 'default_branch': data['defaultBranchRef']['name'] if data['defaultBranchRef'] else None,\n 'watchers_count': data['watchers']['totalCount'] if data['watchers'] else None,\n 'UUID': None,\n 'license': data['licenseInfo']['name'] if data['licenseInfo'] else None,\n 'stars_count': data['stargazers']['totalCount'] if data['stargazers'] else None,\n 'committers_count': committers_count,\n 'issue_contributors_count': None,\n 'changelog_file': None,\n 'contributing_file': None,\n 'license_file': data['licenseInfo']['url'] if data['licenseInfo'] else None,\n 'code_of_conduct_file': data['codeOfConduct']['url'] if data['codeOfConduct'] else None,\n 'security_issue_file': None,\n 'security_audit_file': None,\n 'status': None,\n 'keywords': None,\n 'commit_count': data['ref']['target']['history']['totalCount'] if data['ref'] else None,\n 'issues_count': data['issue_count']['totalCount'] if data['issue_count'] else None,\n 'issues_closed': data['issues_closed']['totalCount'] if data['issues_closed'] else None,\n 'pull_request_count': data['pr_count']['totalCount'] if data['pr_count'] else None,\n 'pull_requests_open': data['pr_open']['totalCount'] if data['pr_open'] else None,\n 'pull_requests_closed': data['pr_closed']['totalCount'] if data['pr_closed'] else None,\n 'pull_requests_merged': data['pr_merged']['totalCount'] if data['pr_merged'] else None,\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': self.data_source\n }\n\n result = self.db.execute(self.repo_info_table.insert().values(rep_inf))\n logging.info(f\"Primary Key inserted into repo_info table: {result.inserted_primary_key}\\n\")\n self.results_counter += 1\n\n logging.info(f\"Inserted info for {owner}/{repo}\\n\")\n\n #Register this task as completed\n self.register_task_completion(task, repo_id, \"repo_info\")\n\n def query_committers_count(self, owner, repo):\n logging.info('Querying committers count\\n')\n url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100'\n committers = 0\n\n try:\n while True:\n r = requests.get(url, headers=self.headers)\n self.update_gh_rate_limit(r)\n committers += len(r.json())\n\n if 'next' not in r.links:\n break\n else:\n url = r.links['next']['url']\n except Exception:\n logging.exception('An error occurred while querying contributor count\\n')\n\n return 
committers\n\n","sub_path":"workers/repo_info_worker/repo_info_worker/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":8540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"261157539","text":"# Template Tag\r\nfrom datetime import date, datetime, timedelta\r\nfrom stswim.schedule.models import *\r\nimport calendar\r\n\r\nfrom django import template\r\n\r\nregister = template.Library()\r\n#\r\n# TODO\r\n# - Write logic to prevent next/previous month buttons from breaking. (going out of range 1-12, going out of range of season)\r\n# - Clean up context variables to template\r\n# - Cleaner way to find deltas?\r\n#\t\r\ndef season_month_cal(context):\r\n\tseason = context['season']\r\n\tdisplay_month = context['display_month']\r\n\tuser = context['user']\r\n\tperms = context['perms']\r\n\tseason = Season.objects.get(id = season.id)\r\n\tyear = season.start_date.year\r\n\ttoday = date.today()\r\n\tif not display_month:\r\n\t\tmonth = date.today().month\r\n\telse:\r\n\t\tmonth = int(display_month)\r\n\tfirst_weekday_of_month, month_days = calendar.monthrange(year, month)\r\n\tfirst_day_of_season = season.start_date\r\n\tfirst_day_of_month = date(year, month, 1)\r\n\tlast_day_of_month = date(year, month, month_days)\r\n\t\r\n\tif first_day_of_month.weekday()+1 == 7:\r\n\t\tfirst_day_of_calendar_delta = 0\r\n\telse:\r\n\t\tfirst_day_of_calendar_delta = first_day_of_month.weekday()+1\r\n\t\t\r\n\tfirst_day_of_calendar = first_day_of_month - timedelta(first_day_of_calendar_delta)\r\n\t\r\n\tif 7 - last_day_of_month.weekday() == 1:\r\n\t\tlast_day_of_calendar_delta = 7\r\n\telse:\r\n\t\tlast_day_of_calendar_delta = 7 - last_day_of_month.weekday()\r\n\t\t\r\n\tlast_day_of_calendar = last_day_of_month + timedelta(last_day_of_calendar_delta)\r\n\tlesson_slot_list = LessonSlot.objects.all()\r\n\t\r\n\tmonth_cal = []\r\n\tweek = []\r\n\tweek_headers = []\r\n\t\r\n\ti = 0\r\n\tday = first_day_of_calendar\r\n\twhile day <= last_day_of_calendar:\r\n\t\tif i < 7:\r\n\t\t\tweek_headers.append(day)\r\n\t\tcal_day = {}\r\n\t\tcal_day['day'] = day\r\n\t\tcal_day['lessons'] = False\r\n\t\tcal_day_lessons = []\r\n\t\tfor lesson in lesson_slot_list:\r\n\t\t\tif day == lesson.start_datetime.date():\r\n\t\t\t\tcal_day_lessons.append(lesson)\r\n\t\tcal_day['lessons'] = cal_day_lessons\r\n\t\tweek.append(cal_day)\r\n\t\tif day.weekday() == 5:\r\n\t\t\tmonth_cal.append(week)\r\n\t\t\tweek = []\r\n\t\ti += 1\r\n\t\tday += timedelta(1)\r\n\t\t\r\n\treturn{'calendar': month_cal, \r\n\t\t\t'user': user,\r\n\t\t\t'perms': perms,\r\n\t\t\t'headers': week_headers, \r\n\t\t\t'season': season, \r\n\t\t\t'current_month': first_day_of_month.strftime(\"%B\"),\r\n\t\t\t'today' : today ,\r\n\t\t\t'next_month': first_day_of_month.month+1, \r\n\t\t\t'prev_month': first_day_of_month.month-1}\r\n\t\r\nregister.inclusion_tag('schedule/month_cal.html', takes_context=True)(season_month_cal)\r\n\r\ndef month_cal(year=date.today().year, month=date.today().month, day=date.today().day):\r\n\tfirst_day_of_month = date(year, month, 1)\r\n\tlast_day_of_month = date(year, month, calendar.monthrange(year, month)[1]) # computed directly; the original called an undefined get_last_day_of_month helper\r\n\tfirst_day_of_week = date(year, month, day)\r\n\tfirst_day_of_calendar = first_day_of_month - timedelta(first_day_of_month.weekday()+1)\r\n\tlast_day_of_calendar = first_day_of_calendar + timedelta(7 - first_day_of_calendar.weekday())\r\n\t\r\n\tmonth_cal = []\r\n\tweek = []\r\n\tweek_headers = []\r\n\t\r\n\ti = 0\r\n\tday = first_day_of_calendar\r\n\twhile i <= 7:\r\n\t\tif i < 
7:\r\n\t\t\tweek_headers.append(day)\r\n\t\tcal_day = {}\r\n\t\tcal_day['day'] = day\r\n\t\tif day.month == month:\r\n\t\t\tcal_day['in_month'] = True\r\n\t\telse:\r\n\t\t\tcal_day['in_month'] = False\r\n\t\tweek.append(cal_day)\r\n\t\tif day.weekday() == 5:\r\n\t\t\tmonth_cal.append(week)\r\n\t\t\tweek = []\r\n\t\ti += 1\r\n\t\tday += timedelta(1)\r\n\t\t\r\n\treturn{'calendar': month_cal, 'headers': week_headers}\r\n\t\r\nregister.inclusion_tag('schedule/month_cal.htm')(month_cal)","sub_path":"schedule/templatetags/schedule_tags.py","file_name":"schedule_tags.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"277984970","text":"number = int(input('What is your number?: ')) # cast to int: input() returns a string in Python 3\n\nif number % 2:\n print('Odd')\nelse:\n print('Even')\n\n# '50' % 1 -> TypeError: not all arguments converted during string formatting\n# '50 %s' % 1 -> 50 1\n# 50 % 1 -> 0\n","sub_path":"control-flow/solution/operators_is_odd.py","file_name":"operators_is_odd.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"275351786","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 8 13:21:02 2019\n\n@author: Nicoleta Cristea cristn@uw.edu \n\"\"\"\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nfrom xrviz.dashboard import Dashboard\nimport pandas as pd\nimport seaborn as sns\n#import csv\n\n# \npath = '/Users/carina/Desktop/data/Water_table_skagit/Map.Soil.TableDepth.asc.historic'\n\nvalues = np.loadtxt(path)\n# \npath_grids = '/Users/carina/Desktop/code/SkagitLandslideHazards/y_x_grids.mat'\n\n# \ngrids = loadmat(path_grids)\n#wd = pd.DataFrame(data=values[1:,1:], # values\n# index=data[1:,0], # 1st column as index\n# columns=data[0,1:]) # 1st row as the column names \n# \npath_dates = '/Users/carina/Desktop/data/Water_table_skagit/dates_max_sat_sauk_old_files /export_historic_dates'\n#dates = np.loadtxt(path_dates + \"export_historic_dates\", delimiter='\\t')\n#in_txt = csv.reader(open((path_dates + \"export_historic_dates\"), \"rb\"), delimiter = '\\t')\n#list(csv.reader(open(path_dates+'export_historic_dates.txt', 'rb'), delimiter='\\t'))\n\ndates = pd.read_csv(path_dates, sep=\" \", header=None)\n#df = pd.read_fwf('path_dates + \"export_historic_dates\")\ndates.columns = [\"Map\", \"txt\", \"number\", \"date\"] #this works with the new format\n#dates.columns = [\"Index\",\"Map\", \"txt\", \"number\", \"date\"] #this works with the new format\ndates['date']\n\n# \n# concatenate data into xarray \nno_time = values.shape[0]//1020 # integer count of stacked rasters; np.vsplit expects an int\none_image = values[:1020]\nsecond_image = values[1020:2040]\n\nfig = plt.figure(figsize=(6, 3.2))\n\n#plot one image to check \n\nax = fig.add_subplot(111)\nax.set_title('colorMap')\nplt.imshow(one_image)\nax.set_aspect('equal')\n\ncax = fig.add_axes([0.12, 0.1, 0.78, 0.8])\ncax.get_xaxis().set_visible(False)\ncax.get_yaxis().set_visible(False)\ncax.patch.set_alpha(0)\ncax.set_frame_on(False)\nplt.colorbar(orientation='vertical')\nplt.show()\n# \nlist_arrays = np.vsplit(values, no_time)\n#all_arrays = np.vstack(list_arrays)\nall_arrays = np.asarray(list_arrays)\n\n# \n#get sizes and reshape arrays to match dimensions and sizes \n#plot to check results\n\nnum = dates['date'].size\nyears = np.linspace(1, no_time, num)\n\nx_ = np.linspace(1, 916, num = 916)\ny_ = np.linspace(1, 1020, num = 1020)\n\n#test interpolation\n\nx = 
grids[\"X_\"][:1]\ny = grids[\"Y_\"][:, 1:2]\n\n# check grids \nx = x.T\n#y = y.T\n\nfig = plt.figure(figsize=(6, 3.2))\nplt.plot(x_, x)\n\nfig = plt.figure(figsize=(6, 3.2))\nplt.plot(y_, y)\n\nx1_ = np.linspace(1, 916, num = 2725)\ny1_ = np.linspace(1, 916, num = 2720)\n\nx1 = grids[\"X_1\"][:1]\ny1 = grids[\"Y_1\"][:, 1:2]\n\n# check grids \nx1 = x1.T\nt = y_.shape\ny = y.reshape(t) \n\nt = x_.shape\nx = x.reshape(t) \n\nfig = plt.figure(figsize=(6, 3.2))\nplt.plot(x1_, x1)\n\nfig = plt.figure(figsize=(6, 3.2))\nplt.plot(y1_, y1)\n\n\n\n# \n\nds_wt = xr.Dataset(data_vars = {'wt': (('time', 'y', 'x'), all_arrays)})\n\n#%% \n#ds_wt['time'] = years \nds_wt['time'] = dates[\"date\"].tolist()\nds_wt['y'] = y\nds_wt['x'] = x\n#ds_wt = ds_wt.set_coords(['time','y', 'x'])\nds_wt.isel(time=[0]).to_array().plot() #shows upside down compared to imshow\n\n# \n\n\ny1 = y1.reshape(y1.shape[0])\nx1 = x1.reshape(x1.shape[0])\n\ndsi = ds_wt.interp(y = y1, x = x1)\n\n\n# \n\n#first image to numpy \n#one_image_res = dsi.isel(time=[0]).to_array().values\n\n#plot first image \n\ndsi.isel(time=[0]).to_array().plot() #shows upside down compared to imshow\n\n# \n# or, select by date/name\ndsi.sel(time= '05/31/1969-00').to_array().plot()\n \n# \n#dsi.to_netcdf('dtw_historic_with_dates.nc')\ndsi.to_netcdf('dtw_historic_with_dates_netcdf3.nc', format = 'NETCDF3_64BIT')\n# \n\ndashboard = Dashboard(ds_wt)\ndashboard.show()\n# \n#%% select points to plot time variation and probability distribution\n\none_location = ds_wt.isel(x=[600], y = [500]).to_array()\n\n#plot one_location \n\n#ds_wt.isel(x=[500], y = [500]).to_array().plot() #shows upside down compared to imshow\n\n\n# how to define an xarray dataset \n\nsns.distplot(one_location);\n\nsns.distplot(one_location, bins=20, kde=False, rug=True); \n\n\n# ds = xr.Dataset({'temperature': (['x', 'y', 'time'], temp),\n# ....: 'precipitation': (['x', 'y', 'time'], precip)},\n# ....: coords={'lon': (['x', 'y'], lon),\n# ....: 'lat': (['x', 'y'], lat),\n# ....: 'time': pd.date_range('2014-09-06', periods=3),\n# ....: 'reference_time': pd.Timestamp('2014-09-05')})\n\n ","sub_path":"saturation_code/process_wt_grids_with_vis.py","file_name":"process_wt_grids_with_vis.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"191633962","text":"# This file is a part of Dramatiq.\n#\n# Copyright (C) 2017,2018 CLEARTYPE SRL \n#\n# Dramatiq is free software; you can redistribute it and/or modify it\n# under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or (at\n# your option) any later version.\n#\n# Dramatiq is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public\n# License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see .\n\nimport ctypes\nimport platform\nimport signal\nimport threading\nimport time\nimport warnings\n\nfrom ..logging import get_logger\nfrom .middleware import Middleware\n\n_current_platform = platform.python_implementation()\n_supported_platforms = {\"CPython\"}\n\n\nclass TimeLimitExceeded(BaseException):\n \"\"\"Raised asynchronously inside worker threads when actors exceed\n their time limits.\n\n This is intentionally *not* a subclass of DramatiqError to avoid\n it being caught unintentionally.\n \"\"\"\n\n\nclass TimeLimit(Middleware):\n \"\"\"Middleware that cancels actors that run for too long.\n Currently, this is only available on CPython.\n\n Note:\n This works by setting an async exception in the worker thread\n that runs the actor. This means that the exception will only get\n called the next time that thread acquires the GIL. Concretely,\n this means that this middleware can't cancel system calls.\n\n Parameters:\n time_limit(int): The maximum number of milliseconds actors may\n run for.\n interval(int): The interval (in milliseconds) with which to\n check for actors that have exceeded the limit.\n \"\"\"\n\n def __init__(self, *, time_limit=600000, interval=1000):\n self.logger = get_logger(__name__, type(self))\n self.time_limit = time_limit\n self.interval = interval\n self.deadlines = {}\n\n def _handle(self, signum, mask):\n current_time = time.monotonic()\n for thread_id, deadline in self.deadlines.items():\n if deadline and current_time >= deadline:\n self.logger.warning(\"Time limit exceeded. Raising exception in worker thread %r.\", thread_id)\n self.deadlines[thread_id] = None\n if _current_platform == \"CPython\":\n self._kill_thread_cpython(thread_id)\n else: # pragma: no cover\n self.logger.critical(\"Cannot kill threads on platform %r.\", _current_platform)\n\n def _kill_thread_cpython(self, thread_id):\n thread_id = ctypes.c_long(thread_id)\n exception = ctypes.py_object(TimeLimitExceeded)\n count = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, exception)\n if count == 0: # pragma: no cover\n self.logger.critical(\"Failed to set exception in worker thread.\")\n elif count > 1: # pragma: no cover\n self.logger.critical(\"Exception was set in multiple threads. 
Undoing...\")\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, ctypes.c_long(0))\n\n @property\n def actor_options(self):\n return {\"time_limit\"}\n\n def after_process_boot(self, broker):\n self.logger.debug(\"Setting up timers...\")\n signal.setitimer(signal.ITIMER_REAL, self.interval / 1000, self.interval / 1000)\n signal.signal(signal.SIGALRM, self._handle)\n\n if _current_platform not in _supported_platforms: # pragma: no cover\n warnings.warn(\n \"TimeLimit cannot kill threads on your current platform (%r).\" % _current_platform,\n category=RuntimeWarning, stacklevel=2,\n )\n\n def before_process_message(self, broker, message):\n actor = broker.get_actor(message.actor_name)\n limit = message.options.get(\"time_limit\") or actor.options.get(\"time_limit\", self.time_limit)\n deadline = time.monotonic() + limit / 1000\n self.deadlines[threading.get_ident()] = deadline\n\n def after_process_message(self, broker, message, *, result=None, exception=None):\n self.deadlines[threading.get_ident()] = None\n","sub_path":"venv/Lib/site-packages/dramatiq/middleware/time_limit.py","file_name":"time_limit.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"144908218","text":"import joblib\nimport numpy as np\nimport pandas as pd\nimport sklearn\nimport streamlit as st\n\n\nfrom why.pages import (\n correlation,\n global_effects,\n home,\n importance,\n local_effects,\n performance,\n)\nfrom why import data, models\nfrom why import Explainer\n\nPAGES = {\n \"Home\": home,\n \"Performance\": performance,\n \"Feature Importance\": importance,\n \"Feature Correlation\": correlation,\n \"Global Effects\": global_effects,\n \"Local Effects\": local_effects,\n}\n\nst.set_option(\"deprecation.showfileUploaderEncoding\", False)\n\n\ndef main():\n st.sidebar.title(\"Navigation\")\n page = PAGES[st.sidebar.radio(\"Go to\", list(PAGES.keys()))]\n\n st.sidebar.title(\"Settings\")\n dataset = st.sidebar.selectbox(\n \"Select a dataset\",\n [\"Car Insurance Cold Calls\", \"Cervical Cancer\", \"Upload my own data\"],\n )\n\n if dataset == \"Upload my own data\":\n train_data = st.sidebar.file_uploader(\n \"Upload a training dataset in CSV format (comma separated, with headers, UTF-8)\",\n type=\"csv\",\n encoding=\"utf-8\",\n )\n if train_data:\n train = pd.read_csv(train_data)\n test_data = st.sidebar.file_uploader(\n \"Upload a test dataset in CSV format (comma separated, with headers, UTF-8)\",\n type=\"csv\",\n encoding=\"utf-8\",\n )\n if test_data:\n test = pd.read_csv(test_data)\n target = None\n else:\n train, test, target = data.load_data(dataset=dataset)\n if \"train\" in locals().keys() and \"test\" in locals().keys():\n if not target:\n target = st.sidebar.selectbox(\n \"Select the target column\",\n [\"No target column selected\"] + sorted(test.columns),\n )\n if not target == \"No target column selected\":\n mode = st.sidebar.selectbox(\n \"Select the problem type\",\n [\n \"No problem type selected\",\n \"Binary Classification\",\n \"Multi Class Classification\",\n ],\n )\n if not mode == \"No problem type selected\":\n mode = mode.lower().replace(\" \", \"_\")\n model_type = st.sidebar.selectbox(\n \"Select the model type\",\n [\"No model type selected\", \"Random Forest\", \"Upload my own model\"],\n )\n if not model_type == \"No model type selected\":\n if model_type == \"Upload my own model\":\n model_buffer = st.sidebar.file_uploader(\n f\"Upload a trained scikit-learn model (saved via 
joblib.dump() from scikit-learn version {sklearn.__version__}\",\n type=None,\n )\n if model_buffer:\n model = joblib.load(model_buffer)\n else:\n model = models.get_model(model_type)\n if \"model\" in locals().keys():\n random_feature = st.sidebar.radio(\n \"Insert a random feature to investigate its effect on the explanations\",\n [\"No\", \"Yes\"],\n key=\"random_feature\",\n )\n random_feature = True if random_feature == \"Yes\" else False\n feats_to_remove = st.sidebar.multiselect(\n \"Select features to remove from the model\",\n options=sorted(train.columns),\n default=None,\n )\n features = (\n list(set(train.columns) - set(feats_to_remove))\n if feats_to_remove\n else None\n )\n seed = st.sidebar.number_input(\n label=\"Update the random seed\",\n min_value=1,\n max_value=100,\n value=42,\n step=1,\n )\n np.random.seed(seed)\n explainer = Explainer(\n train=train,\n test=test,\n target=target,\n model=model,\n features=features,\n mode=mode,\n random_feature=random_feature,\n )\n page.write(explainer)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"329711174","text":"#!/usr/bin/env python\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nimport matplotlib.ticker as plticker\nfrom pprint import pprint\nimport math\n\n'''\n#need to preprocess the file\nfwname = open('InferenceDeviceNames.txt', 'w');\nfwjson = open('InferenceJSON.json', 'w');\njson_arr = []\nwith open('personagraphAPIdataCMUEncrypted_02132014.csv') as fp:\n for line in fp:\n \tif \"error\" not in line and \"empty\" not in line:\n\t \tparts = line.split(',')\n\t \t#cleanline = '{},{},{}'.format(parts[0],'_'.join(parts[1:-1]),parts[-1])\n\t #fw.write(cleanline);\n\t fwname.write(parts[0]+'\\n') #write the device hash\n\t _json = ','.join(parts[1:]).strip()\n\t json = _json[1:-1].replace('\"\"','\"') + '\\n';\n\t if len(json) >0:\n\t \tjson_arr.append(json);\n\nfwjson.write('['+','.join(json_arr) + ']')\nfwjson.close();\nfwname.close();\n'''\n\n\n#load json data into the variable 'data'\njson_data = open('InferenceJSON.json')\ndata = json.load(json_data)\njson_data.close()\n\n'''\nWe want the final data structure to look like this:\n{\n\t\"age\":{\n\t\t\t\"youth\":30, \n\t\t\t\"old\":10}\n\t\"life_stage\":{\n\t\t\t\"married\":20,\n\t }\n\t...\n}\n'''\ninfData = {}\n\nfor device in data:\n\tfor attr in device:\n\t\t'''\n\t\tattr is in the form of \n\t\t{\"attribute\":\"youth\",\"score\":0.5,\"taxonomy\":\"age\"}\n\t\t'''\n\t\tif attr[\"taxonomy\"] in infData:\n\t\t\tif attr[\"attribute\"] in infData[attr[\"taxonomy\"]]:\n\t\t\t\tinfData[attr[\"taxonomy\"]][attr[\"attribute\"]] += attr[\"score\"]\n\t\t\telse:\n\t\t\t\tinfData[attr[\"taxonomy\"]][attr[\"attribute\"]] = attr[\"score\"]\n\n\t\telse:\n\t\t\t_tmp = {}\n\t\t\t_tmp[attr[\"attribute\"]] = attr[\"score\"]\n\t\t\tinfData[attr[\"taxonomy\"]] = _tmp\n\t\n#pprint(infData);\n\n#graph gaming categories\nsingleCategories = {}\nfor key in infData.keys():\n\tif len(infData[key]) == 1 and key != \"computer_games\" and key in infData[key]:\n\t\t#{u'religion': {u'religion': 104.41715201000007}, ... 
}\n\t\tsingleCategories[key] = infData[key][key]\n\n\nsortedKeys = sorted(singleCategories.keys(), key=lambda k: singleCategories[k])\nsortedValues = [int(math.floor(singleCategories[key])) for key in sortedKeys]\n\nxlabel = sortedKeys[-1:-10:-1]\ny = sortedValues[-1:-10:-1]\nx = range(len(xlabel))\n\n\nplt.bar(x, y, align='center');\nplt.xticks(x, xlabel)\nplt.xticks(rotation=15)\nplt.xlabel(\"Categories\")\nplt.ylabel(\"Aggregated score\")\nplt.savefig('Inference_Aggregated.png')\n\n'''\n#graph - age\nattributes=['gender','age','income','life_stage','parent']\n\nfor attribute in attributes:\n\t_attr = infData[attribute]\n\txlabel = list(_attr.keys())\n\tx = range(len(xlabel))\n\t_sum = sum(list(_attr.values()))\n\ty = [_attr[key]/_sum for key in _attr.keys()]\n\tplt.bar(x,y, 0.4, align='center');\n\tplt.xticks(x, xlabel)\n\tplt.xlabel(attribute+\" range\")\n\tplt.ylabel(\"Normalized score\")\n\tplt.savefig('Inference_'+attribute+'.png')\n\tplt.clf()\n'''","sub_path":"freq_analysis/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"274761922","text":"# Original\ninventory = [\"twin bed\", \"twin bed\", \"headboard\", \"queen bed\", \"king bed\", \"dresser\", \"dresser\", \"table\", \"table\", \"nightstand\", \"nightstand\", \"king bed\", \"king bed\", \"twin bed\", \"twin bed\", \"sheets\", \"sheets\", \"pillow\", \"pillow\"]\n\n# Checkpoint 1\ninventory_len = len(inventory)\n\n# Checkpoint 2\nfirst = inventory[0]\n\n# Checkpoint 3\nlast = inventory[-1]\n\n# Checkpoint 4\ninventory_2_6 = inventory[2:6]\n\n# Checkpoint 5\nfirst_3 = inventory[0:3]\n\n# Checkpoint 6\ntwin_beds = inventory.count(\"twin bed\")\n\n# Checkpoint 7\nremoved_item = inventory.pop(4)\n\n# Checkpoint 8\ninventory.insert(10, \"19th Century Bed Frame\")\n\n# Checkpoint 9\ninventory.sort()\nprint(inventory)\n\n","sub_path":"Python3/Projects/working-with-lists-review.py","file_name":"working-with-lists-review.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"507667417","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\n\n\ndef login_page(request):\n if request.user.is_authenticated:\n return redirect('main')\n if request.method == 'POST':\n print(request.POST)\n try:\n u = User.objects.get(email=request.POST.get('user_email'))\n except User.DoesNotExist:\n u = None\n print(u)\n
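 # NOTE (editor's addition): when no account matches the email, u stays None, so the\n # authenticate() call below is guarded to avoid an AttributeError on u.username.\n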
 user = authenticate(username=u.username,\n password=request.POST.get('password')) if u else None\n if user is not None:\n login(request, user)\n return redirect('main')\n else:\n messages.add_message(request, messages.INFO,\n 'Oops! That did not work. Try again!')\n return render(request, 'pages/login.html', {})\n else:\n return render(request, 'pages/login.html', {})\n\n\ndef user_logout(request):\n logout(request)\n return redirect('main')\n","sub_path":"diplom/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"172043519","text":"from urlparse import urlparse, parse_qs\nfrom datetime import datetime\n\nfrom atom.logger import get_logger\nfrom atom.http.exceptions import HTTPSyntaxError\n\nlog = get_logger(__name__)\n\nstatus_codes = {\n 100: 'Continue',\n 200: 'OK',\n 302: 'Found',\n 400: 'Bad Request',\n 403: 'Forbidden',\n 404: 'Not Found',\n 405: 'Method Not Allowed',\n 411: 'Length Required',\n 500: 'Internal Server Error',\n}\n\ndays = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']\nmonths = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\n\n\nclass HTTPHeaders(object):\n def __init__(self, type_):\n assert type_ in ('request', 'response')\n self.type = type_\n self._headers = []\n self._chunked = None\n self._content_length = None\n \n @classmethod\n def parse(cls, type_, lines):\n self = cls(type_)\n \n first_line = lines[0].split(None, 2)\n if len(first_line) < 3:\n raise HTTPSyntaxError('Invalid first line: \"{}\"'.format(lines[0]))\n \n if type_ == 'request':\n self.method, self.uri, self.http_version = first_line\n else:\n self.http_version, self.code, self.message = first_line\n try:\n self.code = int(self.code)\n except ValueError:\n raise HTTPSyntaxError('Invalid first line: \"{}\"'.format(lines[0]))\n \n # TODO make this work with HTTP/1.0 and >HTTP/1.1 too\n if self.http_version != 'HTTP/1.1':\n raise HTTPSyntaxError('Unknown HTTP version: \"{}\"'.format(self.http_version))\n \n cur_header = None\n for line in lines[1:]:\n if line[0] in ' \\t':\n if cur_header == None:\n raise HTTPSyntaxError('Invalid header: \"{}\"'.format(line))\n cur_header += '\\r\\n' + line\n else:\n if cur_header != None:\n self._add_raw(cur_header)\n cur_header = line\n if cur_header != None:\n self._add_raw(cur_header)\n \n self.check_syntax()\n return self\n \n def _add_raw(self, header):\n parts = header.split(':',1)\n if len(parts) != 2:\n raise HTTPSyntaxError('Invalid header: \"{}\"'.format(header))\n self.add(parts[0], parts[1])\n \n @classmethod\n def response(cls, code, message = None):\n self = cls('response')\n self.http_version = 'HTTP/1.1'\n self.code = int(code)\n self.message = message or status_codes[code]\n return self\n \n @classmethod\n def request(cls, method, uri):\n self = cls('request')\n self.method = method\n self.uri = uri\n self.http_version = 'HTTP/1.1'\n return self\n \n @property\n def raw(self):\n if self.type == 'request':\n ret = ['{} {} {}'.format(self.method, self.uri, self.http_version)]\n else:\n ret = ['{} {} {}'.format(self.http_version, self.code, self.message)]\n ret.extend('{}:{}'.format(h[1], h[2]) for h in self._headers)\n return '\\r\\n'.join(ret) + '\\r\\n\\r\\n'\n \n def add(self, name, value):\n self._headers.append([name.lower().strip(), name, ' ' + value])\n self._updated()\n \n def remove(self, name):\n self._headers = [h for h in self._headers if h[0] != name.lower()]\n self._updated()\n \n def set(self, name, value):\n self.remove(name)\n self.add(name, value)\n \n def get(self, name):\n return [h[2].strip() for h in self._headers if h[0] == name.lower()]\n \n def get_single(self, name):\n vals = self.get(name)\n if len(vals) > 1:\n raise 
HTTPSyntaxError('Header \"{}\" present multiple times'.format(name))\n return vals[0] if len(vals) != 0 else None\n \n def check_syntax(self):\n self.get_chunked()\n self.get_content_length()\n return True\n \n def _updated(self):\n self._chunked = None\n self._content_length = None\n \n def get_chunked(self):\n if self._chunked == None:\n te_headers = [h[2] for h in self._headers if h[0] == 'transfer-encoding']\n encodings = [value.lower().strip() for header in te_headers for value in header.split(';')]\n self._chunked = False\n if len(encodings) > 0:\n self._chunked = (encodings[-1] == 'chunked')\n if any(e == 'chunked' for e in encodings[:-1]):\n raise HTTPSyntaxError('Invalid Transfer-Encoding')\n return self._chunked\n \n def get_content_length(self):\n if self._content_length == None:\n if any(h[0] == 'transfer-encoding' for h in self._headers):\n return None\n cl_headers = [h[2] for h in self._headers if h[0] == 'content-length']\n if len(cl_headers) == 1:\n try:\n self._content_length = int(cl_headers[0].strip())\n except ValueError:\n raise HTTPSyntaxError('Invalid Content-Length')\n elif len(cl_headers) > 1:\n raise HTTPSyntaxError('Too many Content-Length headers')\n return self._content_length\n \n @property\n def path(self):\n return urlparse(self.uri).path\n \n @property\n def args(self):\n return parse_qs(urlparse(self.uri).query)\n \n def set_cookie(self, name, value, expires, secure, httponly, path = '/'):\n assert self.type == 'response'\n cookie_str = '{}={}'.format(name, value)\n \n if expires == False:\n expires = datetime.utcfromtimestamp(2**31-1)\n if expires:\n t = expires.utctimetuple()\n cookie_str += '; Expires={}, {:02} {} {:04} {:02}:{:02}:{:02} GMT'.format(\n days[t.tm_wday], t.tm_mday, months[t.tm_mon-1], t.tm_year, t.tm_hour, t.tm_min, t.tm_sec)\n \n if path:\n cookie_str += '; Path=' + path\n \n if secure:\n cookie_str += '; Secure'\n \n if httponly:\n cookie_str += '; HttpOnly'\n \n self.delete_cookie(name)\n self.add('Set-Cookie', cookie_str)\n log.debug('Set-Cookie: {}', cookie_str)\n \n def get_cookie(self, name):\n assert self.type == 'request'\n cookie_values = []\n for h in self._headers:\n if h[0] == 'cookie':\n cookies = [c.split('=') for c in h[2].split(';')]\n cookie_values.extend(c[1].strip() for c in cookies if c[0].strip().lower() == name.lower())\n return cookie_values\n \n def delete_cookie(self, name):\n if self.type == 'request':\n for h in self._headers:\n if h[0] == 'cookie':\n cookies = [c.split('=') for c in h[2].split(';')]\n cookies = [c for c in cookies if c[0].strip().lower() != name.lower()]\n h[2] = ';'.join('='.join(c) for c in cookies)\n self._headers = [h for h in self._headers if not (h[0] == 'cookie' and len(h[2]) == 0)]\n else:\n self._headers = [h for h in self._headers if not (h[0] == 'set-cookie' and h[2].split('=',1)[0].strip().lower() == name.lower())]\n \n","sub_path":"atom/http/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":7250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"158440020","text":"#!./ganga-no-mon\nimport os\nimport re\nimport sys\nimport argparse\nimport tempfile\nfrom functools import partial\nimport Ganga\nfrom gutils.utils import master_id, smart_jobs_select, subjobs\nfrom gutils.merge import direct_merge, download_merge\n\nDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))\nsys.path.append(DIR)\nfrom utils import rundb_run_info\nimport eos\nimport mergebase\nfrom mergebase import logger\n\n\nparser = 
argparse.ArgumentParser(parents=[mergebase.parser])\nparser.add_argument('jobs', nargs='+', help='Job IDs')\nparser.add_argument('--name', '-n', default='ntuples.root', help='Name of job output file in job.outputfiles')\nparser.add_argument('--type', help='Data type: (mc)counters or (mc)vertices. Tries to guess if not given')\nparser.add_argument('--id', required=True, help='Processing ID suffix')\nargs = parser.parse_args()\n\n\ndef guess_type(options):\n m = re.match(r'^(reco|hlt)_(bg|lumi)_(20\d\d)(data|sim)', options)\n if m:\n return ('mc' if m.group(4) == 'sim' else '') + 'vertices'\n m = re.match(r'^(redo_)?counters_([a-z]+_)?(20\d\d)(data|sim)', options)\n if m:\n return ('mc' if m.group(4) == 'sim' else '') + 'counters'\n raise NotImplementedError(\"I don't know how to parse options {}\".format(options))\n\n\ndef eos_name(name, processing_id, data_type=None):\n \"\"\"Derive full name for EOS (excl. extension) from job name.\"\"\"\n parts = name.split('-')\n if parts[0] == 'raw':\n if parts[1] == 'fill':\n data_id = '{}-all'.format(parts[2])\n elif parts[1] == 'run':\n data_id = '{}-{}'.format(rundb_run_info(parts[2])['fillid'], parts[2])\n else:\n raise RuntimeError(\"Unrecognized data type '{}' (expected 'fill' or 'run')\".format(parts[1]))\n guessed_type = guess_type(parts[3])\n data_type = data_type or guessed_type\n if data_type != guessed_type:\n logger.warning('Requested data type ({}) does not coincide with guessed type ({}).'.format(data_type, guessed_type))\n elif parts[0] == 'mc':\n guessed_type = guess_type(parts[2])\n data_type = data_type or guessed_type\n if data_type != guessed_type:\n logger.warning('Requested data type ({}) does not coincide with guessed type ({}).'.format(data_type, guessed_type))\n data_id = parts[1]\n else:\n raise NotImplementedError(\"I don't know how to parse job name {}\".format(name))\n folder = data_type.replace('mc', 'simulation/') if data_type.startswith('mc') else ('data/' + data_type)\n return os.path.join(eos.LUMI_ROOT, folder, '{}-{}-{}.root'.format(data_type, data_id, processing_id))\n\n\nfor specs in args.jobs:\n eos_path = partial(eos_name, processing_id=args.id, data_type=args.type)\n ok = mergebase.merge(specs, args.name, eos_path, args)\n","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"441847486","text":"# str_name=input(\"Please enter a string: \")\n# for i in str_name:\n# print(ord(i))\n#\n# while True:\n# int_num=int(input(\"Please enter a code point\"))\n# if int_num <0:\n# break\n# print(chr(int_num))\n\n\"\"\"Countdown timer\"\"\"\n# for i in range(2,0,-1):\n# for item in range(59,-1,-1):\n# if i==2 and item!=0:\n# continue\n# print(\"%02d:%02d\"%(i,item))\n\n\n# str_num=input(\"Please enter a string\")\n# print(\"First character: %s\"%str_num[0])\n# print(\"Last character: %s\"%str_num[-1])\n# if not len(str_num)%2==0:\n# print(\"Middle character: %s\"%str_num[len(str_num)//2])\n# print(\"Last 3 characters: %s\"%str_num[-3:])\n# print(\"Reversed string: %s\"%str_num[::-1])\n\n\"\"\"list practice\"\"\"\n# list=[1,2,3]\n# list2=[4,5,6]\n# list.extend(list2)\n# print(list)\n'''Compute the highest and lowest scores'''\n# score=[]\n# int_student_num=int(input(\"Please enter the number of students: \"))\n# for item in range(int_student_num):\n# int_score=int(input(\"Please enter the score of student %d: \"%(item+1)))\n# score.append(int_score)\n#\n#\n# print(\"Highest score: %d\"%max(score))\n# print(\"Total score: %d\"%sum(score))\n# print(\"Lowest score: %d\"%min(score))\n\n# name_list=[]\n# while True:\n# str_name=input(\"Please enter a student name: \")\n# if str_name==\"esc\":\n# break\n# if str_name in name_list:\n# print(\"Names must not be repeated\")\n# else:\n# name_list.append(str_name)\n# print(name_list)\n\n
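'''Editor's note (added): the hand-rolled loops below can be cross-checked against\nPython's built-ins; a minimal sanity check with sample values:'''\n# sample = [1, 23, 4, 5, 6, 3543, -1, 26573, 7777, 0, 89, 97]\n# assert max(sample) == 26573 and min(sample) == -1\n\n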
print(\"姓名不能重复\")\n# else:\n# name_list.append(str_name)\n# print(name_list)\n\n'''最大值'''\n# list01=[1,23,4,5,6,3543,26573,7777,89,97]\n# max=list01[0]\n# for item in list01:\n# max=item if item>max else max\n# print(max)\n'''最小值'''\nlist01=[1,23,4,5,6,3543,-1,26573,7777,0,89,97]\nmin=list01[0]\nfor item in list01:\n min=item if item= finishTime:\n\t\tcontinue\n\tprint('Take a break.')\n\twhile testTime < ( currTrackTime + breakWaitSecs ):\n\t\ttestTimeFull = datetime.today()\n\t\ttestTime = updateTime( testTimeFull )\n\tcurrTrackTime = ( currTrackTime + breakWaitSecs )\n\tprint('Get back to work.')\nprint(\"Take a 15 minute break.\")\n","sub_path":"days/01-03-datetimes/code/pomodoro_timer_datetime.py","file_name":"pomodoro_timer_datetime.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"80154798","text":"try: # for pip >= 10\n from pip._internal.req import parse_requirements as pr\nexcept ImportError: # for pip <= 9.0.3\n from pip.req import parse_requirements as pr\nfrom setuptools import setup, find_packages\n\nins_req = [str(ir.req) for ir in pr('requirements.txt', session=False)]\n\n\ndef main():\n setup(\n name='rtsh',\n version_format='{tag}.{commitcount}+{gitsha}',\n setup_requires=['setuptools-git-version'],\n packages=find_packages(),\n long_description='Restricted tools shell for running diagnostic tools',\n install_requires=ins_req,\n include_package_data=True,\n entry_points='''\n [console_scripts]\n rtsh=rtsh.script:main\n ''',\n )\n\nif __name__ == '__main__':\n main()\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"585891040","text":"import os\nimport numpy as np\nfrom osgeo import gdal\n\nclass Mask:\n\n def __init__(self ):\n\n \"\"\"\n Placeholder\n \"\"\"\n\n # increase system memory usage\n os.environ['GDAL_CACHEMAX'] = '2048'\n gdal.UseExceptions()\n\n self._qa_vars = {\n 'fill': self.fill_qa,\n 'terrain': self.terrain_qa,\n 'radiometricSaturation': self.radiometric_qa,\n 'cloud': self.cloud,\n 'cloudConf': self.cloud_confidence,\n 'cirrusConf': self.cirrus_confidence,\n 'cloudShadowConf': self.cloud_shadow_confidence,\n 'snowIceConf': self.snow_ice_confidence,\n }\n\n self._binary_vars = ('terrain', 'cloud', 'fill')\n self._data = None\n\n return\n\n\n def generate( self, qa_pathname, out_pathname=None ):\n\n \"\"\"\n writes the cloud+alpha mask as single-band uint8 tiff\n suitable for stacking as an alpha band\n threshold defaults to 2; only 2 and above are considered clouds\n \"\"\"\n\n self._data = None\n\n # open qa file\n ds = gdal.Open( qa_pathname )\n if ds is not None:\n\n # load qa array into memory\n qa_data = ds.GetRasterBand(1).ReadAsArray()\n self._data = np.zeros ( qa_data.shape, dtype=int)\n\n # retrieve cloud confidences\n funcs = [ 'cirrusConf', 'cloudConf', 'cloudShadowConf' ]\n for func in funcs:\n\n # apply logical or on mask and \n bits = self._qa_vars[ func ] ( qa_data )\n self._data = np.logical_or ( self._data, bits >= 3 )\n\n if out_pathname is not None:\n\n # get driver and create copy\n driver = gdal.GetDriverByName( 'GTiff' )\n out_ds = driver.CreateCopy( out_pathname, ds, options=[ 'TILED=YES', 'COMPRESS=DEFLATE'] )\n if out_ds is not None:\n\n # write array\n out_ds.GetRasterBand(1).WriteArray( self._data )\n out_ds = None\n\n # close file\n ds = None\n\n return self._data\n\n\n def apply( self, 
pathname, out_pathname=None ):\n\n \"\"\"\n Placeholder\n \"\"\"\n\n channel = None\n\n # open dataset\n ds = gdal.Open( pathname )\n if ds is not None:\n\n # read data and check equivalent dimensions\n if ds.RasterYSize == self._data.shape[ 0 ] and ds.RasterXSize == self._data.shape[ 1 ]:\n\n # set pixels to nodata\n channel = ds.GetRasterBand(1).ReadAsArray()\n channel[ self._data > 0 ] = 0\n\n if out_pathname is not None:\n\n # get driver and create copy\n driver = gdal.GetDriverByName( 'GTiff' )\n out_ds = driver.CreateCopy( out_pathname, ds, options=[ 'TILED=YES', 'COMPRESS=DEFLATE'] )\n if out_ds is not None:\n\n # write array\n out_ds.GetRasterBand(1).WriteArray( channel )\n out_ds.GetRasterBand(1).SetNoDataValue( 0 )\n\n out_ds = None\n \n # close dataset\n ds = None\n \n return channel\n\n\n\n\n def captureBits( self, arr, b1, b2):\n\n \"\"\"\n Placeholder\n \"\"\"\n\n width_int = int((b1 - b2 + 1) * \"1\", 2)\n return ((arr >> b2) & width_int).astype('uint8')\n\n\n def fill_qa(self, arr):\n \"\"\"\n 0 = No, this condition does not exist\n 1 = Yes, this condition exists\n \"\"\"\n return self.captureBits(arr, 0, 0)\n\n\n def terrain_qa(self, arr):\n \"\"\"\n 0 = No, this condition does not exist\n 1 = Yes, this condition exists\n \"\"\"\n return self.captureBits(arr, 1, 1)\n\n\n def radiometric_qa(self, arr):\n \"\"\"\n For radiometric saturation bits (2-3), read from left to right\n represent how many bands contain saturation:\n 00 - No bands contain saturation\n 01 - 1-2 bands contain saturation\n 10 - 3-4 bands contain saturation\n 11 - 5 or more bands contain saturation\n \"\"\"\n return self.captureBits(arr, 3, 2)\n\n\n def cloud(self, arr):\n \"\"\"\n 0 = No, this condition does not exist\n 1 = Yes, this condition exists\n \"\"\"\n return self.captureBits(arr, 4, 4)\n\n\n def cloud_confidence(self, arr):\n \"\"\"\n 00 = \"Not Determined\" = Algorithm did not determine the status of this condition\n 01 = \"No\" = Algorithm has low to no confidence that this condition exists (0-33 percent confidence)\n 10 = \"Maybe\" = Algorithm has medium confidence that this condition exists (34-66 percent confidence)\n 11 = \"Yes\" = Algorithm has high confidence that this condition exists (67-100 percent confidence\n \"\"\"\n return self.captureBits(arr, 6, 5)\n\n\n def cloud_shadow_confidence(self, arr):\n \"\"\"\n 00 = \"Not Determined\" = Algorithm did not determine the status of this condition\n 01 = \"No\" = Algorithm has low to no confidence that this condition exists (0-33 percent confidence)\n 10 = \"Maybe\" = Algorithm has medium confidence that this condition exists (34-66 percent confidence)\n 11 = \"Yes\" = Algorithm has high confidence that this condition exists (67-100 percent confidence\n \"\"\"\n return self.captureBits(arr, 8, 7)\n\n\n def snow_ice_confidence(self, arr):\n \"\"\"\n 00 = \"Not Determined\" = Algorithm did not determine the status of this condition\n 01 = \"No\" = Algorithm has low to no confidence that this condition exists (0-33 percent confidence)\n 10 = \"Maybe\" = Algorithm has medium confidence that this condition exists (34-66 percent confidence)\n 11 = \"Yes\" = Algorithm has high confidence that this condition exists (67-100 percent confidence\n \"\"\"\n return self.captureBits(arr, 10, 9)\n\n\n def cirrus_confidence(self, arr):\n \"\"\"\n 00 = \"Not Determined\" = Algorithm did not determine the status of this condition\n 01 = \"No\" = Algorithm has low to no confidence that this condition exists (0-33 percent confidence)\n 10 = \"Maybe\" = 
Algorithm has medium confidence that this condition exists (34-66 percent confidence)\n 11 = \"Yes\" = Algorithm has high confidence that this condition exists (67-100 percent confidence\n \"\"\"\n return self.captureBits(arr, 12, 11)\n\n\n def lookup( self, name, val):\n\n \"\"\"\n Placeholder\n \"\"\"\n\n if name in self._binary_vars:\n if val == 0:\n return \"no\"\n return \"yes\"\n else:\n if val == 0:\n return \"notDetermined\"\n elif val == 1:\n return \"no\"\n elif val == 2:\n return \"maybe\"\n elif val == 3:\n return \"yes\"\n","sub_path":"ingestion/landsat-8/processor/mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":6931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"649089047","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm, PasswordResetForm\nfrom django.contrib.auth import login, logout, authenticate\nfrom accounts.forms import UserForm, ProfileForm\nfrom django.contrib.auth.models import User\nfrom restaurants.models import Preference, Comment\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\ndef signup_view(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db()\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=user.username, password=raw_password)\n login(request, user)\n return redirect('restaurants:list')\n else:\n form = UserCreationForm()\n return render(request, 'accounts/signup.html', {'form':form})\n\ndef login_view(request):\n if request.method=='POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n user = form.get_user()\n login(request, user)\n if 'next' in request.POST:\n return redirect(request.POST.get('next'))\n else:\n return redirect('restaurants:list')\n else:\n form = AuthenticationForm()\n return render(request,'accounts/login.html', {'form':form})\n\n@login_required(login_url=\"/accounts/log-in/\")\ndef logout_view(request):\n logout(request)\n return redirect('restaurants:list')\n\n@login_required(login_url=\"/accounts/log-in/\")\ndef profile_view(request,pk=None):\n if pk:\n user = User.objects.get(pk=pk)\n else:\n user = request.user\n\n viewing_user = request.user\n liked_restaurants = Preference.objects.filter(user=user, value=1).order_by('restaurant__title')\n comments = Comment.objects.filter(author=user)\n return render(request, 'accounts/profile.html', {'user':user,\n 'liked_restaurants': liked_restaurants,\n 'comments':comments,\n 'viewing_user':viewing_user})\n\n@login_required(login_url=\"/accounts/log-in/\")\ndef edit_profile(request):\n if request.method == 'POST':\n user_form = UserForm(request.POST, instance=request.user)\n profile_form = ProfileForm(request.POST, instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile = profile_form.save(commit=False)\n\n if 'picture' in request.FILES:\n profile.picture = request.FILES['picture']\n\n profile.save()\n return redirect('accounts:profile')\n else:\n user_form = UserForm(instance=request.user)\n profile_form = ProfileForm(instance=request.user.profile)\n return render(request, 'accounts/edit_profile.html', {\n 'user_form': user_form,\n 'profile_form': profile_form\n })\n\n@login_required(login_url=\"/accounts/log-in/\")\ndef delete_comment_from_profile(request, id):\n user = 
request.user\n comment = get_object_or_404(Comment, id=id)\n if user == comment.author:\n comment.delete()\n return redirect('accounts:profile')\n","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"277496497","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom math import sqrt\nimport numpy as np\nfrom sklearn.metrics.classification import accuracy_score, precision_score, recall_score, f1_score\n\n\nclass FullConnect(nn.Module):\n def __init__(self, input_size, output_size):\n super(FullConnect, self).__init__()\n self.weight = nn.Parameter(torch.Tensor(input_size, output_size))\n self.bias = nn.Parameter(torch.Tensor(output_size))\n u = 1 / sqrt(input_size)\n torch.nn.init.uniform_(self.weight, -u, u)\n self.bias.data.zero_()\n\n def forward(self, x):\n logit = torch.matmul(x, self.weight) + self.bias\n return logit\n\n\nclass LstmCell(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(LstmCell, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.init_param()\n\n def forward(self, x, h_fore, c_fore):\n input = torch.cat([h_fore, x], dim=-1)\n i, f, o, c = torch.matmul(input, self.Wi) + self.Bi, torch.matmul(input, self.Wf) + self.Bf, \\\n torch.matmul(input, self.Wo) + self.Bo, torch.matmul(input, self.Wc) + self.Bc\n i, f, o, c = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o), torch.tanh(c)\n o = F.threshold(o, threshold=0.4, value=0)\n c = f * c_fore + i * c\n h = o * torch.tanh(c)\n return h, c\n\n def init_param(self):\n u = 1 / sqrt(self.hidden_size)\n dim_param = self.input_size + self.hidden_size\n self.Wi = nn.Parameter(torch.Tensor(dim_param, self.hidden_size))\n self.Wf = nn.Parameter(torch.Tensor(dim_param, self.hidden_size))\n self.Wo = nn.Parameter(torch.Tensor(dim_param, self.hidden_size))\n self.Wc = nn.Parameter(torch.Tensor(dim_param, self.hidden_size))\n self.Bi = nn.Parameter(torch.Tensor(self.hidden_size))\n self.Bf = nn.Parameter(torch.Tensor(self.hidden_size))\n self.Bo = nn.Parameter(torch.Tensor(self.hidden_size))\n self.Bc = nn.Parameter(torch.Tensor(self.hidden_size))\n torch.nn.init.uniform_(self.Wi, -u, u)\n torch.nn.init.uniform_(self.Wf, -u, u)\n torch.nn.init.uniform_(self.Wo, -u, u)\n torch.nn.init.uniform_(self.Wc, -u, u)\n self.Bi.data.zero_()\n self.Bf.data.zero_()\n self.Bo.data.zero_()\n self.Bc.data.zero_()\n\n\nclass BiLstm(nn.Module):\n def __init__(self, dim_in, dim_hidden, device):\n super().__init__()\n self.dim_in = dim_in\n self.dim_hidden = dim_hidden\n self.fcell = LstmCell(dim_in, dim_hidden)\n self.bcell = LstmCell(dim_in, dim_hidden)\n self.device = device\n\n def forward(self, x, mask):\n '''\n :return bilstm hidden state\n :param x: batch, len, dim_in\n :return:\n '''\n batch, length, _ = x.shape\n hf, cf = torch.zeros([batch, self.dim_hidden]).to(self.device), \\\n torch.zeros([batch, self.dim_hidden]).to(self.device)\n hb, cb = torch.zeros([batch, self.dim_hidden]).to(self.device), \\\n torch.zeros([batch, self.dim_hidden]).to(self.device)\n hf_list = []\n hb_list = []\n for t in range(length):\n m = mask[:, t]\n _hf, _cf = self.fcell(x[:, t, :], hf, cf)\n hf = m[:, None] * _hf + (1 - m)[:, None] * hf\n cf = m[:, None] * _cf + (1 - m)[:, None] * cf\n hf_list.append(hf)\n for t in range(length - 1, -1, -1):\n m = mask[:, t]\n _hb, _cb = self.bcell(x[:, t, :], hb, cb)\n hb = m[:, None] * _hb + (1 - m)[:, 
None] * hb\n cb = m[:, None] * _cb + (1 - m)[:, None] * cb\n hb_list.append(hb)\n hb_list = list(reversed(hb_list))\n HF = torch.stack(hf_list, dim=1)\n HB = torch.stack(hb_list, dim=1)\n H = torch.cat([HF, HB], dim=-1)\n return H # batch, len, 2 * dim_hidden\n\n\ndef get_mask(maxlen, lens):\n device = lens.device\n batch = lens.shape[0]\n idx = torch.arange(0, maxlen, dtype=torch.float32).to(device) # torch.range is deprecated; arange covers [0, maxlen)\n idx = torch.stack([idx] * batch)\n mask = idx < lens[:, None].float()\n mask = mask.float()\n return mask\n\n\ndef get_acc(logit, labels):\n correct = torch.sum(torch.argmax(logit, dim=-1) == labels)\n acc = correct.float() / len(labels)\n return acc\n\n\ndef masked_softmax(A, mask, dim=1):\n # matrix A is the one you want to do mask softmax at dim=1\n A_max = torch.max(A, dim=dim, keepdim=True)[0]\n A_exp = torch.exp(A - A_max)\n A_exp = A_exp * mask # this step masks\n A_softmax = A_exp / (torch.sum(A_exp, dim=dim, keepdim=True) + 1e-10)\n return A_softmax\n\n\ndef get_score(a, b_max):\n a_max = np.argmax(a, axis=-1)\n acc = accuracy_score(a_max, b_max)\n p = precision_score(a_max, b_max, average='macro')\n r = recall_score(a_max, b_max, average='macro')\n f1 = f1_score(a_max, b_max, average='macro')\n return acc, p, r, f1\n\n\nif __name__ == '__main__':\n lens = torch.from_numpy(np.array([3, 4, 5, 6, 3])).long().cuda()\n mask = get_mask(10, lens)\n print(mask)\n","sub_path":"gcm/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"180699584","text":"import pygame\nfrom GameCell import *\nclass TFireCell(TGameCell): # Fire cell. On top of the parent's behaviour it plays an animation.\n TYPE = 'fire'\n def magic_old(self, scr): # Old version of the animation. If you still want to see it, name it \"magic\" (without quotes) and rename the original function.\n for stage in range(9):\n pygame.event.get()\n for i in range(self.damage * 90):\n R = randint(100, 255)\n scr.set_at([self.pos[0] + randint(1, self.img.get_width() - 1), self.pos[1] + randint(1, self.img.get_height() - 1)], [R + randint(0, 255 - R), R, 0])\n pygame.display.update()\n pygame.time.delay(100)\n def magic(self, scr): # Plays this cell's own magic animation.\n for stage in range(9):\n pygame.event.get()\n N = self.damage * 3\n for i in range(N):\n R = randint(100, 255)\n start = [self.pos[0] + 30 + randint(-10, 10), self.pos[1] + 60 - 2]\n finish = [start[0] + randint(-5, 5), start[1] - randint(5, 15) * self.damage]\n pygame.draw.line(scr, [R + randint(0, 255 - R), R, 0], start, finish, 5)\n #pygame.draw.line(scr, [R + randint(0, 255 - R), R, 0], [self.pos[0] + randint(0, 60), self.pos[1] + 60 - i * 60 / N], [self.pos[0] + randint(0, 60), self.pos[1] + 60 - i * 60 / N - randint(0, 60 // N)])\n pygame.display.update()\n if self.FullAnim:\n pygame.time.delay(100)\n","sub_path":"FireCell.py","file_name":"FireCell.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"171327741","text":"# Author: Mingyu Ding\n# Time: 2/1/2020 9:33 PM\n# Copyright 2019. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport re\nimport numpy as np\nimport os\n\n\ndef parse_kitti_result(respath, mode='new'):\n\n text_file = open(respath, 'r')\n\n acc = np.zeros([3, 41], dtype=float)\n\n lind = 0\n for line in text_file:\n\n parsed = re.findall('([\\d]+\\.?[\\d]*)', line)\n\n for i, num in enumerate(parsed):\n acc[lind, i] = float(num)\n\n lind += 1\n\n text_file.close()\n\n if mode == 'old':\n easy = np.mean(acc[0, 0:41:4])\n mod = np.mean(acc[1, 0:41:4])\n hard = np.mean(acc[2, 0:41:4])\n else:\n easy = np.mean(acc[0, 1:41:1])\n mod = np.mean(acc[1, 1:41:1])\n hard = np.mean(acc[2, 1:41:1])\n\n return easy, mod, hard\n\n\nresults_path = '/mnt/lustre/dingmingyu/2020/mmdetection/work_dirs_kitti/fcos_mstrain_640_800_x101_64x4d_fpn_gn_2x/name_2019_10_20/epoch_1/data' # TODO\ntest_iter = 0\n\n\nfor lbl in ['Car', 'Cyclist', 'Pedestrian']:\n\n lbl = lbl.lower()\n\n respath_2d = os.path.join(results_path.replace('/data', ''), 'stats_{}_detection.txt'.format(lbl))\n respath_gr = os.path.join(results_path.replace('/data', ''), 'stats_{}_detection_ground.txt'.format(lbl))\n respath_3d = os.path.join(results_path.replace('/data', ''), 'stats_{}_detection_3d.txt'.format(lbl))\n\n if os.path.exists(respath_2d):\n easy, mod, hard = parse_kitti_result(respath_2d, mode='old')\n\n print_str = 'OLD_test_iter {} 2d {} --> easy: {:0.4f}, mod: {:0.4f}, hard: {:0.4f}'.format(test_iter, lbl,\n easy, mod, hard)\n print(print_str)\n\n easy, mod, hard = parse_kitti_result(respath_2d)\n\n print_str = 'NEW_test_iter {} 2d {} --> easy: {:0.4f}, mod: {:0.4f}, hard: {:0.4f}'.format(test_iter, lbl,\n easy, mod, hard)\n print(print_str)\n\n if os.path.exists(respath_gr):\n easy, mod, hard = parse_kitti_result(respath_gr, mode='old')\n\n print_str = 'OLD_test_iter {} gr {} --> easy: {:0.4f}, mod: {:0.4f}, hard: {:0.4f}'.format(test_iter, lbl,\n easy, mod, hard)\n\n print(print_str)\n\n easy, mod, hard = parse_kitti_result(respath_gr)\n\n print_str = 'NEW_test_iter {} gr {} --> easy: {:0.4f}, mod: {:0.4f}, hard: {:0.4f}'.format(test_iter, lbl,\n easy, mod, hard)\n\n print(print_str)\n\n if os.path.exists(respath_3d):\n easy, mod, hard = parse_kitti_result(respath_3d, mode='old')\n\n print_str = 'OLD_test_iter {} 3d {} --> easy: {:0.4f}, mod: {:0.4f}, hard: {:0.4f}'.format(test_iter, lbl,\n easy, mod, hard)\n\n print(print_str)\n\n easy, mod, hard = parse_kitti_result(respath_3d)\n\n print_str = 'NEW_test_iter {} 3d {} --> easy: {:0.4f}, mod: {:0.4f}, hard: {:0.4f}'.format(test_iter, lbl,\n easy, mod, hard)\n\n print(print_str)\n","sub_path":"kitti_tools/split1/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"432408638","text":"import calendar\nimport pytz\nimport requests\nfrom flask import Flask, jsonify, request, current_app\nfrom flask.json import 
JSONEncoder\nfrom datetime import datetime\nfrom dateutil import parser\nfrom .transforms import transform_sqlalchemy_obj\nfrom .response import Response, InvalidArgument\nfrom apns import Payload\nimport time\nimport hashlib\n\nREPORT_DICT = {\n \"topic\": \"TOPIC\",\n \"timeline\": \"TIMELINE\",\n \"reply\": \"REPLY\"\n}\n\n\nclass CustomJSONEncoder(JSONEncoder):\n\n def default(self, obj):\n try:\n if isinstance(obj, datetime):\n if obj.utcoffset():\n obj = obj - obj.utcoffset()\n millis = int(\n calendar.timegm(obj.timetuple()) * 1000 +\n obj.microsecond / 1000\n )\n return millis\n iterable = iter(obj)\n except TypeError:\n pass\n else:\n return list(iterable)\n # For sqlalchemy\n if hasattr(obj, '__tablename__'):\n return transform_sqlalchemy_obj(obj)\n return JSONEncoder.default(self, obj)\n\n\ndef get_md5(content):\n md = hashlib.md5()\n md.update(content)\n return md.hexdigest()\n\n\ndef parse_datetime(datetime_str, tz=None):\n if not datetime_str:\n return None\n local_datetime = parser.parse(datetime_str)\n if not local_datetime.utcoffset():\n if not tz:\n tz = current_app.config['DEFAULT_TIMEZONE']\n local_datetime = tz.localize(local_datetime)\n return local_datetime.astimezone(pytz.utc).replace(tzinfo=None)\n\n\ndef parse_local_datetime(datetime_str, tz=None):\n if not datetime_str:\n return None\n utc_datetime = parser.parse(str(datetime_str))\n if not utc_datetime.utcoffset():\n if not tz:\n utc_tz = current_app.config['UTC_TIMEZONE']\n local_tz = current_app.config['DEFAULT_TIMEZONE']\n local_datetime = utc_tz.localize(utc_datetime)\n lo = local_datetime.replace(tzinfo=pytz.utc).astimezone(utc_tz)\n return local_tz.normalize(lo).replace(tzinfo=None)\n\n\ndef send_message(phone, message):\n payload = {\n 'account': current_app.config.get('MESSAGE_ID'),\n 'password': current_app.config.get('MESSAGE_PASSWORD'),\n 'mobile': phone,\n 'content': message\n }\n r = requests.post(current_app.config.get('MESSAGE_SERVER'), data=payload)\n return r.json()\n\n\ndef paginate_response(query):\n data = request.args\n try:\n return Response.success(\n query.paginate(\n int(data.get('page', 1)),\n int(data.get('per_page', 10)), False))\n except ValueError as e:\n return Response.error('Invalid parameters')\n\n\ndef send_notification(device_id, title, message, badge=1):\n try:\n if not device_id:\n return\n if device_id.startswith('android:'):\n device_id = device_id[8:]\n r = current_app.youmeng.android_push_unicast(device_id, title, message)\n if r.status_code != 200:\n raise InvalidArgument(r.text)\n return\n elif device_id.startswith('ios:'):\n device_id = device_id[4:]\n if device_id:\n payload = Payload(alert=message, sound=\"default\", badge=badge)\n current_app.apns.gateway_server.send_notification(device_id, payload)\n except Exception as e:\n current_app.logger.warning(e)\n\n\ndef send_notifications(device_ids, title, message):\n android = []\n ios = []\n for device_id in device_ids:\n if device_id.startswith('android:'):\n android.append(device_id[8:])\n elif device_id.startswith('ios:'):\n ios.append(device_id[4:])\n else:\n ios.append(device_id)\n\n if android:\n r = current_app.youmeng.android_push_unicast(\n ','.join(android), title, message)\n if r.status_code != 200:\n raise InvalidArgument(r.text)\n if ios:\n # frame = Frame()\n identifier = 1\n expiry = time.time() + 3600\n priority = 10\n payload = Payload(alert=message, sound=\"default\", badge=badge)\n # for id in ios:\n # frame.add_item(id, payload, identifier, expiry, priority)\n # 
current_app.apns.gateway_server.send_notification_multiple(frame)\n","sub_path":"server/iwx/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"588116279","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 20 17:19:16 2017\n\n@author: George\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport time\nimport os\n\n\ndef timeLapse(delay = 2.5, numberOfFrames = 100000, fileType = '.tif'):\n # folder to write to\n folder = r'C:\\Users\\George\\Desktop\\timelapse'\n \n subfolder = time.strftime(\"%d-%m-%Y_%I-%M-%S_%p\")\n \n newpath = folder + '\\\\' + subfolder\n \n if not os.path.exists(newpath):\n os.makedirs(newpath)\n \n filename = newpath + '\\\\' + 'img'\n \n #filetype \n #fileType = '.jpg'\n \n cap = cv2.VideoCapture(0)\n # 2304 x 1296 gets me 1280x720\n cap.set(4, 2304.0)\n cap.set(3, 1296.0)\n #print str(cap.get(3)),str(cap.get(4))\n \n ret, frame = cap.read()\n count = 0\n \n while(count < numberOfFrames):\n \n ret, frame = cap.read()\n frame_num = \"%08d\" % (count,)\n \n cv2.imshow('Video', frame) \n cv2.imwrite(filename + '_' + frame_num + fileType, frame)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n \n count += 1\n print (count)\n time.sleep(delay)\n \n cv2.destroyAllWindows()\n cap.release()\n\n\nif __name__ == '__main__':\n timeLapse(delay=0.002)\n\n","sub_path":"timelapse.py","file_name":"timelapse.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"143069500","text":"# The program that executes the main functions of the game.\n# Author: Qifu Yin\n# Date: 12-27-2018\n\nfrom graphics import *\nimport time\nimport random\nimport time\nimport sys\nfrom Deck import*\nfrom Button import*\nfrom Texts import*\n\nclass Game():\n def __init__(self,win):\n #set the main background for the game\n# start = GraphWin(\"zhajinhua\", 700, 400)\n# start.setCoords(0,400,615,0)\n# start.setBackground(\"white\")\n# table = Rectangle(Point(600,353),Point(87,27))\n# table.setFill(\"brown\")\n# table.draw(start)\n # Set variables for the game\n bet = 0\n money = 1000\n center = Point(250,100)\n # Draw the bank and bet in the window\n bank_view = Scorebox(win, Point(115,20),\" $\",money)\n bet_view = Scorebox(win, Point(210,20),\"Bet:\",bet)\n # Draw the buttons needed in the game\n draw_Button = Button(win, Point(50,40), 75, 25,15,\"Draw\", True)\n bet1_Button = Button(win, Point(50,65),75,25,15,\"$1\", False)\n bet5_Button = Button(win, Point(50,90),75,25,15,\"$5\", False)\n bet20_Button = Button(win, Point(50,115),75,25,15,\"$20\", False)\n bet50_Button = Button(win, Point(50,140),75,25,15,\"$50\", False)\n affirm_Button = Button(win, Point(50,165),75,25,15,\"Affirm\", False)\n again_Button = Button(win, Point(50,190), 75, 25,15,\"Again\", False)\n quit_Button = Button(win, Point(50,215), 75, 25,15,\"Quit\", True)\n buttons = [draw_Button,bet1_Button,bet5_Button,bet20_Button,bet50_Button,affirm_Button,again_Button,quit_Button]\n # Create a deck and shuffle cards\n deck = Deck()\n deck.Shuffle()\n p = win.getMouse()\n \n #test code\n num_1= num_2=value1=value2=0\n num1_view = Scorebox(win, Point(270,20),\"my\",num_1)\n num2_view = Scorebox(win, Point(350,20),\"computer:\",num_2)\n #test code\n \n # Begin the game:\n \n while not quit_Button.clicked(p):\n p = win.getMouse()\n # User can click the draw button to start playing\n if draw_Button.clicked(p):\n # view 
original bet\n bet = 1\n bet_view.updateText(bet)\n # draw 3 cards to player\n player_cards = []\n for i in range(3):\n location = Point(125+25*i,90) # Place each card 25*i further on the x-axis\n value = deck.Deal()\n card = Card(win,value,location)\n player_cards.append(card)\n # draw 3 cards to computer\n computer_cards = []\n for i in range(3):\n location = Point(300+25*i,90) # Place each card 25*i further on the x-axis\n value = deck.Deal()\n card = Card(win,value,location)\n computer_cards.append(card)\n card.Undraw()\n # update the buttons\n buttonUpdates('stillplaying',buttons) \n if bet1_Button.clicked(p):\n bet += 1\n bet_view.updateText(bet)\n if bet5_Button.clicked(p):\n bet += 5\n bet_view.updateText(bet)\n if bet20_Button.clicked(p):\n bet += 20\n bet_view.updateText(bet)\n if bet50_Button.clicked(p):\n bet += 50\n bet_view.updateText(bet)\n if affirm_Button.clicked(p):\n # compare the cards to decide who wins and settlement\n #test code\n num_1 = get_max(player_cards)\n num_2 = get_max(computer_cards)\n num1_view.updateText(num_1)\n num2_view.updateText(num_2)\n #test code\n if compare(player_cards, computer_cards):\n money += bet\n bank_view.updateText(money)\n else:\n money -= bet\n bank_view.updateText(money)\n # Draw the computer cards\n for card in computer_cards:\n card.kaart.draw(win)\n # update the buttons\n buttonUpdates('gameover',buttons)\n if again_Button.clicked(p):\n # clear the cards\n for i in player_cards:\n i.Undraw()\n for i in computer_cards:\n i.Undraw()\n # reset the scoreboard\n bet = 0\n bet_view.updateText(bet)\n # update the buttons\n buttonUpdates(\"newgame\", buttons)\n deck = Deck()\n deck.Shuffle()\n \n # Window closes if the quit_Button is clicked\n win.close()\n \ndef is_bomb(cards):\n value = []\n for card in cards:\n value.append(card.value[1:])\n if value[0] == value[1] and value[1] == value[2]:\n return True\n return False\ndef is_sf(cards):\n suit = []\n value = []\n for card in cards:\n suit.append(card.value[0])\n value.append(int(card.value[1:]))\n if suit[0] == suit[1] and suit[1] == suit[2]:\n if 14 in value:\n if 12 in value:\n if 13 in value:\n return True\n return False\n else:\n value = sorted(value)\n if abs(value[0]-value[1]) == 1 and abs(value[1]-value[2]) == 1:\n return True\n else:\n return False\ndef is_flush(cards):\n suit = []\n for card in cards:\n suit.append(card.value[0])\n if suit[0] == suit[1] and suit[1] == suit[2]:\n return True\n return False\ndef is_straight(cards):\n value = []\n for card in cards:\n value.append(int(card.value[1:]))\n if 14 in value:\n if 12 in value:\n if 13 in value:\n return True\n return False\n else:\n value = sorted(value)\n if abs(value[0]-value[1]) == 1 and abs(value[1]-value[2]) == 1:\n return True\n else:\n return False\n \ndef is_pair(cards):\n value = []\n for card in cards:\n value.append(card.value[1:])\n if value[0] == value[1] or value[0] == value[2] or value[2] == value[1]:\n return True\n return False\n\ndef get_max(cards):\n #get the type of hands, 6 is bomb and 1 is single\n if is_bomb(cards):\n return 6\n elif is_sf(cards):\n return 5\n elif is_flush(cards):\n return 4\n elif is_straight(cards):\n return 3\n elif is_pair(cards):\n return 2\n else:\n return 1\n\n
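# Editor's addition (hypothetical helper, not part of the original game): the straight\n# test above can be written compactly on integer ranks; the Q-K-A hand it special-cases\n# is just the consecutive run [12, 13, 14]:\n# def is_straight_ranks(ranks):\n# r = sorted(ranks)\n# return r[1] - r[0] == 1 and r[2] - r[1] == 1\n# e.g. is_straight_ranks([14, 12, 13]) -> True; is_straight_ranks([2, 3, 5]) -> False\n\n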
def compare(cards1, cards2):\n # cards1 is player and cards2 is computer\n number_1 = get_max(cards1)\n number_2 = get_max(cards2)\n suit1, value1 = [], [] # separate lists: chained assignment would alias them\n suit2, value2 = [], []\n for card in cards1:\n suit1.append(card.value[0])\n value1.append(card.getValue())\n for card in cards2:\n suit2.append(card.value[0])\n value2.append(card.getValue())\n # if num1 > num2, player wins, return True\n if number_1 > number_2:\n return True\n # if num1 < num2, computer wins, return False\n elif number_1 < number_2:\n return False\n # same type of hands\n # here number_1 == number_2\n # if bomb\n elif number_1 == 6:\n if value1[0] >= value2[0]:\n return True\n else:\n return False\n # if sf and straight\n elif number_1 == 5 or number_1 == 3:\n max1 = max(value1)\n max2 = max(value2)\n if max1 >= max2:\n return True\n else:\n return False\n # if flush and single\n elif number_1 == 4 or number_1 == 1:\n value1 = sorted(value1)\n value2 = sorted(value2)\n if value1[2] > value2[2]:\n return True\n elif value1[2] < value2[2]:\n return False\n else:\n if value1[1] > value2[1]:\n return True\n elif value1[1] < value2[1]:\n return False\n else:\n if value1[0] >= value2[0]:\n return True\n else:\n return False\n # if pair\n elif number_1 == 2:\n value1 = sorted(value1)\n value2 = sorted(value2)\n if value1[0] == value1[1]:\n pair_value1 = value1[0]\n single_value1 = value1[2]\n else:\n pair_value1 = value1[2]\n single_value1 = value1[0]\n if value2[0] == value2[1]:\n pair_value2 = value2[0]\n single_value2 = value2[2]\n else:\n pair_value2 = value2[2]\n single_value2 = value2[0]\n if pair_value1 > pair_value2:\n return True\n elif pair_value1 < pair_value2:\n return False\n else:\n if single_value1 >= single_value2:\n return True\n else:\n return False\n\n \n \n \n\n\n\n\n\n\n\n\n\n \n\n \n","sub_path":"Gameplay.py","file_name":"Gameplay.py","file_ext":"py","file_size_in_byte":9037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"122262640","text":"#-*-coding:utf8;-*-\n#qpy:3\n#qpy:console\nimport time\n\ntag = 'welcome to flames'\nprint(tag.center(40,' ').upper())\ntime.sleep(.5)\ntag1 = 'enter first name:'\ntag2 = 'enter second name:'\n\nprint(tag1.center(39,' ').title())\nname1 = input().lower()\ntime.sleep(.5)\nprint(tag2.center(39,' ').title())\nname2 = input().lower()\nl1 = len(name1)\nl2 = len(name2)\nlength = l1+l2\n\n\nlist1 = []\nfor l1 in range(0,len(name1)):\n if name1[l1] in name2:\n list1.append(name1[l1])\n \nfor l2 in range(0,len(name2)):\n if name2[l2] in name1:\n list1.append(name2[l2])\n \nlength0 = length-len(list1)\n\ndef length(num,fate):\n \n if length0 == num:\n print(fate)\n return\n\nlength(1,'s')\nlength(2,'e')\nlength(3,'f')\nlength(4,'e')\nlength(5,'f')\nlength(6,'m')\nlength(7,'e')\nlength(8,'a')\nlength(9,'e')\nlength(10,'l')\nlength(11,'m')\nlength(12,'a')\nlength(13,'a')\nlength(14,'f')\nlength(15,'m')\nlength(16,'f')\nlength(17,'a')\nlength(18,'f')\nlength(19,'l')\nlength(20,'e')\nlength(21,'f')\nlength(22,'e')\nlength(23,'f')\nlength(24,'f')\nlength(25,'m')\n\n\n\n\n\n\n\n\n\n","sub_path":"flames.py","file_name":"flames.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"140737196","text":"#\n# Flags:\n#\n# A non-empty array A consisting of N integers is given.\n#\n# A peak is an array element which is larger than its neighbours.\n# More precisely, it is an index P such that 0 < P < N − 1\n# and A[P − 1] < A[P] > A[P + 1].\n#\n# For example, the following array A:\n#\n# A[0] = 1\n# A[1] = 5\n# A[2] = 3\n# A[3] = 4\n# A[4] = 3\n# A[5] = 4\n# A[6] = 1\n# A[7] = 2\n# A[8] = 3\n# A[9] = 4\n# A[10] = 6\n# A[11] = 2\n# has exactly four peaks: elements 1, 3, 5 and 10.\n#\n# You are going on a trip to a range of 
mountains whose relative heights\n# are represented by array A, as shown in a figure below. You have to\n# choose how many flags you should take with you. The goal is to set\n# the maximum number of flags on the peaks, according to certain rules.\n#\n# Flags can only be set on peaks. What's more, if you take K flags,\n# then the distance between any two flags should be greater than or\n# equal to K. The distance between indices P and Q is the absolute value |P − Q|.\n#\n# For example, given the mountain range represented by array A,\n# above, with N = 12, if you take:\n# *\n# two flags, you can set them on peaks 1 and 5;\n# three flags, you can set them on peaks 1, 5 and 10;\n# four flags, you can set only three flags, on peaks 1, 5 and 10.\n# You can therefore set a maximum of three flags in this case.\n#\n# Write a function:\n#\n# class Solution { public int solution(int[] A); }\n#\n# that, given a non-empty array A of N integers, returns the maximum number of\n# flags that can be set on the peaks of the array.\n#\n# For example, the following array A:\n#\n# A[0] = 1\n# A[1] = 5\n# A[2] = 3\n# A[3] = 4\n# A[4] = 3\n# A[5] = 4\n# A[6] = 1\n# A[7] = 2\n# A[8] = 3\n# A[9] = 4\n# A[10] = 6\n# A[11] = 2\n# the function should return 3, as explained above.\n#\n# Write an efficient algorithm for the following assumptions:\n# *\n# N is an integer within the range [1..400,000];\n# each element of array A is an integer within the range [0..1,000,000,000].\n#\nimport math\ndef main():\n doIt(0, [0, 0, 1, 0])\n doIt(1, [0, 1, 0, 1, 0])\n doIt(2, [0, 1, 0, 1, 0, 1, 0])\n doIt(3, [0, 1, 0, 0, 1, 0, 0, 1, 0])\n doIt(4, [0, 99, 0, 0, 0, 99, 1, 0, 0, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 99, 0])\n doIt(4, [0, 99, 0, 0, 0, 99, 1, 0, 0, 99, 2, 33, 2, 1, 2, 3, 44, 5, 6, 7, 8, 9, 99, 0])\n\ndef doIt(answer, A):\n out(\"Answer expected: {} : {} \".format(answer, solution(A)))\n\ndef out(msg):\n print(msg)\n\ndef solution(A):\n if len(A) < 3:\n return 0\n\n peaks = []\n\n for i in range(1, (len(A) - 1)):\n middle = A[i]\n if A[i - 1] < middle and middle > A[i + 1]:\n peaks.append(i)\n\n numberOfPeaks = len(peaks)\n if numberOfPeaks < 2:\n return numberOfPeaks\n\n peakAry = peaks\n peakAry.sort()\n\n maxCount = 0\n maxDistance = (peakAry[numberOfPeaks - 1] - peakAry[0])\n maxPeaks = int(math.sqrt(maxDistance)) + 1\n\n for flagCount in range(maxPeaks, 1, -1):\n\n count = 0\n leftIndex = 0\n rightIndex = 1\n while leftIndex < numberOfPeaks and rightIndex < numberOfPeaks:\n leftPeak = peakAry[leftIndex]\n rightPeak = peakAry[rightIndex]\n if (rightPeak - leftPeak) >= flagCount:\n count += 1 if count > 0 else 2\n if count >= flagCount:\n break\n\n # print(\"FlagCount : Left : Right : Count : $flagCount : $leftIndex : $rightIndex : $count\")\n leftIndex = rightIndex\n rightIndex = leftIndex + 1\n else:\n rightIndex += 1\n\n if rightIndex >= numberOfPeaks:\n leftIndex += 1\n rightIndex = leftIndex + 1\n\n if count >= flagCount:\n maxCount = flagCount\n # Found the max+\n break\n\n return maxCount\n\nmain()\n\n","sub_path":"C0dility/python/src/_0010_Prime_and_composite_numbers/Flags.py","file_name":"Flags.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"288894338","text":"from tkinter import *\nimport os, time, sqlite3, webbrowser, smtplib\nfrom tkinter import scrolledtext, messagebox\n\nclass Feedback:\n\tdef __init__(self, interface=\"Linux\", pathinit=\"\", users=[None], 
initialfeedback=\"feedback;stars\"):\n\t\tself.initfeed = initialfeedback\n\t\tself.assvar = StringVar()\n\t\tself.assvar.set(self.initfeed)\n\t\tself.path = pathinit\n\t\tself.interface = interface\n\t\tif self.interface == \"Windows\":\n\t\t\tself.slash = \"\\ \"[0]\n\t\telif self.interface == \"Linux\":\n\t\t\tself.slash = \"/\"\n\t\telse:\n\t\t\tself.interface = \"Linux\"\n\t\t\tself.slash = \"/\"\n\t\tself.middle = -1\n\t\t#for x in users:\n\t\tconn = sqlite3.connect(\".FlappyBird\"+self.slash+\"Feedbacks.db\")\n\t\tcursor = conn.cursor()\n\t\tsql = \"SELECT stars FROM feedbacks\"\n\t\ttry:\n\t\t\tcursor.execute(sql)\n\t\t\tself.sm = [item[0] for item in cursor]\n\t\t\tself.middle = sum(self.sm) / len(self.sm)\n\t\texcept:\n\t\t\tpass\n\tdef start(self):\n\t\tself.tk = Tk()\n\t\tself.tk.title(\"OrangoMango Feedback [{0:.2f}stars]\".format(self.middle))\n\t\tself.titlelabel = Label(self.tk, text=\"Send Feedback\")\n\t\tself.titlelabel.grid()\n\t\tself.t_EnterName = Label(self.tk, text=\"Enter name: \")\n\t\tself.t_EnterName.grid(row=1)\n\t\tself.t_EnterFeedback = Label(self.tk, text=\"Enter \\'Feedback;stars\\': \")\n\t\tself.t_EnterFeedback.grid(row=2)\n\t\tself.e_EnterName = Entry(self.tk)\n\t\tself.e_EnterName.grid(row=1, column=1)\n\t\tself.e_EnterFeedback = scrolledtext.ScrolledText(self.tk, width=40, height=7)\n\t\tself.e_EnterFeedback.grid(row=2, column=1)\n\t\tself.e_EnterFeedback.insert(\"end\", self.assvar.get())\n\t\tself.b_Cancel = Button(self.tk, text=\"Cancel\", command=self.cancel)\n\t\tself.b_Cancel.grid(row=3, column=2, sticky=\"w\")\n\t\tself.b_Submit = Button(self.tk, text=\"Submit\", command=self.submit)\n\t\tself.b_Submit.grid(row=3, column=2, sticky=\"e\")\n\t\tself.tk.mainloop()\n\tdef cancel(self):\n\t\tself.tk.destroy()\n\tdef submit(self):\n\t\ta = messagebox.askyesno(\"GitHub\", \"Do you want to post the feedback online? 
(YES-github.com; NO-locally)\")\n\t\tif a:\n\t\t\twebbrowser.open(\"http://www.github.com/OrangoMango/FlappyBird/issues/new\")\n\t\tself.name = self.e_EnterName.get()\n\t\t#print(self.e_EnterFeedback.get(\"1.0\", \"end-1c\").split(\";\"))\n\t\ttry:\n\t\t\tself.feedback, self.stars = self.e_EnterFeedback.get(\"1.0\", \"end-1c\").split(\";\") #Try tuple()\t\t\t\n\t\t\tself.stars = float(self.stars)\n\t\t\tif self.stars < 1 or self.stars > 5:\n\t\t\t\traise ValueError(\"stars must be between 1 and 5\")\n\t\texcept:\n\t\t\tmessagebox.showerror(\"Error\", \"Invalid star input, please write: \\'Feedback;stars_number\\'.\")\n\t\t\treturn\n\t\tnow = time.asctime()\n\t\tcon = sqlite3.connect(self.path+\".FlappyBird\"+self.slash+\"Feedbacks.db\")\n\t\tcursor = con.cursor()\n\t\tsql = \"CREATE TABLE IF NOT EXISTS feedbacks(name TEXT, date TEXT, feedback TEXT, stars INTEGER)\"\n\t\tcursor.execute(sql)\n\t\tsql = \"INSERT INTO feedbacks VALUES(?, ?, ?, ?)\"\n\t\tcursor.execute(sql, (self.name, now, self.feedback, self.stars))\n\t\tcon.commit()\t\n\t\n\t\tsql = \"SELECT * FROM feedbacks WHERE name = ?\"\n\t\tcursor.execute(sql, (self.name,))\n\t\tdata = [item for item in cursor]\n\t\tcon.close()\n\t\tmessagebox.showinfo(\"ClientInfo\", \"Feedback from {user} in {date} [{feedback}] {stars}:\".format(user=data[-1][0], \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdate=data[-1][1], feedback=data[-1][2], stars=data[-1][3]))\n\t\tself.tk.destroy()\n\n\tdef see_feedbacks(self):\n\t\tmessagebox.showinfo(\"AdminInfo\", \"Feedbacks Access Is only for admin\")\n","sub_path":"FlappyBird - Linux/FlappyBird - Linux/FeedbackInterface.py","file_name":"FeedbackInterface.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"480899837","text":"#!/usr/bin/env python3\n#\nimport os\nimport ftplib\nfrom utils.FileManager import FileManager\nfrom utils.FTPConfig import FTPConfig\n\nROOT = os.path.realpath(__file__).split(\"/src/\")[0]\nSCRIPT_NAME = os.path.basename(__file__).split(\".\")[0]\nUNIQUE_RUN_ID = \"caf1aeb2-52b0-47d8-b37d-5a7f39f4da59\"\nFILE_ERROR_OUT = SCRIPT_NAME + \".error.txt\"\n\n\nclass DownloadStorageFiles2:\n    file_manager = None\n    ftp_config = None\n    ftp = None\n\n    def __init__(self):\n        self.file_manager = FileManager(SCRIPT_NAME, self.__class__.__name__, UNIQUE_RUN_ID)\n        self.ftp_config = FTPConfig(ROOT+\"/resources/DownloadStorageFiles/ftp.ini\", SCRIPT_NAME, UNIQUE_RUN_ID)\n\n    def download(self, input_file):\n        self.file_manager.log(\"--Download START -- :\" + input_file)\n        with open(input_file, \"r\") as f:\n            for line in f:\n                line_path = str(line).replace('\\n', '')\n                data = str(line_path).split(',')\n                if len(data) >= 3:\n                    media_id = data[0]\n                    self.file_manager.validate_dir(self.file_manager.out_path, media_id + \"/\")  # ensure folder exists\n\n                    storage_filehost = data[1]\n                    storage_filepath = data[2]\n\n                    source_filename = storage_filepath.split('/')[-1]\n                    source_basepath = storage_filepath.replace(source_filename,'')\n\n                    # file_name = source_filename.split('.')[0]\n                    file_extension = source_filename.split('.')[-1]\n\n                    # local_filename = source_filename\n                    local_filename = \"manifest.\" + file_extension\n                    if len(data) == 4:\n                        local_filename = data[3] + \".\" + file_extension\n                    else:\n                        self.file_manager.out(storage_filehost + \",\" + source_basepath, media_id + \"/\" + file_extension+\"_info.txt\", True)\n\n                    try:\n                        if self.ftp_config.profile_tag != storage_filehost:\n                            if self.ftp is not None:\n                                self.ftp.quit()\n                            
self.ftp_config.load(storage_filehost) # load storage_host as profile_tag\n self.ftp = ftplib.FTP(self.ftp_config.host)\n self.ftp.login(self.ftp_config.username, self.ftp_config.password)\n\n local_filepath = self.file_manager.get_outfile(media_id + \"/\" + local_filename)\n local_file = open(local_filepath, 'wb')\n self.file_manager.log(\"Download: \" + storage_filepath + \" >> \" +\n self.ftp.retrbinary('RETR ' + storage_filepath, local_file.write))\n local_file.close()\n except ftplib.error_perm:\n err_msg = \"FTP file does not exist,mediaId:\" + media_id + \",filepath:\" + storage_filepath\n self.file_manager.log(err_msg)\n self.file_manager.out(err_msg, FILE_ERROR_OUT)\n self.file_manager.delete_outfile(media_id + \"/\" + local_filename)\n except ftplib.all_errors:\n err_msg = \"FTP all errors,mediaId:\" + media_id\n self.file_manager.log(err_msg)\n self.file_manager.out(err_msg, FILE_ERROR_OUT)\n self.file_manager.delete_outfile(media_id + \"/\" + local_filename)\n\n else:\n self.file_manager.log(\"Line data not enough info:\" + line_path)\n self.file_manager.out(\"Line data not enough info:\" + line_path, FILE_ERROR_OUT)\n\n self.file_manager.log(\"--Download END -- :\" + input_file)\n\n\ndef path_form(first, *entries):\n final_entries = []\n\n def work_of_magic(value):\n items = value.split('/')\n for item in items:\n if item is not '':\n final_entries.append(item)\n\n if first is not '':\n first_items = first.split('/')\n if first_items[0] is '':\n final_entries.append('')\n\n work_of_magic(first)\n\n for entry in entries:\n work_of_magic(entry)\n\n return '/'.join(final_entries)\n\ndsf = DownloadStorageFiles2()\n# folder = \"00001-00002\" # Trial\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"00001-01000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"01001-02000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"02001-03000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"03001-04000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"04001-05000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"05001-06000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = 
\"06001-07000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"07001-08000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"08001-09000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"09001-10000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH-cont.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"10001-11000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"11001-12000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"12001-13000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"13001-14000\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n# folder = \"14001-14355\"\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-DASH.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/manifest-HLS.txt\")\n# dsf.download(ROOT+\"/in/DownloadStorageFiles/\"+folder+\"/subtitles.txt\")\n\nexit(0)\n\n","sub_path":"python/src/DownloadStorageFiles2.py","file_name":"DownloadStorageFiles2.py","file_ext":"py","file_size_in_byte":8321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"439337675","text":"import torch\nimport torch.nn as nn\nfrom inferno.extensions.layers.convolutional import ConvELU3D, Conv3D, BNReLUConv3D\nfrom inferno.extensions.layers.sampling import AnisotropicPool, AnisotropicUpsample, Upsample, GlobalMaskedAvgPool3d\nfrom .base import Xcoder\n\nCONV_TYPES = {'vanilla': ConvELU3D,\n 'conv_bn': BNReLUConv3D}\n\n\ndef get_pooler(scale_factor):\n assert isinstance(scale_factor, (int, list, tuple))\n if isinstance(scale_factor, (list, tuple)):\n assert len(scale_factor) == 3\n assert scale_factor[0] == 1\n # we need to make sure that the scale factor conforms with the single value\n # that AnisotropicPool expects\n pooler = AnisotropicPool(downscale_factor=scale_factor[1])\n else:\n if scale_factor > 0:\n pooler = nn.MaxPool3d(kernel_size=1 + scale_factor,\n stride=scale_factor,\n padding=1)\n else:\n pooler = None\n return pooler\n\n\ndef 
get_sampler(scale_factor):\n assert isinstance(scale_factor, (int, list, tuple))\n if isinstance(scale_factor, (list, tuple)):\n assert len(scale_factor) == 3\n # we need to make sure that the scale factor conforms with the single value\n # that AnisotropicPool expects\n assert scale_factor[0] == 1\n sampler = AnisotropicUpsample(scale_factor=scale_factor[1])\n else:\n if scale_factor > 0:\n sampler = Upsample(scale_factor=scale_factor)\n else:\n sampler = None\n return sampler\n\n\nclass Encoder(Xcoder):\n def __init__(self, in_channels, out_channels, kernel_size,\n conv_type=ConvELU3D, scale_factor=2):\n super(Encoder, self).__init__(in_channels, out_channels, kernel_size,\n conv_type=conv_type,\n pre_conv=get_pooler(scale_factor))\n\n\nclass Decoder(Xcoder):\n def __init__(self, in_channels, out_channels, kernel_size,\n conv_type=ConvELU3D, scale_factor=2):\n super(Decoder, self).__init__(in_channels, out_channels, kernel_size,\n conv_type=conv_type,\n post_conv=get_sampler(scale_factor))\n\n\nclass UNet3DNl(nn.Module):\n \"\"\"\n 3D U-Net architecture without skip conncetions\n with the number of layers specified by the user.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n initial_num_fmaps,\n fmap_growth,\n num_layers=5,\n scale_factor=2,\n glob_pool=None,\n final_activation='auto',\n conv_type_key='vanilla'):\n \"\"\"\n Parameter:\n ----------\n in_channels (int): number of input channels\n out_channels (int): number of output channels\n initial_num_fmaps (int): number of feature maps of the first layer\n fmap_growth (int): growth factor of the feature maps; the number of feature maps\n in layer k is given by initial_num_fmaps * fmap_growth**k\n num_layers (int): the number of layers (excluding the base) in the U-Net\n scale_factor (int or list / tuple): upscale / downscale factor (default: 2)\n glob_pool: the final global pooling (None, 'avg', 'max')\n final_activation: final activation used (default: 'auto')\n conv_type_key: convolution type used (default: 'vanilla')\n \"\"\"\n super(UNet3DNl, self).__init__()\n\n assert conv_type_key in CONV_TYPES, conv_type_key\n conv_type = CONV_TYPES[conv_type_key]\n assert isinstance(scale_factor, (int, list, tuple))\n self.scale_factor = [scale_factor] * num_layers \\\n if isinstance(scale_factor, int) else scale_factor\n assert len(self.scale_factor) == num_layers\n self.scale_factor = [0] + self.scale_factor \\\n if isinstance(self.scale_factor, list) else (0,) + self.scale_factor\n # the entry can be a tuple/list for anisotropic sampling\n assert all(isinstance(sfactor, (int, list, tuple)) for sfactor in self.scale_factor)\n\n # The global pooling applied on the bottleneck embedding space\n # to convert the feature maps to a feature vector\n # of the same size for any input size\n # if glob_pool == 'avg':\n # self.global_pool = nn.AdaptiveAvgPool3d(1)\n # elif glob_pool == 'max':\n # self.global_pool = nn.AdaptiveMaxPool3d(1)\n if glob_pool == 'avg_mask':\n self.global_pool = GlobalMaskedAvgPool3d()\n else:\n self.global_pool = None\n\n # Set attributes\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n # Build encoders with proper number of feature maps\n # number of feature maps for the encoders\n\n fe = [in_channels]\n for n in range(num_layers):\n fe.append(initial_num_fmaps * fmap_growth**n)\n encoders = []\n for n in range(num_layers):\n encoders.append(Encoder(fe[n], fe[n+1], 3, conv_type=conv_type,\n scale_factor=self.scale_factor[n]))\n self.encoders = nn.ModuleList(encoders)\n\n # Build base\n 
# number of base output feature maps\n f0b = initial_num_fmaps * fmap_growth**num_layers\n\n self.base = Encoder(fe[num_layers], f0b, 3, conv_type=conv_type,\n scale_factor=self.scale_factor[num_layers])\n self.base_upsample = get_sampler(self.scale_factor[num_layers])\n\n # Decoders list\n fd = [f0b]\n for n in reversed(range(num_layers)):\n fd.append(initial_num_fmaps * fmap_growth**n)\n decoders = []\n for n in range(num_layers):\n decoders.append(Decoder(fd[n] + fe[-n-1], fd[n+1], 3, conv_type=conv_type,\n scale_factor=self.scale_factor[-n-2]))\n self.decoders = nn.ModuleList(decoders)\n\n # Build output\n self.output = Conv3D(fd[num_layers], out_channels, 3)\n # Parse final activation\n if final_activation == 'auto':\n final_activation = nn.Sigmoid() if out_channels == 1 else nn.Softmax3d()\n if isinstance(final_activation, str):\n self.final_activation = getattr(nn, final_activation)()\n elif isinstance(final_activation, nn.Module):\n self.final_activation = final_activation\n elif final_activation is None:\n self.final_activation = None\n else:\n raise NotImplementedError\n\n\n def encode(self, x, pool=False, mask=0):\n # get a downsampled mask\n if not mask and isinstance(mask, bool):\n mask = torch.ones(x.shape)\n else:\n mask = (x != mask).type(torch.float)\n # pool the mask\n for i in self.scale_factor:\n pooler = get_pooler(i)\n mask = mask if pooler is None else pooler(mask)\n\n encoder_out = []\n # apply encoders and remember their outputs\n for encoder in self.encoders:\n x = encoder(x)\n encoder_out.append(x)\n\n x = self.base(x)\n # if we want to use the encoder for feature extraction we might want to pool\n if pool and self.global_pool is not None:\n x = self.global_pool(x, mask) if isinstance(self.global_pool, GlobalMaskedAvgPool3d) \\\n else self.global_pool(x)\n return x\n else:\n return x, encoder_out\n\n def forward(self, input_):\n # encode\n embedding, encode_out = self.encode(input_)\n # the first decoder upsample\n x = embedding if self.base_upsample is None else self.base_upsample(embedding)\n # apply decoders\n max_level = len(self.decoders) - 1\n for level, decoder in enumerate(self.decoders):\n x = decoder(torch.cat((x, encode_out[max_level - level]), 1))\n # apply the last layer\n x = self.output(x)\n if self.final_activation is not None:\n x = self.final_activation(x)\n return x\n","sub_path":"neurofire/models/unet/unet_3d_n_layers.py","file_name":"unet_3d_n_layers.py","file_ext":"py","file_size_in_byte":8188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"330649405","text":"\nfrom Crypto.Cipher import AES\n\nfile = open(\"7.txt\", \"r\").read()\nfile_decoded = file.decode(\"base64\")\n\nkey = \"YELLOW SUBMARINE\"\n\ncipher = AES.new(key, AES.MODE_ECB)\nplaintext = cipher.decrypt(file_decoded)\nprint(plaintext)","sub_path":"CTFs/CryptoPals/Set1/challenge7.py","file_name":"challenge7.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"50005645","text":"from classifier import Net\nimport torch\nfrom torchvision import transforms\nfrom PIL import Image\nimport os\n\nclass Grader:\n def __init__(self):\n self.device = torch.device(\"cuda\")\n self.model = Net()\n self.model.load_state_dict(torch.load(\"mnist_cnn.pt\"))\n self.model = self.model.to(self.device)\n self.model.eval()\n\n # toPIL = transforms.ToPILImage()\n # os.makedirs('debug', exist_ok=True)\n # for i, img in enumerate(imgs[:20]):\n # img = 
toPIL(img)\n # img.save(f'debug/fake_img{i}.png')\n\n self.transform=transforms.Compose([\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n\n def grade(self, samples):\n imgs, labels = samples['imgs'].to(self.device), samples['labels'].to(self.device)\n pred = self.model(self.transform(imgs)).argmax(dim=1)\n # print(pred[:20])\n acc = torch.sum(pred == labels) / len(pred)\n # print(\"Accuracy:\", acc)\n return acc\n\nif __name__ == '__main__':\n grader = Grader()\n samples = torch.load(\"vae_generated_samples.pt\")\n acc = grader.grade(samples)\n print(\"Accuracy:\", acc)","sub_path":"vae/grader.py","file_name":"grader.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"480394376","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport unittest\nimport json\n\nimport responses\nfrom pyzeef import Zeef, Block, Page, Scratchpad\n\n\nclass TestZeef(unittest.TestCase):\n\n def setUp(self):\n self.auth_url = '{}/pages/mine'.format(Zeef.API_URL)\n self.pages_url = Page.PAGE_URL\n self.block_url = Block.BLOCK_URL\n\n def _scratchpad_mock(self):\n \"\"\"\n mock helper\n \"\"\"\n # scratchpad response mock\n body = \"\"\"\n {\n \"id\": 0,\n \"owner\": \"APIUser\",\n \"scratchPadLinks\": [\n {\n \"id\": 0,\n \"scratchPadId\": 0,\n \"title\": \"string\",\n \"url\": \"string\"\n }\n ]\n }\n \"\"\"\n responses.add(responses.GET, url=Scratchpad.SCRATCHPAD_URL, body=body)\n\n @responses.activate\n def test_zeef_authentication_good_token(self):\n body = ('{\"pageOverviews\":[{\"id\":1,\"url\":\"https://test.zeef.com'\n '/user\",\"subjectName\":\"test page\",\"curator\":\"user\",'\n '\"languageCode\":null,\"status\":\"published\",\"pageType\":null,'\n '\"imageUrl\":\"https://zeef.io/image/2635/100/s?1419350138591'\n '\"}]}')\n\n responses.add(responses.GET, url=self.auth_url, status=200, body=body)\n self._scratchpad_mock()\n\n zeef = Zeef('GoodToken')\n self.assertEqual(len(zeef.pages), 0)\n\n def test_zeef_authentication_bad_token(self):\n z = Zeef('badtoken')\n r = z.authorize(persist_pages=False)\n response = {'status': 404, 'content': 'Error'\n 'Not Found'\n ''}\n self.assertEqual(r, response)\n\n def test_zeef_class_attributes(self):\n z = Zeef('sometoken')\n self.assertEqual(z.auth_url, self.auth_url)\n self.assertEqual(z.pages_url, self.pages_url)\n self.assertEqual(z.token, 'sometoken')\n\n @responses.activate\n def test_get_basic_page(self):\n self._scratchpad_mock()\n # /pages/mine mock\n pages_mine_body = ('{\"pageOverviews\":[{\"id\":1,\"url\":\"https://test.'\n 'zeef.com/user\",\"subjectName\":\"test page\",'\n '\"curator\":\"user\",\"languageCode\":null,'\n '\"status\":\"published\",\"pageType\":null,'\n '\"imageUrl\":\"https://zeef.io/image/2635/100'\n '/s?1419350138591\"}]}')\n\n responses.add(responses.GET, url=self.auth_url, status=200,\n body=pages_mine_body)\n\n # /page/id mock\n page_body = {\n 'htmlDescription': u'Testing Description',\n 'id': 1,\n 'links': [],\n 'markdownDescription': 'Testing Description',\n 'owner': {'fullName': 'Test Owner', 'username': 'test.owner'},\n 'pageType': 'SUBJECT',\n 'plainTextDescription': 'Testing Description',\n 'profile': {\n 'facebookURL': 'http://facebook.com/testuser',\n 'googlePlusURL': None,\n 'htmlSummary': '',\n 'id': 9999,\n 'linkedinURL': 'http://linkedin.com/in/testuser',\n 'markdownSummary': '',\n 'profileImageURL': 'https://zeef.io/image/2439/100/s?123456',\n 'twentyFourSessionsURL': None,\n 'twitterURL': 
'http://twitter.com/testuser'\n },\n 'subject': {\n 'alias': [\n {\n 'defaultAlias': True,\n 'displayName': 'Test Page',\n 'id': 3833,\n 'name': 'test-page'\n }\n ],\n 'id': 777\n }\n }\n _id = page_body['id']\n page_body = json.dumps(page_body)\n url = '{}/{}'.format(self.pages_url, _id)\n responses.add(responses.GET, url=url, body=page_body, status=200)\n\n z = Zeef('GoodToken', persist_pages=True)\n self.assertEqual(len(z.pages), 1)\n self.assertEqual(z.pages[0].__class__, Page)\n self.assertEqual(z.pages[0].title, 'Test Page')\n self.assertEqual(z.pages[0].description, 'Testing Description')\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_pyzeef.py","file_name":"test_pyzeef.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"120069462","text":"#encoding:UTF-8\n\n\"\"\"\nThis script is used to construct the model and to train my data.\n\"\"\"\n\nfrom keras.optimizers import SGD\nfrom convnetskeras.convnets import preprocess_image_batch, convnet\nfrom keras import callbacks\nfrom datetime import datetime\nfrom IDCardGen import IDCardGen\n\nchar_font_path = '../fonts/fangzhengheiti.TTF'\nnumb_font_path = '../fonts/ocr-b-10.ttf'\n\nstarttime = datetime.now()\nprint ('start time is ', starttime)\nsgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\nmodel = convnet('alexnet', heatmap=False)\nmodel.compile(optimizer='adadelta', \n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nidcardgen = IDCardGen(char_font_path, numb_font_path)\ngen = idcardgen.preprocess((100, 100))\n\ncbs = callbacks.TensorBoard(log_dir='./graph', \n histogram_freq=1, \n write_graph=True, \n write_images=True)\n\nmodel.fit_generator(gen, steps_per_epoch=3000, epochs=4, \n validation_data=gen, validation_steps=500,\n callbacks=[cbs])\n\nmodel.save('models/v1_20170728.h5')\n\nendtime = datetime.now()\nprint ('end time is ', endtime)\nprint ('the total time is ', endtime - starttime)\nprint ('Done!Model is saved')","sub_path":"src/convnet_train.py","file_name":"convnet_train.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"28496846","text":"def leer_perros(na):\n pe = {}\n a = open(na)\n for l in a:\n _, n, r, p, s = l.strip().split(';')\n if n not in pe:\n pe[n] = []\n pe[n].append(s)\n a.close()\n return pe\n\nprint(leer_perros('perros.txt'))\n\ndef leer_razas(nombre_archivo):\n d = {}\n archivo = open(nombre_archivo)\n for linea in archivo:\n _, nombre, raza, _, _ = linea.strip().split(';')\n if raza not in d:\n d[raza] = []\n if nombre not in d[raza]:\n d[raza].append(nombre)\n archivo.close()\n return d\n\nprint(leer_razas('perros.txt'))\n\ndef mestizos(raza1, raza2, nombre_archivo):\n d = {}\n archivo = open(nombre_archivo)\n for linea in archivo:\n _, nombre, raza, _, _ = linea.strip().split(';')\n if nombre not in d:\n d[nombre] = []\n d[nombre].append(raza)\n archivo.close()\n final = []\n for nombre in d:\n if raza1 in d[nombre] and raza2 in d[nombre]:\n final.append(nombre)\n return final\n\nprint(mestizos('salchicha', 'san bernardo', 'perros.txt'))\nprint(mestizos('pequines', 'labrador', 'perros.txt'))\nprint(mestizos('pequines', 'pastor aleman', 'perros.txt'))\n\ndef solucion_mestizos(raza1, raza2, nombre_archivo):\n d = {}\n soluciones = leer_perros(nombre_archivo)\n perros_mestizos = mestizos(raza1, raza2, nombre_archivo)\n for p in perros_mestizos:\n solu = 
soluciones[p]\n        sin_repeticion = []\n        for s in solu:\n            if s not in d:\n                d[s] = 0\n            if s not in sin_repeticion:\n                d[s] += 1\n                sin_repeticion.append(s)\n    return d\n\nprint(solucion_mestizos('salchicha', 'san bernardo', 'perros.txt'))\nprint(solucion_mestizos('pequines', 'labrador', 'perros.txt'))\nprint(solucion_mestizos('pequines', 'pastor aleman', 'perros.txt'))\n","sub_path":"archivos-proc/pyllan/pyllan.py","file_name":"pyllan.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"511792031","text":"def head(node): return node.value\ndef tail(node): return node.nex\n\nclass Node:\n\n    def __init__(self, value = None, nex = None):\n        self.value = value\n        self.nex = nex\n\nclass LinkedList:\n\n    def __init__(self, collection = None):\n        self.head = None\n        self.size = 0\n        if collection != None:\n            for value in collection:\n                self.append(value)  # append maintains size itself\n    \n    def display(self):\n        for value in self:\n            if type(value) is LinkedList:\n                value.display()\n            else:\n                print(value,end=' ')\n        print()\n\n    def append(self,value):\n        self.size += 1\n        if self.head == None:\n            self.head = Node(value)\n            return\n\n        probe = self.head\n\n        while tail(probe) != None:\n            probe = tail(probe) \n\n        probe.nex = Node(value)\n\n    def prepend(self,value):\n        self.head = Node(value,self.head)\n        self.size += 1\n\n    def removeAt(self,i):\n        if i >= self.size:\n            raise KeyError(\"Index exceeds size of list\")\n\n        if i == 0:  # removing the head\n            value = head(self.head)\n            self.head = tail(self.head)\n            self.size -= 1\n            return value\n        \n        probe = self.head\n        count = 1\n        while count != i:\n            probe = tail(probe) \n            count += 1\n\n        value = head(tail(probe))\n        if i != self.size - 1:\n            probe.nex = tail(tail(probe))\n        else:\n            probe.nex = None\n\n        self.size -= 1\n        return value\n\n    def insert(self,i,value):\n        if i > self.size:\n            raise KeyError(\"Index exceeds size of list\")\n        elif i == self.size:\n            self.append(value)\n            return\n        elif i == 0:  # inserting at the head\n            self.prepend(value)\n            return\n\n        probe = self.head\n        count = 1\n        while count != i:\n            probe = probe.nex\n            count += 1\n\n        probe.nex = Node(value,probe.nex)\n        self.size += 1\n\n    def remove(self):\n        if self.head == None:\n            return\n\n        self.head = self.head.nex\n\n        self.size -= 1\n\n    def __add__(self, lyst):\n        newLyst = LinkedList(lyst)\n        newLyst.prepend(self)\n\n        return newLyst\n\n    def __iter__(self):\n        probe = self.head\n        while probe != None:\n            yield head(probe) \n            probe = tail(probe)\n\n    def __len__(self):\n        return self.size\n\n    def __contains__(self,value):\n        for thing in self:\n            if value == thing:\n                return True\n        return False\n","sub_path":"practice/linked.py","file_name":"linked.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"567191154","text":"\n\nfrom xai.brain.wordbase.adjectives._gentle import _GENTLE\n\n#class header\nclass _GENTLES(_GENTLE, ):\n\tdef __init__(self,): \n\t\t_GENTLE.__init__(self)\n\t\tself.name = \"GENTLES\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"gentle\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_gentles.py","file_name":"_gentles.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"50924087","text":"'''\nRoulette module\n----------------------------------------------------------------------------\n'''\n\nfrom app.mac import mac, signals\nimport requests, os\nimport random\n\n'''\nMain function, all happens after this\n'''\n@signals.command_received.connect\ndef handle(message):\n    if message.command == \"click\" or message.command == \"roulette\":\n        if 
message.predicate == \"-h\":\n show_help(message)\n else:\n handle_command(message)\n \n \n'''\nHandles command\n!click\n'''\ndef handle_command(message):\n actions = ['😛☁️🔫 CLICK', '😛☁️🔫 CLICK', '😛☁️🔫 CLICK', '😛☁️🔫 CLICK', '😛☁️🔫 CLICK', '😵💥🔫 BAAANG!']\n response = random.choice(actions)\n mac.send_message(response, message.conversation)\n \n \n\n'''\nPrints help (how to use example)\n'''\ndef show_help(message):\n answer = \"*Roulette*\\n*Usage:* !click \\n*Example:* !click\"\n mac.send_message(answer, message.conversation)\n","sub_path":"modules/roulette/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"170254248","text":"from flask import Flask, render_template, request, redirect\nfrom flask_cors import CORS\nimport sqlite3\n\napp = Flask(__name__)\nCORS(app)\n\nconn = sqlite3.connect('myBank.db')\nc = conn.cursor()\n#c.execute('''CREATE TABLE CustomerDetails (rowid INT, customerName VARCHAR(20), email VARCHAR(50), transacId INT PRIMARY KEY, Balance INT)''')\n#c.execute('''CREATE TABLE TransferDetails (rowid INTEGER PRIMARY KEY AUTOINCREMENT, SenderName VARCHAR(20), senderId INT, ReceiverName VARCHAR(20), receiverId INT, amountTransferred INT)''')\n#records = [(1, 'Peter', 'pet@gmail.com', 101, 10000),\n# (2, 'Amy', 'amy@gmail.com', 102, 50000),\n# (3, 'Bob', 'bob@gmail.com', 103, 5000),\n# (4, 'Hannah', 'han@gmail.com', 104, 3000),\n# (5, 'Sandy', 'San@gmail.com', 105, 9000),\n# (6, 'Betty', 'betty@gmail.com', 106, 1000),\n# (7, 'Richard', 'Richard@gmail.com', 107, 2000),\n# (8, 'Vicky', 'vicky@gmail.com', 108, 6500),\n# (9, 'Ben', 'Ben@gmail.com', 109, 5555),\n# (10, 'Willaim', 'william@gmail.com', 110, 20000)]\n#c.executemany('INSERT INTO CustomerDetails VALUES (?,?,?,?,?);', records)\n#conn.commit()\n#conn.close()\n\n@app.route('/')\ndef homepage():\n return render_template('home.html')\n\n\n@app.route('/transfer//')\ndef transferpage(senderid):\n return render_template('transfer.html')\n \n\n@app.route('/transferamount////')\ndef addAmount(sid, rid, amt):\n conn = sqlite3.connect('myBank.db')\n c = conn.cursor()\n\n c.execute(\"SELECT * FROM CustomerDetails WHERE transacId = \"+str(sid))\n sen_details = c.fetchall()\n sen_bal = sen_details[0][4] - amt\n \n c.execute(\"SELECT * FROM CustomerDetails WHERE transacId = \"+str(rid))\n rec_details = c.fetchall()\n rec_bal = rec_details[0][4] + amt\n\n c.execute(\"UPDATE customerDetails SET Balance =\"+str(sen_bal)+\" WHERE transacId =\"+str(sid))\n c.execute(\"UPDATE customerDetails SET Balance =\"+str(rec_bal)+\" WHERE transacId =\"+str(rid))\n transfer_details = [sen_details[0][1], sid, rec_details[0][1], rid, amt]\n c.execute('INSERT INTO TransferDetails(SenderName, senderId, ReceiverName, receiverId, amountTransferred) VALUES (?,?,?,?,?)', transfer_details)\n\n conn.commit()\n conn.close()\n return \"transferred\"\n\n\n@app.route('/getParticularTransferHistory//')\ndef getParticularTransferHistory(Id):\n conn = sqlite3.connect('myBank.db')\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"SELECT * FROM TransferDetails WHERE senderId =\"+str(Id)+\" OR receiverId =\"+str(Id)+\";\")\n transferDetails = c.fetchall()\n conn.commit()\n conn.close()\n return render_template('transferhistory.html', transferDetails=transferDetails)\n\n\n\n@app.route('/getAllCustomerDetails/')\ndef getAllCustomerDetails():\n conn = sqlite3.connect('myBank.db')\n conn.row_factory = sqlite3.Row\n c = 
conn.cursor()\n c.execute(\"SELECT * FROM CustomerDetails\")\n customerDetails = c.fetchall()\n conn.commit()\n conn.close()\n return render_template('details.html', customerDetails=customerDetails)\n\n\n\n@app.route('/getAllTransferHistory/')\ndef getAllTransferHistory():\n conn = sqlite3.connect('myBank.db')\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"SELECT * FROM TransferDetails\")\n transferDetails = c.fetchall()\n conn.commit()\n conn.close()\n return render_template('transferhistory.html', transferDetails=transferDetails)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"459507316","text":"import math\n\ndef solve(D, P):\n \"\"\" solve the problem \"\"\"\n \n answer = max(P)\n if answer < 4: return answer\n\n for m in range(2, max(P)):\n special = 0\n for p in P:\n special += math.ceil(p/m) - 1\n if answer > special + m:\n answer = special + m\n\n return answer\n\ndef parse():\n \"\"\" parse input \"\"\"\n D = int(input())\n P = [int(i) for i in input().split()]\n\n return D, P\n\n\ndef main():\n \n T = int(input())\n\n # solve\n for t in range(1, T+1):\n params = parse()\n result = solve(*params)\n print('Case #%d: %s' % (t, result))\n\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"solutions_5686275109552128_1/Python/waitingkuo0527/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"76362802","text":"#LIBRARIES\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport json\nimport sqlite3\nimport datetime\nimport time\nimport TerDec as td # Personal module,needs TerDec.py\n############################\n\nmission0_1=td.Mission('Initial variables. 
Connecting to database') #Mission Reply tool from TerDec.py\nct=td.counter(description='Tweets into database',sleep=0) #Counter from TerDec.py\ndbpath=td.setpath(r'eLearning_tweets.db') #Path tool from TerDec.py\ndbpath.askupdate('Path of database')\nconn = sqlite3.connect(dbpath.path)\nc = conn.cursor()\ntoday_table=\"CREATE TABLE IF NOT EXISTS raw_tweets (id_str TEXT, tweets TEXT,user_id_str TEXT, timestamp_ms REAL,\\\n user_location TEXT,user_followers REAL,user_friends REAL,user_favourites REAL,user_statuses REAL)\"\ntoday_upload=\"INSERT INTO raw_tweets (id_str, tweets,user_id_str, timestamp_ms,\\\n user_location,user_followers,user_friends,user_favourites,user_statuses) VALUES (?,?,?,?,?,?,?,?,?)\"\nc.execute(today_table)\nconn.commit()\nmission0_1.end()\n\nmission0_2=td.Mission('Get API Keys')\nckey=\"\"\ncsecret=\"\"\natoken=\"\"\nasecret=\"\"\nmission0_2.end()\n\n# class object\nclass listener(StreamListener): #listener is being declared as a class inheriting from base class StreamListener\n\n def fulltext_handler(self,jsondata):\n try:\n full=jsondata['retweeted_status']['extended_tweet']['full_text']\n return full\n except:\n try:\n full=jsondata['extended_tweet']['full_text']\n return full\n except:\n full=jsondata['text']\n return full\n \n def on_data(self, data): #this is therefore a method from StreamListener\n try: \n raw_data = json.loads(data)\n lang=raw_data['lang']\n if lang=='en': # first filter by this label, keep it into database for check\n tweets=self.fulltext_handler(raw_data)\n id_str=raw_data['id_str']\n user_id_str=raw_data['user']['id_str']\n user_location=raw_data['user']['location']\n user_followers=raw_data['user']['followers_count']\n user_friends=raw_data['user']['friends_count']\n user_favourites=raw_data['user']['favourites_count']\n user_statuses=raw_data['user']['statuses_count']\n timestamp_ms=raw_data['timestamp_ms']\n c.execute(today_upload,(id_str,tweets,user_id_str,timestamp_ms,\\\n user_location,user_followers,user_friends,user_favourites,user_statuses))\n conn.commit()\n ct.flush() # Show number into database\n else:\n pass\n\n except:\n pass\n############################\n\nmission1=td.Mission('Start to fetching and insert into database (Only be stopped manually)')\nfor aftererror in range(10): #If error, it can still try.\n try:\n auth = OAuthHandler(ckey, csecret)\n auth.set_access_token(atoken, asecret)\n twitterStream = Stream(auth, listener())\n twitterStream.filter(track=[\"a\",\"e\",\"i\",\"o\",\"u\"]) #fetching 'everything' from twitter.\n\n except Exception as e:\n print(str(e))\n time.sleep(5)\nmission1.end()","sub_path":"program/fetcher1_twitter.py","file_name":"fetcher1_twitter.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"403213388","text":"# -*- coding: utf-8 -*-\n##########################################################################\n# Copyright (C) 2005-2013 UC Mobile Limited. 
All Rights Reserved\n# File   : uc_tools.processmanager\n# \n# Creation : 2013-10-31\n# Author : huangjj@ucweb.com\n###########################################################################\n\nfrom uc_tools import cmdexec\nimport re\n\n\nMEMORY_INFO_RE = re.compile('^(?P<key>\w+):\s+(?P<usage_kb>\d+) kB$')\nNVIDIA_MEMORY_INFO_RE = re.compile('^\s*(?P<user>\S+)\s*(?P<name>\S+)\s*'\n                                   '(?P<pid>\d+)\s*(?P<usage_bytes>\d+)$')\n\nclass ProcessManager(object):\n    \"\"\" Process management for Android devices\n    \"\"\"\n    def __init__(self, device=None):\n        \"\"\" Constructor.\n\n        :param device: device serial number\n        :type device: string\n        \"\"\"\n        self.cmd_exec = cmdexec.CMDExec()\n        if device:\n            self.cmd_exec.setTargetSerial(device)\n        self._device = device\n    \n    def getPid(self, pkg_name):\n        \"\"\" Get the PID of the process belonging to the given package.\n\n        :param pkg_name: package name\n        :type pkg_name: string\n\n        :returns: PID of the process belonging to the given package\n        :rtype: string\n        \"\"\"\n        pid = ''\n        cmd = \"ps\"\n        text= self.cmd_exec.sendShellCommand(cmd)\n        lines=text.split('\r\n')\n        index = 1\n        for line in lines:\n            if len(line) <= 0:\n                continue\n            line = line.strip('\r')\n            pkglist=line.split()\n            \n            if \" PID \" in line:\n                for i in range(len(pkglist)):\n                    if pkglist[i] == \"PID\":\n                        index = i\n                continue\n            elif (pkg_name in line) \\\n                and (pkg_name == pkglist[-1]):\n\n                pid = pkglist[index]\n                break\n        return pid\n    \n    def killPid(self, pid, signum=9):\n        \"\"\" Kill a process by its PID.\n\n        :param pid: PID of the process\n        :type pid: int or str\n\n        :param signum: signal number used to kill the process, defaults to 9\n        :type signum: int\n        \"\"\"\n        kill_cmd = \"kill -\" + str(signum) + \" \" + str(pid)\n        self.cmd_exec.sendShellCommand(kill_cmd)\n    \n#     def extractPid(self, process_name):\n#         '''\n#         Extracts Process Ids for a given process name from Android Shell.\n#         \n#         Args:\n#             process_name: name of the process on the device.\n#         \n#         Returns:\n#             List of all the process ids (as strings) that match the given name.\n#             If the name of a process exactly matches the given name, the pid of\n#             that process will be inserted to the front of the pid list.\n#         '''\n#         pids = []\n#         for line in self.cmd_exec.sendShellCommand('ps'):\n#             data = line.split()\n#             print data\n#             try:\n#                 if process_name in data[-1]:  # name is in the last column\n#                     print process_name\n#                     if process_name == data[-1]:\n#                         # print process_name\n#                         pids.insert(0, data[1])  # PID is in the second column\n#                     else:\n#                         pids.append(data[1])\n#             except IndexError:\n#                 pass\n#         return pids\n#     \n#     \n#     \n#     def killAll(self, process):\n#         '''\n#         Android version of killall, connected via adb.\n#         \n#         Args:\n#             process: name of the process to kill off\n#         \n#         Returns:\n#             the number of processes killed\n#         '''\n#         pids = self.extractPid(process)\n#         if pids:\n#             self.cmd_exec.sendShellCommand('kill -9 ' + ' '.join(pids))\n#         return len(pids)\n#     \n#     def killAllBlocking(self, process, timeout_sec):\n#         '''\n#         Blocking version of killall, connected via adb.\n#         \n#         This waits until no process matching the corresponding name appears in ps'\n#         output anymore.\n#         \n#         Args:\n#             process: name of the process to kill off\n#             timeout_sec: the timeout in seconds\n#         \n#         Returns:\n#             the number of processes killed\n#         '''\n#         processes_killed = self.killAll(process)\n#         if processes_killed:\n#             elapsed = 0\n#             wait_period = 0.1\n#             # Note that this doesn't take into account the time spent in ExtractPid().\n#             while self.extractPid(process) and elapsed < timeout_sec:\n#                 time.sleep(wait_period)\n#                 elapsed += wait_period\n#             if elapsed >= timeout_sec:\n#                 return 0\n#         return processes_killed\n#     \n#     def getProtectedFileContents(self, filename, log_result=False):\n#         '''\n#         Gets contents from the protected file specified by |filename|.\n#         \n#         This 
is less efficient than GetFileContents, but will work for protected\n#         files and device files.\n#         '''\n#         # Run the script as root\n#         return self.cmd_exec.sendShellCommand('cat \"%s\"' % filename)\n#     \n#     def getMemoryUsageForPid(self, pid):\n#         '''\n#         Returns the memory usage for given pid.\n#         \n#         Args:\n#             pid: The pid number of the specific process running on device.\n#         \n#         Returns:\n#             A tuple containg:\n#             [0]: Dict of {metric:usage_kb}, for the process which has specified pid.\n#             The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,\n#             Shared_Dirty, Private_Clean, Private_Dirty, Referenced, Swap,\n#             KernelPageSize, MMUPageSize, Nvidia (tablet only).\n#             [1]: Detailed /proc/[PID]/smaps information.\n#         '''\n#         usage_dict = collections.defaultdict(int)\n#         smaps = collections.defaultdict(dict)\n#         current_smap = ''\n#         contents = self.getProtectedFileContents('/proc/%s/smaps' % pid, log_result=False)\n#         key = None\n#         usage_kb = 0\n#         for line in contents:\n#             items = line.split()\n#             # See man 5 proc for more details. The format is:\n#             # address perms offset dev inode pathname\n#             if len(items) > 5:\n#                 current_smap = ' '.join(items[5:])\n#             elif len(items) > 3:\n#                 current_smap = ' '.join(items[3:])\n#             match = re.match(MEMORY_INFO_RE, line)\n#             if match:\n#                 key = match.group('key')\n#                 usage_kb = int(match.group('usage_kb'))\n#                 usage_dict[key] += usage_kb\n#                 if key not in smaps[current_smap]:\n#                     smaps[current_smap][key] = 0\n#                 smaps[current_smap][key] = smaps[current_smap][key] + usage_kb\n#         if not usage_dict or not any(usage_dict.values()):\n#             # Presumably the process died between ps and calling this method.\n#             logging.warning('Could not find memory usage for pid ' + str(pid))\n#         print smaps\n    \n#     def processesUsingDevicePort(self, device_port):\n#         '''\n#         Lists processes using the specified device port on loopback interface.\n#         \n#         Args:\n#             device_port: Port on device we want to check.\n#         \n#         Returns:\n#             A list of (pid, process_name) tuples using the specified port.\n#         '''\n#         tcp_results = self.cmd_exec.sendShellCommand('cat /proc/net/tcp', log_result=False)\n#         tcp_address = '0100007F:%04X' % device_port\n#         pids = []\n#         for single_connect in tcp_results:\n#             connect_results = single_connect.split()\n#             # Column 1 is the TCP port, and Column 9 is the inode of the socket\n#             if connect_results[1] == tcp_address:\n#                 socket_inode = connect_results[9]\n#                 socket_name = 'socket:[%s]' % socket_inode\n#                 lsof_results = self.cmd_exec.sendShellCommand('lsof', log_result=False)\n#                 for single_process in lsof_results:\n#                     process_results = single_process.split()\n#                     # Ignore the line if it has less than nine columns in it, which may\n#                     # be the case when a process stops while lsof is executing.\n#                     if len(process_results) <= 8:\n#                         continue\n#                     # Column 0 is the executable name\n#                     # Column 1 is the pid\n#                     # Column 8 is the Inode in use\n#                     if process_results[8] == socket_name:\n#                         pids.append((int(process_results[1]), process_results[0]))\n#                     break\n#         return pids\n    ","sub_path":"uc_tools/processmanager.py","file_name":"processmanager.py","file_ext":"py","file_size_in_byte":8727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"595270838","text":"from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, func\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.exc import OperationalError, ArgumentError\n\n# 12 - Find and print the departments with more than 3 
employees.\n\nBase = declarative_base()\n\n\nclass Dept(Base):\n    __tablename__ = \"dept\"\n    deptno = Column(Integer, primary_key=True)  # Department code\n    dname = Column(String)  # Department name\n    loc = Column(String)  # Location\n\n    def __init__(self, deptno, loc):\n        self.deptno = deptno\n        self.loc = loc\n\n\nclass Emp(Base):\n    __tablename__ = \"emp\"\n    empno = Column(Integer, primary_key=True)  # Employee code\n    ename = Column(String)  # Employee name\n    job = Column(String)  # Job title\n    mgr = Column(Integer)  # Manager\n    hiredate = Column(Integer)  # Hire date\n    sal = Column(Integer)  # Salary\n    comm = Column(Integer)  # Bonus\n    deptno = Column(ForeignKey(\"dept.deptno\"))  # Department code\n\n    def __init__(self, ename, job, mgr, hiredate, sal, comm, deptno):\n        self.ename = ename\n        self.job = job\n        self.mgr = mgr\n        self.hiredate = hiredate\n        self.sal = sal\n        self.comm = comm\n        self.deptno = deptno\n\nclass Salgrade(Base):\n    __tablename__ = \"salgrade\"\n    grade = Column(Integer, primary_key=True)\n    losal = Column(Integer)\n    hisal = Column(Integer)\n\n    def __init__(self, losal, hisal):\n        self.losal = losal\n        self.hisal = hisal\n\n\n\ndef connect():\n    try:\n        Session = sessionmaker(bind=engine)\n    except OperationalError:\n        print(\"OperationalError: Unable to connect to MySQL database.\")\n    except ArgumentError:\n        print(\"Invalid Argument for connect to database.\")\n    return Session\n\n\nengine = create_engine('sqlite:///mysql.db')\nBase.metadata.create_all(bind=engine)\nsession = connect()()\nemployers = session.query(func.count(Emp.ename), Emp.deptno).group_by(Emp.deptno).having(func.count(Emp.ename) > 3)\ndept = session.query(Dept).all()\ntemp = []\nfor nom in dept:\n    temp.append(nom.deptno)\nfor emp in employers:\n    print('Department: {:<15} Employees: {}'.format(dept[temp.index(emp.deptno)].dname, emp[0]))\n","sub_path":"sqlalch12.py","file_name":"sqlalch12.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"383556494","text":"# def rotate90(m):\n#     num = len(m)\n#     res = [[0] * N for _ in range(num)]\n#\n#     for r in range(num):\n#         for c in range(num):\n#             res[num-1-c][r] = m[r][c]\n#     return res\n#\n# T = int(input())\n# for tc in range(1, T+1):\n#     N = int(input())\n#     arr = [num for num in range(1, N**2 + 1)]\n#     dalpang = [[0 for j in range(N)] for i in range(N)]\n#     cnt_squre = 0\n#     cnt = N-1\n#     while True:\n#         temp1 = arr[:cnt + 1]\n#         arr = arr[cnt+1:]\n#         try:\n#             for i in range(cnt+1):\n#                 dalpang[cnt_squre][cnt_squre+i] = temp1[i]\n#         except:\n#             break\n#         dalpang = rotate90(dalpang)\n#\n#         temp2 = arr[:cnt]\n#         arr = arr[cnt:]\n#         try:\n#             for i in range(1, cnt+1):\n#                 dalpang[cnt_squre][cnt_squre+i] = temp2[i-1]\n#         except:\n#             dalpang = rotate90(dalpang)\n#             dalpang = rotate90(dalpang)\n#             dalpang = rotate90(dalpang)\n#             break\n#         dalpang = rotate90(dalpang)\n#\n#         temp3 = arr[:cnt]\n#         arr = arr[cnt:]\n#         try:\n#             for i in range(1, cnt+1):\n#                 dalpang[cnt_squre][cnt_squre+i] = temp3[i-1]\n#         except:\n#             dalpang = rotate90(dalpang)\n#             dalpang = rotate90(dalpang)\n#             break\n#         dalpang = rotate90(dalpang)\n#\n#         temp4 = arr[:cnt - 1]\n#         arr = arr[cnt-1:]\n#         try:\n#             for i in range(1, cnt):\n#                 dalpang[cnt_squre][cnt_squre+i] = temp4[i-1]\n#         except:\n#             dalpang = rotate90(dalpang)\n#             break\n#         dalpang = rotate90(dalpang)\n#\n#         cnt_squre += 1\n#         cnt -= 1\n#     print(dalpang)\n\nT = int(input())\nfor tc in range(1, T+1):\n    N = int(input())\n    cnt = 1\n    dalpang = [[0 for j in range(N)] for i in 
range(N)]  # initialize the board with zeros\n\n    r_st = 0\n    r_end = N-1\n    c_st = 0\n    c_end = N-1\n\n    while c_st <= c_end and r_st <= r_end:\n        for i in range(c_st, c_end+1):  # left to right\n            dalpang[r_st][i] = cnt\n            cnt += 1\n        r_st += 1\n\n        for i in range(r_st, r_end + 1):  # top to bottom\n            dalpang[i][c_end] = cnt\n            cnt += 1\n        c_end -= 1\n\n        for i in range(c_end, c_st-1, -1):  # right to left\n            dalpang[r_end][i] = cnt\n            cnt += 1\n        r_end -= 1\n\n        for i in range(r_end, r_st - 1, -1):  # bottom to top\n            dalpang[i][c_st] = cnt\n            cnt += 1\n        c_st += 1\n\n    print(f'#{tc}')\n    for i in range(N):\n        print(*dalpang[i])  # this unpacking expression is really nice\n","sub_path":"Algorithm/BEAKJOON/1954_달팽이.py","file_name":"1954_달팽이.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"191123155","text":"from tkinter import *\nfrom tkinter import messagebox,ttk\nfrom PIL import ImageTk as i\nimport mysql.connector\nimport datetime\ndef login():\n    uname = e1.get()\n    password = e2.get()\n\n    if (uname == \"\" and password == \"\"):\n        messagebox.showinfo(\"\", \"Blank Not allowed\")\n\n\n    elif (uname == \"Admin\" and password == \"123\"):\n        f1.forget()\n        f2.pack()\n\n        messagebox.showinfo(\"\", \"Login Success\")\n        # root.destroy()\n\n\n    else:\n        messagebox.showinfo(\"\", \"Incorrect Username and Password\")\n\ndef atd():\n    f1.forget()\n    f2.forget()\n    f3.pack()\ndef save_atd():\n    a=e3.get()\n    b=cvar.get()\n    c=datetime.datetime.now()\n    mydb=mysql.connector.connect(host=\"localhost\",user=\"root\",passwd=\"1234\",database=\"att_pro\")\n    cur=mydb.cursor()\n    # cur.execute(\"CREATE TABLE employe_atd (id INT, atd_status VARCHAR(255)\")\n    # messagebox.showinfo(cur)\n    sql = \"insert into employe_atd(id, atd_status,time) values (%s, %s,%s)\"\n    val=(a,b,c)\n    try:\n        cur.execute(sql,val)\n        mydb.commit()\n    except:\n        mydb.rollback()\n    messagebox.showinfo('', \"record inserted!\")\n    mydb.close()\ndef new_ep():\n\n    f1.forget()\n    f2.forget()\n    f3.forget()\n    f4.pack()\ndef emp_data():\n    a = e4.get()\n    b = e5.get()\n    c = e6.get()\n    d = e7.get()\n    e = e8.get()\n    mydb = mysql.connector.connect(host=\"localhost\", user=\"root\", passwd=\"1234\", database=\"att_pro\")\n    cur = mydb.cursor()\n    #cur.execute(\"CREATE TABLE employe_details (id INT, name VARCHAR(255),age INT,department VARCHAR(255),Basic_pay VARCHAR(255))\")\n    # messagebox.showinfo(cur)\n    sql = \"insert into employe_details(id, name, age ,department,Basic_pay) values (%s, %s, %s, %s, %s)\"\n    val = (a,b,c,d,e)\n    try:\n        cur.execute(sql, val)\n        mydb.commit()\n    except:\n        mydb.rollback()\n    messagebox.showinfo('', \"record inserted!\")\n    mydb.close()\ndef sal():\n    f1.forget()\n    f2.forget()\n    f3.forget()\n    f4.forget()\n    f5.pack()\ndef sal_cal():\n    f1.forget()\n    f2.forget()\n    f3.forget()\n    f4.forget()\n    f5.forget()\n    f6.forget()\n    f7.forget()\n    f8.pack()\n    a=e9.get()\n    mydb = mysql.connector.connect(host=\"localhost\", user=\"root\", passwd=\"1234\", database=\"att_pro\")\n    cur=mydb.cursor()\n    j = f\"select name from employe_details where id={a}\"\n    cur.execute(j)\n    res = cur.fetchall()\n    j1 = f\"select Basic_pay from employe_details where id={a}\"\n    cur.execute(j1)\n    res1 = cur.fetchall()\n    for x1 in res1:\n        global z1\n        z1 = x1[0]\n\n    j2=f\"select count(atd_status) from employe_atd where (atd_status='Present' and id={a})\"\n    cur.execute(j2)\n    res2 = cur.fetchall()\n    for x2 in res2:\n        global z2\n        z2=x2[0]\n    j3 = f\"select count(atd_status) from employe_atd where (atd_status='Late Time' and id={a})\"\n    cur.execute(j3)\n    res3 = cur.fetchall()\n    for x3 in res3:\n        
global z3\n z3 = x3[0]\n j4 = f\"select count(atd_status) from employe_atd where (atd_status='Half Day' and id={a})\"\n cur.execute(j4)\n res4 = cur.fetchall()\n for x4 in res4:\n global z4\n z4 = x4[0]\n j5 = f\"select count(atd_status) from employe_atd where (atd_status='Absent' and id={a})\"\n cur.execute(j5)\n res5 = cur.fetchall()\n for x5 in res5:\n global z5\n z5 = x5[0]\n j6 = f\"select count(atd_status) from employe_atd where (atd_status='Over Time' and id={a})\"\n cur.execute(j6)\n res6 = cur.fetchall()\n for x6 in res6:\n global z6\n z6 = x6[0]\n od_p=int(z1)/30\n hd_p=od_p/2\n lt_p=hd_p/2\n ot_p=hd_p/2\n cal=(od_p*int(z2))-(lt_p*int(z3))+(hd_p*int(z4))+(ot_p*int(z6))\n d = [[res, res2, res3, res4, res6, res5,cal]]\n # t.insert('','end',d)\n for i in d:\n t.insert('','end',values=i)\n # messagebox.showinfo(\"Net pay\",cal)\ndef show():\n\n a=e3.get()\n mydb = mysql.connector.connect(host=\"localhost\", user=\"root\", passwd=\"1234\", database=\"att_pro\")\n cur=mydb.cursor()\n j1 = f\"select name from employe_details where id={a}\"\n cur.execute(j1)\n res1 = cur.fetchall()\n for x1 in res1:\n global z1\n z1 = x1[0]\n\n j2=f\"select count(atd_status) from employe_atd where (atd_status='Present' and id={a})\"\n cur.execute(j2)\n res2 = cur.fetchall()\n for x2 in res2:\n global z2\n z2=x2[0]\n j3 = f\"select count(atd_status) from employe_atd where (atd_status='Late Time' and id={a})\"\n cur.execute(j3)\n res3 = cur.fetchall()\n for x3 in res3:\n global z3\n z3 = x3[0]\n j4 = f\"select count(atd_status) from employe_atd where (atd_status='Half Day' and id={a})\"\n cur.execute(j4)\n res4 = cur.fetchall()\n for x4 in res4:\n global z4\n z4 = x4[0]\n j5 = f\"select count(atd_status) from employe_atd where (atd_status='Absent' and id={a})\"\n cur.execute(j5)\n res5 = cur.fetchall()\n for x5 in res5:\n global z5\n z5 = x5[0]\n j6=f\"select count(atd_status) from employe_atd where (atd_status='Over Time' and id={a})\"\n cur.execute(j6)\n res6 = cur.fetchall()\n for x6 in res6:\n global z6\n z6 = x6[0]\n ttl=int(z2)+int(z3)+int(z4)+int(z5)+int(z6)\n\n d=(z1,\"has \",z2,\"present days \",z3,\" late Entry \",z4,\" half days \",z6,\" over time \",z5,\" absent out of\",ttl,\" days\")\n messagebox.showinfo(\"\",d)\ndef clear():\n e1.delete(0, END)\n e2.delete(0, END)\n e3.delete(0, END)\n e4.delete(0, END)\n e5.delete(0, END)\n e6.delete(0, END)\n e7.delete(0, END)\n e8.delete(0, END)\n e9.delete(0, END)\n # t.delete(0,END)\n # tv.delete(0,END)\ndef back():\n f3.forget()\n f4.forget()\n f5.forget()\n f6.forget()\n f7.forget()\n f8.forget()\n f2.pack()\ndef sal1():\n f1.forget()\n f2.forget()\n f3.forget()\n f4.forget()\n f5.forget()\n f6.pack()\n mydb = mysql.connector.connect(host=\"localhost\", user=\"root\", passwd=\"1234\", database=\"att_pro\")\n cur = mydb.cursor()\n j1 = f\"select * from employe_atd\"\n cur.execute(j1)\n res = cur.fetchall()\n for i in res:\n tv.insert('', 'end', values=i)\ndef sal2():\n f7.forget()\n f8.forget()\n f1.forget()\n f2.forget()\n f3.forget()\n f4.forget()\n f5.forget()\n f6.forget()\n f7.pack()\n mydb = mysql.connector.connect(host=\"localhost\", user=\"root\", passwd=\"1234\", database=\"att_pro\")\n cur = mydb.cursor()\n j1 = f\"select * from employe_details\"\n cur.execute(j1)\n res = cur.fetchall()\n for i in res:\n t1.insert('', 'end', values=i)\nw=Tk()\nw.title(\"Login\")\nw.geometry(\"1350x650+0+0\")\nw.resizable(0,0)\nbg =i.PhotoImage(file =\"b4.png\")\nbg1 =i.PhotoImage(file =\"b5 .png\")\nbg2 =i.PhotoImage(file =\"b7.png\")\n# 
can=Canvas(height=500,width=500)\n# can.create_image(150,200,image=bg)\n# frame 1 admin panel\nf1=Frame(w,height=1600,width=1600)\nmy_lbl=Label(f1,image=bg)\nmy_lbl.place(x=0,y=0)\nLabel(f1, text=\"UserName\",height=2,bg=\"#45ceff\",padx=20).place(x=500, y=250)\nLabel(f1, text=\"Password\",height=2,bg=\"#45ceff\",padx=22).place(x=500, y=310)\n\ne1 = Entry(f1,selectborderwidth=1,borderwidth=10,bg=\"#d9f5ff\",fg=\"#000000\",font=\"2\")\ne1.place(x=650, y=250)\n\ne2 = Entry(f1,selectborderwidth=1,borderwidth=10,bg=\"#d9f5ff\",fg=\"#000000\",font=\"2\")\ne2.place(x=650, y=310)\ne2.config(show=\"*\")\n\nButton(f1, text=\"Login\",activeforeground = \"red\",activebackground = \"pink\",bg=\"#45ceff\" ,command=login, height=3, width=13).place(x=620, y=400)\nButton(f1, text=\"Clear\", command=clear,activeforeground = \"red\",activebackground = \"pink\",bg=\"#45ceff\", height=3, width=13).place(x=750, y=400)\nf1.pack()\n\n# frame 2 admin panel menu\nf2=Frame(w,height=2000,width=2000)\nmy_lbl=Label(f2,image=bg1)\nmy_lbl.place(x=0,y=0)\nLabel(f2,text=\"Welcome\",height=3,width=25,bg=\"#cba786\",font=2).place(x=600,y=20)\nButton(f2,text=\"New Employee\",command=new_ep,height=3,width=25,font=2).place(x=600,y=120)\nButton(f2,text=\"Employee Attendance\",command=atd,height=3,width=25,font=2).place(x=600,y=220)\nButton(f2,text=\"Salary Calculator\",command=sal,height=3,width=25,font=2).place(x=600,y=320)\nButton(f2,text=\"Check Attendance Database\",command=sal1,height=3,width=25,font=2).place(x=600,y=420)\nButton(f2,text=\"Check Employee Database\",command=sal2,height=3,width=25,font=2).place(x=600,y=520)\n# frame3\nf3=Frame(w,height=2000,width=2000)\nmy_lbl=Label(f3,image=bg2)\nmy_lbl.place(x=0,y=0)\nLabel(f3,text=\"Attendance Time!!\",height=3,width=25,font=2).place(x=550,y=20)\nid=Label(f3,text=\"Employee Id\",height=2,padx=20)\nid.place(x=500,y=150)\ncvar = StringVar()\ncvar.set(\"Entry Status\")\noption = (\"Present\", \"Late Time\", \"Half Day\",\"Absent\",\"Over Time\")\no = OptionMenu(f3,cvar, *option)\no.config(font=(\"times\",14))\no.place(x=500,y=250,width=300)\ne3=Entry(f3,selectborderwidth=1,borderwidth=10,bg=\"#d9f5ff\",fg=\"#000000\",font=2)\ne3.place(x=650,y=150)\nButton(f3,text=\"Save\",command=save_atd,height=3,width=13,font=2).place(x=600,y=400)\nButton(f3, text=\"Clear\", command=clear, height=3, width=13,font=2).place(x=800, y=400)\nButton(f3, text=\"Previous\", command=back, height=3, width=13,font=2).place(x=400, y=400)\n# Button(f3, text=\"Show Attendance\", command=show, height=3, width=13).place(x=50, y=250)\n
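# Screen-navigation pattern used throughout this app: every screen is a full-window Frame, and the handlers switch screens by calling .forget() on the frames being hidden and .pack() on the one being shown, e.g.\n#   f3.forget(); f2.pack()   # what back() does to return to the menu\n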
# frame 4 adding new employee\nf4=Frame(w,height=2000,width=2000)\nmy_lbl=Label(f4,image=bg2)\nmy_lbl.place(x=0,y=0)\ncr_ep=Label(f4,text=\"Employee ID\",height=2,padx=20,width=20,font=2)\ncr_ep.place(x=80,y=30)\nnm=Label(f4,text=\"Employee Name\",height=2,padx=20,width=20,font=2)\nnm.place(x=80,y=90)\nage=Label(f4,text=\"Employee Age\",height=2,padx=20,width=20,font=2)\nage.place(x=80,y=150)\ncr_ep=Label(f4,text=\"Employee Department\",height=2,padx=20,width=20,font=2)\ncr_ep.place(x=80,y=210)\nbs=Label(f4,text=\"Employee Basic Salary\",height=2,padx=20,width=20,font=2)\nbs.place(x=80,y=270)\ne4=Entry(f4,selectborderwidth=1,borderwidth=10,bg=\"#d9f5ff\",fg=\"#000000\",font=2,width=30)\ne4.place(x=380,y=30)\ne5=Entry(f4,selectborderwidth=1,borderwidth=10,bg=\"#d9f5ff\",fg=\"#000000\",font=2,width=30)\ne5.place(x=380,y=90)\ne6=Entry(f4,selectborderwidth=1,borderwidth=10,bg=\"#d9f5ff\",fg=\"#000000\",font=2,width=30)\ne6.place(x=380,y=150)\ne7=Entry(f4,selectborderwidth=1,borderwidth=10,bg=\"#d9f5ff\",fg=\"#000000\",font=2,width=30)\ne7.place(x=380,y=210)\ne8=Entry(f4,selectborderwidth=1,borderwidth=10,bg=\"#d9f5ff\",fg=\"#000000\",font=2,width=30)\ne8.place(x=380,y=270)\nButton(f4,text=\"Save\",command=emp_data,height=3,width=13,font=2).place(x=380,y=450)\nButton(f4, text=\"Clear\", command=clear, height=3, width=13,font=2).place(x=570, y=450)\nButton(f4, text=\"Previous\", command=back, height=3, width=13,font=2).place(x=190, y=450)\n# frame5 salary calculator\nf5=Frame(w,height=2000,width=2000)\nmy_lbl=Label(f5,image=bg2)\nmy_lbl.place(x=0,y=0)\nLabel(f5,text=\"Employee ID\",height=2,padx=20,width=20,font=2).place(x=200,y=60) # no assignment: the old `i=` rebound the ImageTk alias imported above\ne9=Entry(f5,selectborderwidth=1,borderwidth=10,bg=\"#d9f5ff\",fg=\"#000000\",font=2,width=30)\ne9.place(x=500,y=60)\nButton(f5,text=\"Calculate Salary\",command=sal_cal,height=3,width=13,font=2).place(x=420,y=250)\nButton(f5, text=\"Clear\", command=clear, height=3, width=13,font=2).place(x=610, y=250)\nButton(f5, text=\"Previous\", command=back, height=3, width=13,font=2).place(x=230, y=250)\n# frame 6 for showing employee attendance data\nf6=Frame(w,height=2000,width=2000)\ntv=ttk.Treeview(f6,columns=(1,2,3),show=\"headings\",height=10)\ntv.pack()\ntv.heading(1,text=\"employee id\")\ntv.heading(2,text=\"employee attendance\")\ntv.heading(3,text=\"entry time\")\nButton(f6,text=\"Previous\",command=back,height=3,width=20).pack()\n# frame 7 for showing employee data\nf7=Frame(w,height=2000,width=2000)\nt1=ttk.Treeview(f7,columns=(1,2,3,4,5),show=\"headings\",height=10)\nt1.pack()\nt1.heading(1,text=\"Employee ID\")\nt1.heading(2,text=\"Employee Name\")\nt1.heading(3,text=\"Employee Age\")\nt1.heading(4,text=\"Employee Department\")\nt1.heading(5,text=\"Employee Salary\")\nButton(f7, text=\"Previous\", command=back, height=3, width=13).pack()\n#frame 8 for showing salary calculation\nf8=Frame(w,height=2000,width=2000)\nt = ttk.Treeview(f8, columns=(1, 2, 3, 4, 5,6,7), show=\"headings\", height=10)\nt.pack()\nt.heading(1, text=\"Name\")\nt.heading(2, text=\"Present\")\nt.heading(3, text=\"Late Time\")\nt.heading(4, text=\"Half Day\")\nt.heading(5, text=\"Over Time\")\nt.heading(6, text=\"Absent\")\nt.heading(7, text=\"Net Pay\")\nButton(f8, text=\"Previous\", command=back, height=3, width=13).pack()\nw.mainloop()","sub_path":"attandance.py","file_name":"attandance.py","file_ext":"py","file_size_in_byte":12236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"284645852","text":"#!/usr/bin/env python\n\n\"\"\"\nimage process module\n\"\"\"\n\nimport cv2\nimport numpy as np\n\nimport imageutility as imutil\n\n\ndef imdrawcross(imgcol, point, size, color='g'):\n \"\"\"\n draw a cross on the point\n \"\"\"\n cx, cy = point\n h_pt1 = (cx - size, cy)\n h_pt2 = (cx + size, cy)\n v_pt1 = (cx, cy - size)\n v_pt2 = (cx, cy + size)\n\n if color == 'g':\n col = (0,255,0)\n\n cv2.line(imgcol, h_pt1, h_pt2, col, lineType = 8)\n cv2.line(imgcol, v_pt1, v_pt2, col, lineType = 8)\n\n
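# Usage sketch (hypothetical frame and coordinates) for the drawing helpers in this module; both draw in place on a colour image:\n#   imdrawcross(frame, (120, 80), size=5, color='g')\n#   imdrawcircle(frame, (120, 80, 20), color='red', filled=False)\n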
def imdrawcircle(imgcol, circle, color='red', filled=False):\n \"\"\"\n draw a circle\n \"\"\"\n thickness = 1\n\n center = (circle[0], circle[1])\n rad = circle[2]\n\n if filled:\n thickness = -1\n\n if color == 'red':\n col = (0,0,255)\n\n cv2.circle(imgcol, center, rad, col, thickness, lineType = 8)\n\n\ndef imoverlaycircles(img, circles):\n \"\"\"\n (imagearray, [(x,y,r)]) -> (imagearray)\n\n overlay circles onto a gray level image\n\n \"\"\"\n\n imgtmp = imconvertgrayscaledynamic(img)\n # Convert image gray to color (RGB)\n imgcol = cv2.cvtColor(imgtmp, cv2.COLOR_GRAY2RGB)\n\n for cc in circles:\n center = (cc[0], cc[1])\n rad = cc[2]\n # overlay circle\n cv2.circle(imgcol, center, rad, (0,0,255),thickness=1, lineType = 8)\n\n return imgcol\n\n\ndef transferImageHistLUT(imgSrc, usrLUT):\n \"\"\"\n (imageArray, intArray) -> imageArray\n\n First apply histogram equalization, then perform a user-specified\n lookup-table transformation by remapping the pixel values in the\n image.\n\n Precondition: imgSrc is an 8-bit gray level image\n\n \"\"\"\n\n # histogram equalization\n imgHistEq = cv2.equalizeHist(imgSrc)\n\n # lookup-table transformation\n imgDst = usrLUT[imgHistEq]\n\n return imgDst\n\n\ndef imconvertgrayscaledynamic(imgsrc):\n \"\"\"\n (imageArray) -> imagearray\n\n Convert a grayscale image to U8, dynamically stretching pixel values\n\n >>> imconvertgrayscaledynamic(img)\n\n Preconditions: imgsrc is a grayscale image\n \"\"\"\n\n minval,maxval,minloc,maxloc = cv2.minMaxLoc(imgsrc)\n scale = 256.0 / (maxval - minval)\n\n imgdst = cv2.convertScaleAbs((imgsrc - minval), alpha=scale)\n\n return imgdst\n\n\ndef imcropcircle(imgsrc, cc):\n \"\"\"\n (imageArray, tuple) -> imageArray\n\n Crop the pupil as a circle\n\n imgsrc: image array\n cc: circle (center_x,center_y, radius_r)\n\n >>> imcropcircle(imgsrc, (x,y,r))\n\n \"\"\"\n\n (cx,cy,r) = cc\n mask = np.zeros(imgsrc.shape, np.uint8)\n cv2.circle(mask,(cx,cy),r,255,-1)\n imgdst = cv2.bitwise_and(imgsrc, imgsrc, mask=mask)\n imgdst = imgdst[cy-r:cy+r, cx-r:cx+r]\n\n return imgdst\n\n\ndef imcirclemean(imgsrc, circle):\n \"\"\"\n (imageArray, tuple) -> tuple\n\n Calculate the average value of a circle/pupil;\n cv2.mean returns a 4-element tuple (B,G,R,A)\n\n >>> imcirclemean(imgsrc, (x,y,r))\n\n \"\"\"\n\n (cx,cy,r) = circle\n mask = np.zeros(imgsrc.shape, np.uint8)\n cv2.circle(mask,(cx,cy),r,255,-1)\n mean = cv2.mean(imgsrc, mask)\n\n return mean\n\n\ndef imrectmean(imgsrc, rects):\n \"\"\"\n calculate mean intensities of a list of rects\n\n \"\"\"\n\n mean_ints = []\n for rect in rects:\n # calculate ROI\n (x1,y1,x2,y2) = rect\n r = imgsrc[y1:y2+1, x1:x2+1]\n # average\n m = np.mean(r)\n mean_ints.append(m)\n\n return mean_ints\n\n\ndef convertGray2ColorLUT(imgSrc, colLUT):\n \"\"\"\n (imageArray1c, lookupTable3c) -> imageArray3c\n\n Convert a gray level image (single channel) to a RGB color image\n (3 channels), with a user-defined 3-channel look-up table.\n\n >>> convertGray2ColorLUT(imgSrc, colLUT) -> imgDst\n\n Precondition:\n imgSrc is a 8-bit 2-D 1-channel array,\n colLUT is a 8-bit 2-D 3-channel array (1-256-[255,255,255]);\n e.g. use imread to load the look-up table\n \"\"\"\n\n # Convert image gray to color (RGB)\n imgCol = cv2.cvtColor(imgSrc, cv2.COLOR_GRAY2RGB)\n\n # Apply built-in false color, \"rainbow\" from openCV\n #imgDst = cv2.applyColorMap(imgCol, cv2.COLORMAP_RAINBOW)\n\n # Apply the look-up table, \"rainbow\" from LV\n imgDst = cv2.LUT(imgCol, colLUT)\n\n return imgDst\n\n
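# Illustration of morphological reconstruction (hypothetical 5x5 arrays): a seed pixel grows by repeated 3x3 dilation but can never leave the mask, e.g.\n#   marker = np.zeros((5, 5), np.uint8); marker[2, 2] = 255\n#   mask = np.full((5, 5), 255, np.uint8)\n#   imreconstruct(marker, mask)   # -> all 255: the seed fills the whole mask\n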
def imreconstruct(marker, mask):\n \"\"\"\n (imageArray, imageArray) -> imageArray\n\n Mimic matlab's imreconstruct with a simple loop;\n reconstruct the image with a 3x3 kernel\n\n >>> imreconstruct(marker, mask) -> imgDst\n\n\n \"\"\"\n\n #kernel = np.ones((3,3),np.uint8)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))\n\n h = marker\n h1 = cv2.bitwise_and(cv2.dilate(h, kernel, iterations=1), mask)\n\n # repeat until h does not change any more\n # CMP_NE compare not equal: 1; equal: 0, -> total countNonZero: 0\n while (cv2.countNonZero(cv2.compare(h1,h,cv2.CMP_NE))):\n h = h1\n h1 = cv2.bitwise_and(cv2.dilate(h, kernel, iterations=1), mask)\n\n return h1\n\n\ndef bwremoveparticles(imgsrc, ksize):\n \"\"\"\n (imageArray, int) -> imageArray\n\n removes small particles from a binary image by\n 1) eroding the image with a ksize kernel\n 2) reconstructing the image with a 3x3 kernel\n\n >>> bwremoveparticles(imgsrc, ksize)\n \"\"\"\n\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (ksize,ksize))\n\n # erode image with kernel\n fe = cv2.erode(imgsrc, kernel, iterations=1)\n\n # reconstruct image with imreconstruct\n imgdst = imreconstruct(fe, imgsrc)\n\n return imgdst\n\n\ndef bwfillhole(thresh, mask):\n \"\"\"\n (imageArrayU8, imageArrayU8) -> imageArrayU8\n\n 1) it fills the border with 0s\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n --->>>\n [0, 0, 0, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0],\n\n 2) fill all holes of a binary image using floodFill\n\n >>> bwfillholes(thresh, mask) -> bwholesfilled\n\n Precondition:\n image is a binary image;\n hole is defined as black (0) inside white (1) area.\n \"\"\"\n\n #dst = thresh.copy() # numpy array property copy()\n dst = np.copy(thresh)\n\n # fill the border with 0\n thresh[0, :] = 0 # fill first row with 0\n thresh[-1,:] = 0 # fill last row with 0\n thresh[:, 0] = 0 # fill first col with 0\n thresh[:,-1] = 0 # fill last col with 0\n # flood-fill the background (connected to (0,0)) with 255\n cv2.floodFill(thresh, mask, (0,0), 255)\n\n dst = dst + (~thresh)\n \"\"\"\n alternatively use cv::findNonZero(~thresh) to find all non-zero points,\n then loop through each point and set the value to 255\n bw[cv2.findNonZero(~thresh)] = 255\n \"\"\"\n\n return dst\n\n
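# Note: cv2.floodFill requires its mask to be 2 pixels larger than the image in each dimension. A typical call (hypothetical sizes):\n#   h, w = thresh.shape\n#   mask = np.zeros((h + 2, w + 2), np.uint8)\n#   filled = bwfillhole(thresh.copy(), mask)\n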
def imremovecircle(imgu8, circle):\n \"\"\"\n (image_matrix, (x,y,r))\n\n Remove a small circular particle from a U8 image and inpaint it with nearby\n pixel values. This function uses cv2.inpaint, which works only on u8c1 or\n u8c3 images.\n\n Precondition: cornea (x,y) is the relative shift from the center of the image\n\n \"\"\"\n (cx, cy, cr) = circle\n h, w = imgu8.shape\n# (px, py) = (w/2, h/2)\n\n # check boundary\n# if (((abs(cx) + cr) > w/2) or ((abs(cy) + cr) > h/2)):\n# return (False, imgu8)\n if ((cx<0) | (cy<0) | (cx > w) | (cy>h)):\n return (False, imgu8)\n#\n# x = px + cx\n# y = py + cy\n# r = max(cr, 3)\n\n # create a circular mask\n mask = np.zeros((h,w), np.uint8)\n cv2.circle(mask, (cx,cy), cr, 255, -1)\n\n # fill the hole with cv2.inpaint\n dst = cv2.inpaint(imgu8, mask, 3, cv2.INPAINT_NS)\n\n return (True, dst)\n\n\n#cv2.getRectSubPix\n","sub_path":"imageprocess.py","file_name":"imageprocess.py","file_ext":"py","file_size_in_byte":7452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"400657749","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom pynq.overlays.base import BaseOverlay\nfrom pynq.lib.video import *\nbase = BaseOverlay(\"base.bit\")\nimport numpy as np\n\n\n# In[ ]:\n\n\n# monitor configuration: 640*480 @ 60Hz\nMode = VideoMode(640,480,24)\nhdmi_out = base.video.hdmi_out\nhdmi_out.configure(Mode,PIXEL_BGR)\nhdmi_out.start()\nframe_out_w = 320\nframe_out_h = 240\n# camera (input) configuration\nframe_in_w = 320\nframe_in_h = 240\n\n\n# In[ ]:\n\n\ndef hdmi_show(frame,hdmi_out):\n outframe = hdmi_out.newframe() # allocate an output frame buffer (a 3-D array); its size is decided by hdmi_out, configured in the HDMI init cell above\n #print(\"ori\",type(outframe),len(outframe))\n #outframe[120:120+frame_out_h,160:160+frame_out_w,:] = frame[120:frame_out_h+120,0:frame_out_w,:] # copy the frame into the format HDMI can read; mind the channel order\n outframe[:,:,:] = frame[:,:,:] # copy the frame into the format HDMI can read; mind the channel order\n \n #print(\"set\",type(frame_vga),len(frame_vga))\n hdmi_out.writeframe(outframe)\n\n\n# In[ ]:\n\n\nheart_list = np.random.randint(65,70,size=200)\nheart_list\n\n\n# In[ ]:\n\n\n#!/usr/bin/python\n# -*-coding:utf-8 -*-\n\nimport socket\nimport cv2\nimport numpy\n\n# receive the image-size header\ndef recv_size(sock, count):\n buf = b''\n while count:\n newbuf = sock.recv(count) # receive at most count bytes\n #print(newbuf)\n if not newbuf: return None\n buf += newbuf # append the received bytes to buf\n count -= len(newbuf) # decrease count by the number of bytes received\n return buf\n\n\n\n\n# socket.AF_INET is for IPv4 network communication\n# socket.SOCK_STREAM is a TCP stream socket\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# set the address and port; leave the address empty to accept connections from any IP, but the port must be set\naddress = ('192.168.0.102', 8888)\ns.bind(address) # bind the socket to the address\ns.listen(True) # start listening for incoming TCP connections\nprint ('Waiting for images...')\n\n# accept a TCP connection and return (conn, addr): conn is a new socket object for sending/receiving data, addr is the client's address.\n\nconn, addr = s.accept()\n\n\n###############################################\n
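# The matching sender (client side, not included in this file) is assumed to JPEG-encode each frame and prefix it with a 16-byte ASCII length header, mirroring recv_size() above. A minimal sketch:\n#   ok, enc = cv2.imencode('.jpg', frame)\n#   payload = enc.tobytes()\n#   sock.send(str(len(payload)).ljust(16).encode())\n#   sock.send(payload)\n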
\nimport cv2\nimport numpy as np\nimport time\nfrom PIL import Image, ImageDraw, ImageFont\n\n# heart-rate data\nt_size = 1000\nheart_list = np.random.randint(65,70,size=1000)\nlist_index = 0\n\ndef cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):\n if (isinstance(img, np.ndarray)): # check whether this is an OpenCV image (numpy array)\n img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n # create a drawing object for the given image\n draw = ImageDraw.Draw(img)\n # font style\n# fontStyle = ImageFont.truetype(\n# \"font/simsun.ttc\", textSize, encoding=\"utf-8\")\n# # draw the text\n# draw.text((left, top), text, textColor, font=fontStyle)\n# # convert back to OpenCV format\n return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\nscale = 0\n# print('cap')\n# cap = cv2.VideoCapture(0)\n# print('cap2')\n# # Check if camera opened successfully\n# if (cap.isOpened() == False):\n# print(\"Error opening video stream or file\")\nframeNum = 0\n# Read until video is completed\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))\nkernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\nW =[]\ncnt = 0\ncnt2 = 0\ncnt2_n = 0\ncnt3= 0\nflag =0\ncnt4 = 0\n################################################\nwhile True:\n length = recv_size(conn,16) # first receive the size header sent by the client\n length = length.decode()\n if isinstance(length, str): # if the size header arrived, go on to receive the whole image\n #print(\"length\",length)\n stringData = recv_size(conn,int(length))\n data = numpy.frombuffer(stringData, dtype='uint8') # frombuffer replaces the deprecated fromstring\n decimg = cv2.imdecode(data,1) # decode into an image matrix\n \n #\n #print('Image received successfully!')\n \n############################################################\n #frame = cv2.resize(frame, (int(a[1] / 3), int(a[0] / 3)), interpolation=cv2.INTER_CUBIC)\n frame = decimg.copy()\n frameNum += 1\n time1 = time.time()\n tempframe = frame\n if (frameNum == 1):\n previousframe = cv2.cvtColor(tempframe, cv2.COLOR_BGR2GRAY)\n if (frameNum >= 2):\n time0 = time.time()\n\n currentframe = cv2.cvtColor(tempframe, cv2.COLOR_BGR2GRAY)\n\n currentframe = cv2.absdiff(currentframe, previousframe)\n #currentframe = currentframe+currentframe2\n\n clahe = cv2.createCLAHE(clipLimit=0.01, tileGridSize=(10, 10))\n currentframe = clahe.apply(currentframe)\n\n median = cv2.medianBlur(currentframe, 3)\n\n # img = cv2.imread(\"E:/chinese_ocr-master/4.png\")\n # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, threshold_frame = cv2.threshold(median, 10, 255, cv2.THRESH_OTSU)\n gauss_image = cv2.GaussianBlur(threshold_frame, (3, 3), 0)\n\n\n edge_output = cv2.Canny(gauss_image, 50, 150)\n\n\n #fgmk = cv2.morphologyEx(edge_output, cv2.MORPH_CLOSE, kernel)\n\n\n\n # Display the resulting frame\n\n image = frame.copy()\n #fgmk = cv2.medianBlur(fgmk, 3)\n # find contours\n contours = cv2.findContours(edge_output, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n for c in contours[1]:\n if cv2.contourArea(c) > 1500 and cv2.contourArea(c) < 15000:\n (x, y, w, h) = cv2.boundingRect(c)\n if cnt<2:\n\n W.append(y)\n cnt +=1\n else:\n W.append(y)\n del(W[0])\n\n if scale == 0: scale = -1;break\n scale = w / h\n\n cv2.putText(image, \"scale:{:.3f}\".format(scale), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 0, 255), 2)\n cv2.drawContours(image, [c], -1, (255, 0, 0), 1)\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 1)\n image = cv2.fillPoly(image, [c], (255, 255,\n 255)) # fill the contour\n flag = 1\n # classify posture from the body's aspect ratio: scale = w/h of the bounding box; > 0.75 means wide/lying, < 0.75 standing; a jump of the box's y by more than 30 px marks a fall\n if scale > 0.75:\n #print(W)\n\n if ( y - W[0])>30 and y!=0:\n #print(cnt2)\n #print(W)\n if cnt2 > 1:\n cv2.putText(image, \" Fallen\", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n elif cnt2>0:\n cv2.putText(image, \" Walking\", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n \n cv2.putText(image, \" Heart: {}\".format(heart_list[list_index]), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n print(\"walking\")\n if flag:\n cnt2+=1\n\n elif cnt2>2:\n cv2.putText(image, \" Fallen\", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n else:\n cv2.putText(image, \" Walking\", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n #cv2.putText(image, \" Heart: {}\".format(heart_list[list_index]), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n print(\"walking\")\n flag = 0\n\n # cv2.putText(img, \"Walking\", (10, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) # walking\n #if scale > 0.9 and scale < 2:\n #image = cv2ImgAddText(image, \"Falling (transition)\", 10, 20, (255, 0, 0), 30) # mid-fall\n # cv2.putText(img, \"Falling\", (10, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) # falling\n if scale < 0.75 and scale > 0:\n if cnt2 < 2:\n cv2.putText(image, \" Walking\", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n print(\"walking\")\n cv2.putText(image, \" Heart: {}\".format(heart_list[list_index]), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n elif cnt2 >3:\n cv2.putText(image, \" Walking\", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n print(\"walking\")\n cv2.putText(image, \" Heart: {}\".format(heart_list[list_index]), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n cnt2 = 0\n else:\n cv2.putText(image, \" Fallen\", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0, 0, 255), 2)\n\n cnt3 += 1\n if cnt3 ==3:\n cnt3 = 0\n if cnt2 == cnt2_n and cnt2<5:\n cnt2 = cnt2\n cnt2_n = cnt2\n\n hdmi_show(image,hdmi_out)\n print(1/abs(time.time()-time0))\n\n # Press Q on keyboard to exit\n c = cv2.waitKey(1) # stop\n list_index +=1\n if (c == 27):\n break\n if (base.buttons[3].read()==1):\n break\n if (base.buttons[0].read()==1):\n flag = 1\n #breathe.value = 0\n break\n\n previousframe = cv2.cvtColor(tempframe, cv2.COLOR_BGR2GRAY)\n\n\n# When everything done, release the video capture object\n# cap.release()\n# hdmi_out.stop()\n# del hdmi_out\n# Closes all the frames\n##############################################################\n \n \n \n \n \n \n \n c = cv2.waitKey(1) # stop\n if (c == 27):\n break\ns.close()\ncv2.destroyAllWindows()\n\n","sub_path":"传输视频.py","file_name":"传输视频.py","file_ext":"py","file_size_in_byte":9943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"651427983","text":"def extract_title(df):\n \"\"\"\n Extracts the title out of a name\n\n Arguments:\n df {Pandas dataframe} -- should contain a Name column\n\n Returns:\n df {Pandas dataframe} -- same dataframe except changed Name column content\n \"\"\"\n print(\"Going to extract the title out of the names\")\n extraction = {'.*Mrs\\..*': 'Mrs',\n '.*Sir\\..*': 'Royalty',\n '.*Mr\\..*': 'Mr',\n '.*Capt\\..*': 'Officer',\n '.*Col\\..*': 'Officer',\n '.*Countess\\..*': 'Royalty',\n '.*Dona\\..*': 'Royalty',\n '.*Don\\..*': 'Royalty',\n '.*Dr\\..*': 'Officer',\n '.*Jonkheer.*': 'Royalty',\n '.*Lady\\..*': 'Royalty',\n '.*Major\\..*': 'Officer',\n '.*Master\\..*': 'Master',\n '.*Mlle\\..*': 'Miss',\n '.*Mme\\..*': 'Mrs',\n '.*Ms\\..*': 'Mrs',\n '.*Rev\\..*': 'Officer',\n '.*Miss\\..*': 'Miss'}\n df['Name'] = df['Name'].replace(extraction, regex=True)\n return df\n\n\ndef fillna_age(df):\n \"\"\"\n Will fill all missing values of the Age column\n based on the median values of the Age\n after a groupby on the Name, Pclass, and Sex\n\n Arguments:\n df {Pandas dataframe} -- should contain a Age, Pclass, Name, and Sex column\n\n Returns:\n df {Pandas dataframe} -- same dataframe except all missing values of the Age column are filled\n \"\"\"\n print(\"Going to fill all missing values for age\")\n age_selection = df[['Age', 'Pclass', 'Name', 'Sex']].dropna()\n grouped_age = age_selection.groupby(['Name', 'Pclass', 'Sex'])['Age'].median()\n\n df['Age'] = df.apply(lambda x: grouped_age.loc[(x['Name'], x['Pclass'], x['Sex'])] if not x['Age'] > 0 else x['Age'],\n axis=1)\n return df\n","sub_path":"Phase 3 - testing linting logging/titanic_analysis/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"62636209","text":"import 
sys\nimport time\n\nfrom PySide6.QtCore import QThread, Signal, Slot\nfrom PySide6.QtWidgets import (\n QApplication,\n QLabel,\n QMainWindow,\n QPushButton,\n QVBoxLayout,\n QWidget,\n)\n\n\nclass Thread(QThread):\n \"\"\"\n Worker thread\n \"\"\"\n\n result = Signal(str)\n\n @Slot()\n def run(self):\n \"\"\"\n Your code goes in this method\n \"\"\"\n print(\"Thread start\")\n counter = 0\n while True:\n time.sleep(0.1)\n # Output the number as a formatted string.\n self.result.emit(f\"The number is {counter}\")\n counter += 1\n print(\"Thread complete\")\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n # Create thread and start it.\n self.thread = Thread()\n self.thread.start()\n\n label = QLabel(\"Output will appear here\")\n button = QPushButton(\"Kill thread\")\n # Terminate (kill immediately) the thread.\n button.pressed.connect(self.thread.terminate)\n\n # Connect signal, so output appears on label.\n self.thread.result.connect(label.setText)\n\n container = QWidget()\n layout = QVBoxLayout()\n layout.addWidget(label)\n layout.addWidget(button)\n container.setLayout(layout)\n\n self.setCentralWidget(container)\n self.show()\n\n\n\napp = QApplication(sys.argv)\nwindow = MainWindow()\napp.exec()\n","sub_path":"pyside6-source/concurrent/qthread_2.py","file_name":"qthread_2.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"29945035","text":"import community\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\n\nG=nx.readwrite.gexf.read_gexf(\"graph.gexf\")\nG=G.to_undirected()\n\ncommunauties = community.best_partition(G)\nprint(type(communauties))\nprint(communauties)\n\nsize = float(len(set(communauties.values())))\npos = nx.spring_layout(G)\ncount = 0\ncolors=(\"red\",\"blue\",\"green\",\"yellow\",\"black\",\"purple\",\"violet\",\"orange\")\nfor com in set(communauties.values()) :\n count = count + 1\n list_nodes = [nodes for nodes in communauties.keys()\n if communauties[nodes] == com]\n nx.draw_networkx_nodes(G, pos, list_nodes, node_size = 40,\n node_color = colors[com% len(colors)])\n\n\nnx.draw_networkx_edges(G, pos, alpha=0.5,width=0.1)\nplt.show()","sub_path":"Python Project/communities.py","file_name":"communities.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"32500711","text":"import pandas as pd\nfrom jugaad_trader import Zerodha\nfrom datetime import datetime, timedelta\nfrom dateutil.tz import tzoffset\nimport talib\nfrom ta.volatility import DonchianChannel\n\nkite = Zerodha()\n\nkite.set_access_token()\n\ntoday = datetime.today()\n\n\ndef get_timestamp():\n return datetime.now(tzoffset(None, 19800)).isoformat(' ', 'seconds')\n\n\ndef get_ltp(instrument_token):\n return kite.ltp(instrument_token)[str(instrument_token)]['last_price']\n\n\n\n# Tickertape: {9957378: 'BANKNIFTY2170835000CE', 9957634: 'BANKNIFTY2170835000PE'}\nhistorical_data = pd.DataFrame(kite.historical_data(\n 9957378, \"2021-07-02 09:15:00\", \"2021-07-02 15:30:00\", \"minute\", oi=True))\n\nhistorical_data[\"oiema21\"] = talib.EMA(historical_data.oi, timeperiod = 21)\nhistorical_data['don'] = talib.MIN(historical_data.oi, timeperiod = 21)\n\n\nopen_position = False\nprev_candle = \"\"\nbuy_price = 0\nsell_price = 0\n\nprofit = 0\npnl = []\n\n\n\nfor index, data in historical_data.iterrows():\n prev_candle = historical_data.iloc[index-1]\n\n if not open_position:\n 
if prev_candle.don > data.oi:\n open_position = True\n buy_price = data.close\n print(\"Trade opened\", data.close)\n \n\n\n if open_position:\n if prev_candle.don < data.oi:\n open_position = False\n sell_price = data.close \n profit += sell_price - buy_price\n print(\"Trade closed\", data.close)\n print(\"PNL:\", sell_price - buy_price, pnl)\n \n\n buy_price=0\n sell_price=0\n pnl = []\n elif prev_candle.don > data.oi:\n pnl.append(data.close - buy_price)\n prev_oi = data.oi\n\nprint(profit)\n\n\n\n# oi = historical_data.oi.tolist()\n# prev_oi = 0\n# counter = 0\n# for oi_value in oi:\n# if oi_value < prev_oi:\n# counter+=1\n# prev_oi = oi_value\n\n# print(counter)\n","sub_path":"oi_playground/backtestOI.py","file_name":"backtestOI.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"396697218","text":"from datetime import datetime\nimport subprocess \nimport csv\nimport argparse\nimport json\nimport re\nimport os\n\n#start_function\ndef del_secret():\n js_file = os.environ['json_file']\n data = json.load(open(js_file,\"r\"))\n for secret in data[\"secrets\"]:\n json_body='{range .items[*]}{.metadata.name}{.metadata.creationTimestamp}}{end}'\n # print each secret together with its year; put a tab between the keyword (e.g. db-pass) and the year (e.g. 2022)\n cmd1 = \"kubectl get secrets -o jsonpath='%s'\"%json_body\n c1 = subprocess.Popen(cmd1,shell=True, stdout=subprocess.PIPE)\n c1.wait()\n out = c1.communicate()\n out_text = out[0].decode(\"utf-8\") # renamed from data to avoid shadowing the parsed config above\n secrets = []\n for i in out_text.split(\"}\")[:-1]:\n _secret = i.split(secret[\"year\"])[0]\n if secret[\"secret_name\"] in _secret:\n secrets.append(_secret)\n print(_secret+\" \"+secret[\"year\"])\n #print(data_json)\n # print the secrets without the year\n for _secret in secrets:\n print(_secret)\n\n # interactive pattern: ask the user \"do you want to delete?\";\n # if yes, run the delete-secret step, otherwise skip it\n \n #answer = input(\"do you want to delete: \") #this is python3 only\n #answer = raw_input(\"do you want to delete: \") #this is python2 only\n if secret[\"proceed\"] == \"true\":\n for _secret in secrets:\n cmd2 = \"kubectl delete secret %s\"%_secret\n c2 = subprocess.Popen(cmd2,shell=True, stdout=subprocess.PIPE)\n c2.wait()\n out = c2.communicate()\n out_text = out[0].decode(\"utf-8\")\n print(out_text)\n else:\n print(\"delete aborted.\")\n continue\n #delete the secret\n #cmd1 = \"kubectl get secrets -o jsonpath=json_body | tr \"}\" \"\\n\" | sed 's/2022-.*/ 2022/g' | grep -E 'db-pass' | awk '{print $1;}' | xargs -I {} kubectl delete secret {}\"\n #c1 = subprocess.Popen(cmd1,shell=True, stdout=subprocess.PIPE)\n # c1.wait()\n #out = c1.communicate()\n #data = json.loads(out[0].decode(\"utf-8\"))\n # print(\"secret got deleted\")\n\n\n
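# Expected shape of sec.json, inferred from the keys read above (illustrative values):\n#   {\"secrets\": [{\"secret_name\": \"db-pass\", \"year\": \"2022\", \"proceed\": \"true\"}]}\n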
os.environ['json_file'] = 'sec.json'\n\ndel_secret()\n","sub_path":"infra/secret-delete-env-event.py","file_name":"secret-delete-env-event.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"552655687","text":"# Author: Daniel Rojas, A01376572\r\n# Mission 6. Lists in Python\r\n\r\ndef recortarLista(enteros): # Removes the first and last values of a list\r\n enteros2 = []\r\n for indice in range(len(enteros)):\r\n n = enteros[indice]\r\n if indice != 0 and indice != len(enteros)-1:\r\n enteros2.append(n)\r\n return enteros2\r\n\r\n\r\ndef estanOrdenados(lista):\r\n orden = sorted(lista)\r\n if orden == lista:\r\n return True\r\n else:\r\n return False\r\n \r\n \r\ndef sonAnagramas(a,b):\r\n orden1 = sorted(a.lower())\r\n orden2 = sorted(b.lower())\r\n print(orden1)\r\n print(orden2)\r\n if orden1 == orden2:\r\n return True\r\n else:\r\n return False\r\n \r\n\r\ndef hayDuplicados(enteros):\r\n for x in enteros:\r\n veces = enteros.count(x)\r\n if veces > 1:\r\n return True\r\n break\r\n else:\r\n continue\r\n return False\r\n \r\n \r\ndef borrarDuplicados(enteros):\r\n while hayDuplicados(enteros)==True:\r\n for indice in range(len(enteros)):\r\n dato = enteros[indice]\r\n veces = enteros.count(dato)\r\n for n in range(veces-1):\r\n enteros.remove(dato)\r\n if veces>=2:\r\n break\r\n \r\n\r\ndef main():\r\n print(\"Exercise 1:\") # Exercise 1\r\n enteros = [1,2,3,4,5]\r\n enterosRecortada = recortarLista(enteros)\r\n print(\"Original list\", enteros, \", trimmed it is\", enterosRecortada)\r\n \r\n print(\"\"\"\r\nExercise 2:\"\"\") # Exercise 2\r\n lista = [7,12,53]\r\n ordenada = estanOrdenados(lista)\r\n if ordenada == True:\r\n print(\"The list\",lista,\"IS sorted\")\r\n else:\r\n print(\"The list\",lista,\"is NOT sorted\")\r\n \r\n print(\"\"\"\r\nExercise 3:\"\"\") # Exercise 3\r\n cadena1 = \"Roma\"\r\n cadena2 = \"Mora\"\r\n anagrama = sonAnagramas(cadena1,cadena2)\r\n if anagrama == True:\r\n print(cadena1, \"and\", cadena2, \"ARE anagrams\")\r\n else:\r\n print(cadena1, \"and\", cadena2, \"are NOT anagrams\")\r\n\r\n print(\"\"\"\r\nExercise 4:\"\"\") # Exercise 4\r\n lista = [2,8,5,3,4]\r\n duplicados = hayDuplicados(lista)\r\n if duplicados == True:\r\n print(\"The list\",lista,\"DOES contain duplicate numbers\")\r\n else:\r\n print(\"The list\",lista,\"has NO duplicates\")\r\n \r\n print(\"\"\"\r\nExercise 5:\"\"\") # Exercise 5\r\n lista = [1,2,2,2,3,3,3,4,5]\r\n print(\"The original list is\",lista)\r\n borrarDuplicados(lista)\r\n print(\"And without duplicate numbers it looks like this:\",lista)\r\n \r\n \r\nmain()","sub_path":"listas.py","file_name":"listas.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"447766566","text":"def first(x):\n if type(x) == int:\n y = x / 2\n return y\n else:\n print(\"x must be integer\")\n\ndef second(x):\n if type(x) == int:\n y = x * 4\n return y\n else:\n print(\"x must be integer\")\n \na = first(100.1)\nprint(second(a))","sub_path":"4-4.py","file_name":"4-4.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"398248174","text":"import ejemplo31\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\ndef train_linreg(sess, model, X_train, Y_train, num_epochs = 1200):\n sess.run(model.init_op)\n training_costs = []\n for i in range(num_epochs):\n _, cost = sess.run( [model.optimizer,model.mean_cost],\n feed_dict={ model.X:X_train,\n model.Y:Y_train})\n training_costs.append(cost)\n return training_costs\n\n\n\n\ndef predict_linreg(sess, model, X_test):\n y_pred = sess.run( model.z_net,\n feed_dict ={model.X:X_test})\n return y_pred\n\n\n\n\n\n
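# Note: TfLinreg comes from ejemplo31 (not shown here); it is assumed to expose grafo, init_op, optimizer, mean_cost, z_net and the placeholders X and Y. Typical use:\n#   sess = tf.Session(graph=lrmodel.grafo)\n#   costs = train_linreg(sess, lrmodel, X_train, Y_train, num_epochs=100)\n#   y_hat = predict_linreg(sess, lrmodel, X_train)\n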
if __name__ == '__main__':\n\n X_train = np.arange(10).reshape((10,1))\n\n Y_train = np.array([3.0, 3.1, 3.2, 3.3,\n 3.6, 3.5, 4.3, 3.4,\n 3.4, 4.0])\n\n print(\"\\nX_train =\\n\", X_train)\n print(\"\\nY_train =\\n\", Y_train)\n\n lrmodel = ejemplo31.TfLinreg(x_dim = X_train.shape[1], learning_rate = 0.01)\n\n sess = tf.Session(graph = lrmodel.grafo)\n training_costs = train_linreg(sess, lrmodel, X_train, Y_train)\n\n plt.plot( range(1,len(training_costs) + 1),\n training_costs)\n plt.tight_layout()\n plt.xlabel('Epoch.')\n plt.ylabel('Training costs.')\n plt.title('Training.')\n plt.show()\n\n plt.scatter( X_train,\n Y_train,\n marker = 's',\n s = 50,\n label = 'Training data')\n\n plt.plot( range(X_train.shape[0]),\n predict_linreg(sess, lrmodel, X_train),\n color = 'gray',\n marker = 'o',\n markersize = 6,\n linewidth = 3,\n label = 'Linreg model.')\n\n plt.xlabel('x')\n plt.ylabel('y')\n plt.tight_layout()\n plt.legend()\n plt.title('Training.')\n plt.show()\n","sub_path":"ejemploslibro/ejemplo32.py","file_name":"ejemplo32.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"637255116","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nUtility functions for evaluating strategy performance\n\n@author: Leon Zhang\n@version: 0.4\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\n# def create_equity_curve(total_series):\n# \"\"\"\n# Compute the equity curve\n# Parameters:\n# the all_holdings DataFrame from the portfolio object\n# shows profit and loss (PnL)\n# \"\"\"\n# curve = total_series.to_frame('total')\n# curve['returns'] = curve['total'].pct_change() # percentage change\n# curve['equity_curve'] = (1.0 + curve['returns']).cumprod() # cumulative product\n# return curve\n#\n# def create_sharpe_ratio(returns, periods=252):\n# \"\"\"\n# Compute the strategy's Sharpe ratio against a zero benchmark (no risk-free rate used)\n# Parameters:\n# returns: per-bar percentage returns as a pandas Series\n# periods: 252 for daily, 252*4 for hourly, 252*4*60 for minutely\n# \"\"\"\n# return np.sqrt(periods) * (np.mean(returns))/np.std(returns)\n#\n# def create_drawdowns(equity_curve):\n# \"\"\"\n# Compute the max drawdown of the PnL curve, and its duration\n# Parameters:\n# equity_curve: equity-curve Series\n#\n# return: drawdown, duration\n# Ref: http://stackoverflow.com/questions/22607324/start-end-and-duration-of-maximum-drawdown-in-python\n# \"\"\"\n# # track cumulative returns and the high water mark\n# df = equity_curve.to_frame('equity_curve')\n# df['cum_max'] = df['equity_curve'].cummax()\n# df['dd'] = df['cum_max'] / df['equity_curve'] - 1 #\n# i = df['dd'].index.get_loc(df['dd'].idxmax()) # row index where the drawdown period ends\n# j = df['dd'].index.get_loc(df['equity_curve'].iloc[:i].idxmax()) # row where the drawdown starts\n#\n# return df['dd'], df['dd'].iloc[-1], i-j\n#\n# def create_drawdowns_slow(pnl):\n# \"\"\"\n# Non-vectorised max-drawdown computation (slow)\n# pnl: percentage returns as a pandas Series\n# \"\"\"\n# # track cumulative returns and the high water mark\n# hwm = [0] # running-maximum sequence\n#\n# idx = pnl.index\n# drawdown = pd.Series(index=idx)\n# duration = pd.Series(index=idx)\n#\n# for t in range(1, len(idx)):\n# hwm.append(max(hwm[t-1], pnl[t]))\n# drawdown[t] = hwm[t] - pnl[t]\n# duration[t] = (0 if drawdown[t] == 0 else duration[t-1]+1)\n# return drawdown, drawdown.max(), duration.max()\n\n\ndef perform_metrics(total_series, periods=252):\n \"\"\"\n Compute the equity curve, Sharpe ratio and maximum drawdown\n Parameters:\n total_series: Series of account equity\n periods: backtest timescale, daily by default; used to annualise the Sharpe ratio\n Returns:\n a tuple of (perform, ret, sharpe_ratio, max_dd)\n \"\"\"\n perform = total_series.to_frame('total')\n perform['return'] = perform['total'].pct_change()\n perform['equity_curve'] = (1.0 + perform['return']).cumprod()\n ret = perform['equity_curve'][-1] - 1 # total return over the backtest\n sharpe_ratio = np.sqrt(periods) * np.mean(perform['return']) / np.std(perform['return']) # annualised Sharpe ratio\n\n
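# Drawdown recap: with equity e_t and running peak m_t = max(e_1..e_t), drawdown_t = e_t / m_t - 1; e.g. e = [1.0, 1.2, 0.9] gives min drawdown 0.9/1.2 - 1 = -0.25.\n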
perform['cum_max'] = perform['equity_curve'].cummax()\n perform['drawdown'] = perform['equity_curve'] / perform['cum_max'] - 1 # drawdown vector\n max_dd = perform['drawdown'].min() # maximum drawdown\n\n # i = holdings['drawdown'].index.get_loc(holdings['drawdown'].idxmax()) # row index where the drawdown period ends\n # j = holdings['dd'].index.get_loc(holdings['equity_curve'].iloc[:i].idxmax()) # row where the drawdown starts\n\n return perform, ret, sharpe_ratio, max_dd\n\n\ndef detail_blotter(backtest, positions, holdings, mode='simplified'):\n \"\"\"\n Build a detailed per-symbol blotter by merging market data, trades and account changes\n Parameters:\n backtest, positions, holdings: the variables returned by the backtest engine\n mode: with 'simplified', only the 'close' column of the market data is kept\n (a dict of DataFrames)\n Returns:\n a dict keyed by symbol, with DataFrames as values\n\n Example:\n blotter = detail_blotter(backtest, positions, holdings)\n blotter_rb = blotter['RB']\n blotter_rb.head()\n \"\"\"\n blotter = dict()\n data_dict = backtest.data_handler.latest_symbol_data\n trades = backtest.trade_record()\n trades['direction'] = [1 if d=='BUY' else -1 for d in trades['direction']]\n trades['cost'] = trades['direction'] * trades['fill_price'] * trades['quantity']\n for symb in data_dict.keys():\n data = pd.DataFrame(data_dict[symb], columns=['symbol', 'datetime', 'open', 'high', 'low',\n 'close', 'volume'])\n if mode == 'simplified':\n data = data[['datetime', 'close']].set_index('datetime')\n else: # 'full'\n data = data.set_index('datetime')\n\n trades_symb = trades[trades['symbol']==symb][['direction','fill_price', 'commission', 'cost']]\n holdings_symb = pd.Series(holdings[symb], name='holdings')\n positions_symb = pd.Series(positions[symb], name='positions')\n merge = data.join([positions_symb, holdings_symb, trades_symb], how='outer').iloc[1:, :].fillna(0.)\n # P&L at the end of each bar\n merge['pnl'] = merge['holdings'] - merge['holdings'].shift(1) - merge['cost'].shift(1) - \\\n merge['commission'].shift(1)\n merge.iloc[0, merge.columns.get_loc('pnl')] = 0. # first bar has no previous bar (NaN); .iloc replaces the removed pandas .ix\n # extra pass for a possible forced close-out at the end of the backtest\n merge.iloc[-1, merge.columns.get_loc('pnl')] = merge['holdings'].iloc[-1] - merge['holdings'].iloc[-2] - merge['cost'].iloc[-1] - \\\n merge['commission'].iloc[-1]\n # use the close of the first bar as the starting capital\n merge['adj_total'] = merge['pnl'].cumsum() + merge['close'].iloc[0]\n del merge['cost']\n blotter[symb] = merge\n\n return blotter\n","sub_path":"xquant/finance/perform.py","file_name":"perform.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"585920708","text":"from frozen_lake import *\nimport numpy as np \nimport gym\nfrom gym.spaces import prng\n\ndef max_causal_ent_irl(mdp, gamma, trajectories, epochs=1, learning_rate=0.2, \n r = None, horizon=None):\n \"\"\"\n Finds the reward vector that maximizes the log likelihood of the expert \n trajectories via gradient descent.\n \n The gradient is the difference between the mean empirical state visitation \n counts computed from the expert trajectories and the occupancy measure of \n the MDP under a policy induced by the reward vector.\n\n Parameters\n ----------\n mdp : object\n Instance of the MDP class.\n gamma : float \n Discount factor; 0<=gamma<=1.\n trajectories : 3D numpy array\n Expert trajectories. 
\n Dimensions: [number of traj, timesteps in the traj, state and action].\n epochs : int\n Number of iterations gradient descent will run.\n learning_rate : float\n Learning rate for gradient descent.\n r : 1D numpy array\n Initial reward vector with the length equal to the #states in the MDP.\n horizon : int\n Horizon for the finite horizon version of value iteration.\n Returns\n -------\n 1D numpy array\n Reward vector computed with Maximum Causal Entropy algorithm from \n the expert trajectories.\n\n Note\n -------\n Following the Levine implementation, the state features are assumed to \n be one-hot encodings of the state. If this is not the case, reward \n would have to have the shape (feature.shape[0]), and the gradient of the \n IRL log likelihood would be a dot product of the current expression \n for dL_dr and the feature matrix.\n \"\"\" \n\n # Compute the empirical state-action visitation counts and the probability \n # of a trajectory starting in state s from the expert trajectories.\n sa_visit_count, P_0 = compute_s_a_visitations(mdp, gamma, trajectories)\n \n if r is None:\n r = np.random.rand(mdp.nS)\n\n for i in range(epochs):\n V = compute_value_boltzmann(mdp, gamma, r, horizon=horizon)\n \n # Compute the Boltzmann policy \\pi_{s,a} = \\exp(Q_{s,a} - V_s) \n policy = compute_policy(mdp, gamma, r=r, V=V) \n \n # IRL log likelihood term: \n # L = 0; for all traj: for all (s, a) in traj: L += Q[s,a] - V[s]\n L = np.sum(sa_visit_count * np.log(policy))\n \n # The expected #times policy π visits state s in a given #timesteps.\n D = compute_D(mdp, gamma, policy, P_0, t_max=trajectories.shape[1]) \n\n # Mean state visitation count of expert trajectories\n # mean_s_visit_count[s] = ( \\sum_{i,t} 1_{traj_s_{i,t} = s}) / num_traj\n mean_s_visit_count = np.sum(sa_visit_count,1) / trajectories.shape[0]\n\n # IRL log likelihood gradient w.r.t reward. Corresponds to line 9 of \n # Algorithm 2 from the MaxCausalEnt IRL paper \n # www.cs.cmu.edu/~bziebart/publications/maximum-causal-entropy.pdf. \n # Refer to the Note in this function. Minus sign to get the gradient \n # of negative log likelihood, which we then minimize with GD.\n dL_dr = -(mean_s_visit_count - D)\n\n # Gradient descent\n r = r - learning_rate * dL_dr\n\n print('Epoch: ',i, 'log likelihood of all traj: ', L, \n 'average per traj step: ', \n L/(trajectories.shape[0] * trajectories.shape[1]))\n return r\n\n\nclass MDP(object):\n \"\"\"\n MDP object\n\n Attributes\n ----------\n self.nS : int\n Number of states in the MDP.\n self.nA : int\n Number of actions in the MDP.\n self.P : two-level dict of lists of tuples\n First key is the state and the second key is the action. \n self.P[state][action] is a list of tuples (prob, nextstate, reward).\n self.T : 3D numpy array\n The transition prob matrix of the MDP. 
p(s'|s,a) = self.T[s,a,s']\n \"\"\"\n def __init__(self, env):\n P, nS, nA, desc = MDP.env2mdp(env)\n self.P = P # state transition and reward probabilities, explained below\n self.nS = nS # number of states\n self.nA = nA # number of actions\n self.desc = desc # 2D array specifying what each grid cell means \n self.env = env\n self.T = self.get_transition_matrix()\n\n def env2mdp(env):\n return ({s : {a : [tup[:3] for tup in tups] \n for (a, tups) in a2d.items()} for (s, a2d) in env.P.items()}, \n env.nS, env.nA, env.desc)\n \n def get_transition_matrix(self):\n \"\"\"Return a matrix with index S,A,S' -> P(S'|S,A)\"\"\"\n T = np.zeros([self.nS, self.nA, self.nS])\n for s in range(self.nS):\n for a in range(self.nA):\n transitions = self.P[s][a]\n s_a_s = {t[1]:t[0] for t in transitions}\n for s_prime in range(self.nS):\n if s_prime in s_a_s:\n T[s, a, s_prime] = s_a_s[s_prime]\n return T\n\n\ndef softmax(x1,x2):\n \"\"\" \n Numerically stable computation of log(exp(x1) + exp(x2))\n described in Algorithm 9.2 of Ziebart's PhD thesis \n http://www.cs.cmu.edu/~bziebart/publications/thesis-bziebart.pdf.\n\n Note that softmax(softmax(x1,x2), x3) = log(exp(x1) + exp(x2) + exp(x3))\n \"\"\"\n max_x = np.amax((x1,x2))\n min_x = np.amin((x1,x2))\n return max_x + np.log(1+np.exp(min_x - max_x))\n\ndef compute_value_boltzmann(mdp, gamma, r, horizon = None, threshold=1e-4):\n \"\"\"\n Find the optimal value function via value iteration with the max-ent \n Bellman backup given at Algorithm 9.1 in Ziebart's PhD thesis \n http://www.cs.cmu.edu/~bziebart/publications/thesis-bziebart.pdf.\n\n Parameters\n ----------\n mdp : object\n Instance of the MDP class.\n gamma : float \n Discount factor; 0<=gamma<=1.\n r : 1D numpy array\n Initial reward vector with the length equal to the \n number of states in the MDP.\n horizon : int\n Horizon for the finite horizon version of value iteration.\n threshold : float\n Convergence threshold.\n\n Returns\n -------\n 1D numpy array\n Array of shape (mdp.nS), each V[s] is the value of state s under \n the reward r and Boltzmann policy.\n \"\"\"\n \n V = np.copy(r)\n\n t = 0\n diff = float(\"inf\")\n while diff > threshold:\n V_prev = np.copy(V)\n for s in range(mdp.nS):\n # V_s_new = \\log[\\sum_a exp(r_s + gamma \\sum_{s'} p(s'|s,a)V_{s'})]\n for a in range(mdp.nA):\n # If-else statement is used to compute softmax correctly. 
\n # If V[s] is initialized as 0 and only the expression from \n # 'else' is used, there would be an additional e^0 in the sum.\n if a == 0:\n # V[s] = r_s + \\gamma \\sum_{s'} p(s'|s,a)V_{s'}\n V[s] = r[s] + gamma * np.dot(mdp.T[s, a, :], V_prev) \n else:\n # V[s] = log(exp(V[s]) \n # + exp(r_s + \\gamma \\sum_{s'} p(s'|s,a)V_{s'}))\n V[s] = softmax(V[s], \n r[s] + gamma*np.dot(mdp.T[s, a, :], V_prev))\n \n if np.sum(np.isnan(V[s])) > 0: \n raise Exception('NaN encountered in value, iteration ', \n t, 'state',s, ' action ', a)\n \n diff = np.amax(abs(V_prev - V))\n \n t+=1\n if horizon is not None:\n if t==horizon: break\n return V\n\n\ndef compute_policy(mdp, gamma, r=None, V=None, horizon=None, threshold=1e-4):\n \"\"\"\n Computes the Boltzmann policy \\pi_{s,a} = \\exp(Q_{s,a} - V_s).\n \n Parameters\n ----------\n mdp : object\n Instance of the MDP class.\n gamma : float \n Discount factor; 0<=gamma<=1.\n r : 1D numpy array\n Initial reward vector with the length equal to the #states in the MDP.\n V : 1D numpy array\n Value of each of the states of the MDP.\n horizon : int\n Horizon for the finite horizon version of value iteration.\n threshold : float\n Convergence threshold.\n\n Returns\n -------\n 2D numpy array\n Array of shape (mdp.nS, mdp.nA), each value p[s,a] is the probability \n of taking action a in state s.\n \"\"\"\n\n if V is None: \n V = compute_value_boltzmann(mdp, gamma, r, horizon, threshold)\n\n policy = np.zeros((mdp.nS, mdp.nA))\n for s in range(mdp.nS):\n for a in range(mdp.nA):\n # This is exp(Q_{s,a} - V_s)\n policy[s,a] = np.exp(r[s] + np.dot(mdp.T[s, a,:], gamma*V) - V[s])\n \n # Hack for finite horizon length to make the probabilities sum to 1:\n policy = policy / np.sum(policy, axis=1).reshape((mdp.nS, 1))\n\n if np.sum(np.isnan(policy)) > 0: \n raise Exception('NaN encountered in policy')\n \n return policy\n\n\ndef generate_trajectories(mdp, policy, timesteps=20, num_traj=50):\n \"\"\"\n Generates trajectories in the MDP given a policy.\n \"\"\"\n s = mdp.env.reset()\n \n trajectories = np.zeros([num_traj, timesteps, 2]).astype(int)\n \n for d in range(num_traj):\n for t in range(timesteps):\n action = np.random.choice(range(mdp.nA), p=policy[s, :])\n trajectories[d, t, :] = [s, action]\n s, _, _, _ = mdp.env.step(action)\n s = mdp.env.reset()\n \n return trajectories\n\n\ndef compute_s_a_visitations(mdp, gamma, trajectories):\n \"\"\"\n Computes the empirical state-action visitation counts and the probability \n of a trajectory starting in state s from the expert trajectories.\n \n Empirical state-action visitation counts:\n sa_visit_count[s,a] = \\sum_{i,t} 1_{traj_s_{i,t} = s AND traj_a_{i,t} = a}\n\n P_0(s) -- probability that the trajectory will start in state s. \n P_0[s] = \\sum_{i,t} 1_{t = 0 AND traj_s_{i,t} = s} / i\n Used in computing the occupancy measure of a MDP under a given policy.\n\n Parameters\n ----------\n mdp : object\n Instance of the MDP class.\n gamma : float \n Discount factor; 0<=gamma<=1.\n trajectories : 3D numpy array\n Expert trajectories. 
\n Dimensions: [number of traj, timesteps in the traj, state and action].\n\n Returns\n -------\n (2D numpy array, 1D numpy array)\n Arrays of shape (mdp.nS, mdp.nA) and (mdp.nS).\n \"\"\"\n\n s_0_count = np.zeros(mdp.nS)\n sa_visit_count = np.zeros((mdp.nS, mdp.nA))\n \n for traj in trajectories:\n # traj[0][0] is the state of the first timestep of the trajectory.\n s_0_count[traj[0][0]] += 1\n for (s, a) in traj:\n sa_visit_count[s, a] += 1\n \n # Count into probability \n P_0 = s_0_count / trajectories.shape[0]\n \n return(sa_visit_count, P_0)\n\n\ndef compute_D(mdp, gamma, policy, P_0=None, t_max=None, threshold = 1e-6):\n \"\"\"\n Computes occupancy measure of a MDP under a given time-constrained policy \n -- the expected number of times that policy π visits state s in a given \n number of timesteps.\n \n Described in Algorithm 9.3 of Ziebart's PhD thesis \n http://www.cs.cmu.edu/~bziebart/publications/thesis-bziebart.pdf.\n\n Parameters\n ----------\n mdp : object\n Instance of the MDP class.\n gamma : float \n Discount factor; 0<=gamma<=1.\n policy : 2D numpy array\n policy[s,a] is the probability of taking action a in state s.\n P_0 : 1D numpy array of shape (mdp.nS)\n i-th element is the probability that the traj will start in state i.\n t_max : int\n number of timesteps the policy is executed.\n\n Returns\n -------\n 1D numpy array of shape (mdp.nS)\n \"\"\"\n\n if P_0 is None: P_0 = np.ones(mdp.nS) / mdp.nS\n D_prev = np.copy(P_0) \n \n t = 1\n diff = float(\"inf\")\n while diff > threshold:\n \n # Line 6 of Algorithm 9.3: \n # for all s: D[s] <- P_0[s]\n D = np.copy(P_0)\n\n for s in range(mdp.nS):\n for a in range(mdp.nA):\n # Line 9 of Algorithm 9.3:\n # for all s_prime reachable from s by taking a do:\n for p_sprime, s_prime, _ in mdp.P[s][a]:\n # Line 10 of Algorithm 9.3:\n D[s_prime] += D_prev[s] * policy[s, a] * p_sprime\n\n diff = np.amax(abs(D_prev - D)) \n D_prev = np.copy(D)\n \n if t_max is not None:\n t+=1\n if t==t_max: break\n \n if np.sum(np.isnan(D_prev)) > 0: \n raise Exception('NaN encountered in occupancy measure')\n return D\n\n\ndef main():\n learning_rate = 0.1\n epochs = 20\n \n gamma = 1\n horizon = 200\n traj_len = 15\n\n env = FrozenLakeEnvMultigoal(goal=2)\n env.seed(0); prng.seed(10)\n mdp1 = MDP(FrozenLakeEnvMultigoal(is_slippery=False, goal=1))\n r1 = np.zeros(mdp1.nS)\n r1[-1] = 1\n print('Reward used to generate expert trajectories: ', r1)\n\n policy1 = compute_policy(mdp1, gamma, r1, threshold=1e-8, horizon=horizon)\n trajectories1 = generate_trajectories(mdp1, policy1, traj_len, 200)\n print('Generated ', trajectories1.shape[0],' traj of length ', traj_len)\n\n sa_visit_count, _ = compute_s_a_visitations(mdp1, gamma, trajectories1)\n print('Log likelihood of all traj under the policy generated', \n 'from the original reward: ', \n np.sum(sa_visit_count * np.log(policy1)), \n 'average per traj step: ', \n np.sum(sa_visit_count * np.log(policy1)) / \n (trajectories1.shape[0] * trajectories1.shape[1]), '\\n' )\n\n r = np.random.rand(mdp1.nS)\n print('Randomly initialized reward: ',r)\n\n r = max_causal_ent_irl(mdp1, gamma, trajectories1, epochs, learning_rate,\n r = r, horizon=horizon)\n\n print('Final reward: ', r)\n\nif __name__ == \"__main__\":\n main()","sub_path":"MaxCausalEntIRL/MaxCausalEnt.py","file_name":"MaxCausalEnt.py","file_ext":"py","file_size_in_byte":13841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"422475194","text":"from rest_framework import viewsets\n\n\nclass 
GenericViewMixin(viewsets.ViewSet):\n service_class = None\n lookup_field = 'pk'\n lookup_url_kwarg = None\n\n def get_serializer(self, *args, **kwargs):\n serializer_class = self.get_serializer_class()\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)\n\n def get_serializer_class(self):\n assert self.serializer_class is not None, (\n \"'%s' should either include a `serializer_class` attribute, \"\n \"or override the `get_serializer_class()` method.\"\n % self.__class__.__name__\n )\n\n return self.serializer_class\n\n def get_serializer_context(self):\n return {\n 'request': self.request,\n 'format': self.format_kwarg,\n 'view': self\n }\n\n def get_service_class(self):\n assert self.service_class is not None, (\n \"'%s' should either include a `service_class` attribute, \"\n \"or override the `get_service_class()` method.\"\n % self.__class__.__name__\n )\n return self.service_class # previously fell through with `pass`, so get_service() would have called None(*args)\n\n def get_service(self, *args):\n service_class = self.get_service_class()\n return service_class(*args)\n\n def get_object(self):\n lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field\n assert lookup_url_kwarg in self.kwargs, (\n 'Expected view %s to be called with a URL keyword argument '\n 'named \"%s\". Fix your URL conf, or set the `.lookup_field` '\n 'attribute on the view correctly.' %\n (self.__class__.__name__, lookup_url_kwarg)\n )\n filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}\n print(filter_kwargs)","sub_path":"theatre/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"544544808","text":"from pgdrive.constants import TerminationState\nfrom pgdrive.envs.pgdrive_env import PGDriveEnv\nfrom pgdrive.utils import PGConfig\nfrom pgdrive.utils.math_utils import clip\n\n\nclass SafePGDriveEnv(PGDriveEnv):\n def default_config(self) -> PGConfig:\n config = super(SafePGDriveEnv, self).default_config()\n config.update(\n {\n \"environment_num\": 100,\n \"accident_prob\": 0.5,\n \"safe_rl_env\": True, # Should always be True. 
But we just leave it here for historical reason.\n\n # ===== reward scheme =====\n \"crash_vehicle_penalty\": 0.,\n \"crash_object_penalty\": 0.,\n \"out_of_road_penalty\": 0.,\n\n # ===== cost scheme\n \"crash_vehicle_cost\": 1,\n \"crash_object_cost\": 0.5,\n \"out_of_road_cost\": 1., # only give penalty for out_of_road\n \"traffic_density\": 0.2,\n \"use_lateral\": False\n },\n allow_overwrite=True\n )\n return config\n\n def done_function(self, vehicle_id: str):\n done, done_info = super(SafePGDriveEnv, self).done_function(vehicle_id)\n if self.config[\"safe_rl_env\"]:\n if done_info[TerminationState.CRASH_VEHICLE]:\n done = False\n elif done_info[TerminationState.CRASH_OBJECT]:\n done = False\n return done, done_info\n\n def reward_function(self, vehicle_id: str):\n \"\"\"\n Override this func to get a new reward function\n :param vehicle_id: id of BaseVehicle\n :return: reward\n \"\"\"\n vehicle = self.vehicles[vehicle_id]\n step_info = dict()\n\n # Reward for moving forward in current lane\n current_lane = vehicle.lane if vehicle.lane in vehicle.routing_localization.current_ref_lanes else \\\n vehicle.routing_localization.current_ref_lanes[0]\n long_last, _ = current_lane.local_coordinates(vehicle.last_position)\n long_now, lateral_now = current_lane.local_coordinates(vehicle.position)\n\n # reward for lane keeping, without it vehicle can learn to overtake but fail to keep in lane\n reward = 0.0\n if self.config[\"use_lateral\"]:\n lateral_factor = clip(\n 1 - 2 * abs(lateral_now) / vehicle.routing_localization.get_current_lane_width(), 0.0, 1.0\n )\n else:\n lateral_factor = 1.0\n reward += self.config[\"driving_reward\"] * (long_now - long_last) * lateral_factor\n\n # Penalty for waiting\n low_speed_penalty = 0\n if vehicle.speed < 1:\n low_speed_penalty = self.config[\"low_speed_penalty\"] # encourage car\n reward -= low_speed_penalty\n reward -= self.config[\"general_penalty\"]\n\n reward += self.config[\"speed_reward\"] * (vehicle.speed / vehicle.max_speed)\n step_info[\"step_reward\"] = reward\n\n # for done\n if vehicle.crash_vehicle:\n reward = self.config[\"crash_vehicle_penalty\"]\n elif vehicle.crash_object:\n reward = self.config[\"crash_object_penalty\"]\n elif vehicle.out_of_route:\n reward = self.config[\"out_of_road_penalty\"]\n elif vehicle.crash_sidewalk:\n reward = self.config[\"out_of_road_penalty\"]\n elif vehicle.arrive_destination:\n reward += self.config[\"success_reward\"]\n\n return reward, step_info\n\n def cost_function(self, vehicle_id: str):\n vehicle = self.vehicles[vehicle_id]\n step_info = dict()\n step_info[\"cost\"] = 0\n if vehicle.crash_vehicle:\n step_info[\"cost\"] = self.config[\"crash_vehicle_cost\"]\n elif vehicle.crash_object:\n step_info[\"cost\"] = self.config[\"crash_object_cost\"]\n elif vehicle.out_of_route or vehicle.crash_sidewalk:\n step_info[\"cost\"] = self.config[\"out_of_road_cost\"]\n return step_info['cost'], step_info\n\n\nif __name__ == \"__main__\":\n env = SafePGDriveEnv(\n {\n \"accident_prob\": 1.0,\n \"manual_control\": True,\n \"use_render\": True,\n \"environment_num\": 1,\n \"start_seed\": 187,\n \"out_of_road_cost\": 1,\n \"debug\": True,\n \"cull_scene\": True,\n \"pg_world_config\": {\n \"pstats\": True\n },\n \"vehicle_config\": {\n \"show_lidar\": True,\n \"show_side_detector\": True,\n \"show_lane_line_detector\": True,\n \"side_detector\": dict(num_lasers=2, distance=50), # laser num, distance\n \"lane_line_detector\": dict(num_lasers=2, distance=20), # laser num, distance\n }\n }\n )\n\n o = env.reset()\n 
total_cost = 0\n    for i in range(1, 100000):\n        o, r, d, info = env.step([0, 1])\n        total_cost += info[\"cost\"]\n        env.render(text={\"cost\": total_cost, \"seed\": env.current_map.random_seed, \"reward\": r})\n        print(len(env.scene_manager.traffic_manager.traffic_vehicles))\n        if d:\n            total_cost = 0\n            print(\"done_cost:{}\".format(info[\"cost\"]))\n            print(\"Reset\")\n            env.reset()\n    env.close()\n","sub_path":"pgdrive/envs/generation_envs/safe_pgdrive_env.py","file_name":"safe_pgdrive_env.py","file_ext":"py","file_size_in_byte":5221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"487753115","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/2/1 11:02 AM\n# @Author : Eric\n\n# When you create a thread object, it does not start executing right away; it only\n# runs once you call its start() method (start() invokes the function you passed\n# in, handing it the arguments you supplied). Python threads execute in their own\n# system-level thread (for example a POSIX thread or a Windows thread) that is\n# fully managed by the operating system. Once started, a thread runs independently\n# until the target function returns. You can query a thread object's state to see\n# whether it is still running.\n\nimport time\nimport socket\n\ndef countdown(n):\n    while n > 0:\n        print('T-minus',n)\n        n -= 1\n        time.sleep(5)\n\n\n\nfrom threading import Thread\nt = Thread(target=countdown,args=(10,))\nt.start()\n\ndef checkoutThread(t,n):\n    while n > 0:\n        if t.is_alive():\n            print('Still running')\n        else:\n            print('Completed')\n        n -= 1\n        time.sleep(1)\n\n\n\nt2 = Thread(target=checkoutThread,args=(t,50))\nt2.start()\n\n\n# You can also join a thread to the current one and wait for it to terminate:\n# t.join()\n\n\nt3 = Thread(target=countdown, args=(100,), daemon=True)\nt3.start()\n# t3.join() would block the current thread\n# Daemon threads cannot be joined. They are, however, destroyed automatically\n# when the main thread terminates.\n\nprint('thread 4')\nclass CountdownTask:\n    def __init__(self):\n        self._running = True\n\n    def terminate(self):\n        self._running = False\n\n    def run(self, n):\n        while self._running and n > 0:\n            print('T-minus2', n)\n            n -= 1\n            time.sleep(5)\n        print('terminate')\n\nc = CountdownTask()\nt4 = Thread(target=c.run, args=(10,))\nt4.start()\nc.terminate() # Signal termination\nt4.join() # Wait for actual termination (if needed)\n\n\n\nclass IOTask(Thread):\n\n    def __init__(self,port,host):\n        Thread.__init__(self)\n        self._running = True\n        # bufsize = 1024\n        addr = (host, port)\n        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.client.connect(addr)\n\n\n\n    def terminate(self):\n        self._running = False\n\n    def run(self):\n        # self.client is a connected socket\n        #self.client.settimeout(5) # Set timeout period\n        self.client.send('Hello'.encode(\"utf8\"))\n        while self._running:\n            # Perform a blocking I/O operation w/ timeout\n            try:\n                data = self.client.recv(8192)\n                print('receive data %s'%data)\n                break\n            except socket.timeout:\n                continue\n        # Continued processing\n        # Terminated\n        return\n\n\ntask = IOTask(host='localhost',port=18001)\ntask.run()\n","sub_path":"Part_01_并发线程/Day_01_线程启动和停止.py","file_name":"Day_01_线程启动和停止.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"36164165","text":"\"\"\"kolayinibul URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom blog import views as post_views\nfrom base_pages import views as base_pages_view\nfrom user import views as user_pages_view\nfrom job import views as job_views\nfrom blog import views as blog_views\nfrom job import views as job_views\nfrom django.conf import settings\nfrom django.conf.urls import url, handler400, handler500\nfrom django.conf.urls.static import static\nfrom base_pages.sitemaps import PostSitemap, StaticViewSitemap, JobAdvertisementSitemap, CategorySitemap, \\\n SubcategorySitemap\nfrom django.contrib.sitemaps.views import sitemap\n\nsitemaps = {\n 'posts': PostSitemap,\n 'jobs': JobAdvertisementSitemap,\n 'static': StaticViewSitemap,\n 'category': CategorySitemap,\n 'subcategory': SubcategorySitemap,\n}\n\nurlpatterns = [\n # admin App:\n path('admin/', admin.site.urls),\n\n # user App:\n path('user/', include(\"user.urls\", namespace=\"user\")),\n path('accounts/login', include('django.contrib.auth.urls')),\n path('accounts/', include('django.contrib.auth.urls')),\n path('', include('django.contrib.auth.urls')),\n\n # oAuth Login:\n url(r'^oauth/', include('social_django.urls', namespace='social')),\n url(r'^settings/password/$', user_pages_view.github_password, name='password'),\n\n # blog App:\n path('posts/', include(\"blog.urls\", namespace=\"posts\")),\n path('categories/', include(\"blog.urls\", namespace=\"categories\")),\n path('categories/', blog_views.categories, name=\"categories\"),\n\n # job App:\n path('jobs/', include(\"job.urls\", namespace=\"jobs\")),\n\n # base_pages App:\n path('', base_pages_view.index, name=\"index\"),\n path('about_us/', base_pages_view.about, name=\"about_us\"),\n path('dashboard/', base_pages_view.dashboard, name=\"dashboard\"),\n path('my_posts/', base_pages_view.my_posts, name=\"my_posts\"),\n path('my_jobs/', base_pages_view.my_jobs, name=\"my_jobs\"),\n path('my_profile/', base_pages_view.my_profile, name=\"my_profile\"),\n path('faq/', base_pages_view.faq, name=\"faq\"),\n path('contact_us/', base_pages_view.contact_us, name=\"contact_us\"),\n path('search/', base_pages_view.search_view, name=\"search_view\"),\n path('privacy_page/', base_pages_view.privacy_page, name=\"privacy_page\"),\n path('jobs/', base_pages_view.job_page_coming_soon, name=\"job_page_coming_soon\"),\n path('sitemap.xml', sitemap, {'sitemaps': sitemaps},\n name='django.contrib.sitemaps.views.sitemap'),\n path('robots.txt/', include(\"robots.urls\", namespace=\"\")),\n url(r'^robots\\.txt', include('robots.urls')),\n\n # Advertorials\n path('ads.txt/', base_pages_view.ads, name=\"ads\"),\n\n # 404 and 500 Error Pages\n path('404_test/', base_pages_view.handler404, name=\"404_test\"),\n path('500_test/', base_pages_view.handler500, name=\"500_test\"),\n\n]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"kolayinibul/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"52720175","text":"if __name__ == '__main__':\n animals = [\n 'Brown Recluse Spider',\n 'Camels',\n 'Cape Gannet Bird',\n 'Chickens',\n 'Chimpanzee',\n 'Cuviers Dwarf Caimans',\n 'Dog',\n ]\n\n # Exercise 1: make it pass\n words_with_len = dict()\n assert words_with_len == {\n 'Brown Recluse Spider': 20, 'Camels': 6, 'Cape Gannet Bird': 16, 'Chickens': 8, 'Chimpanzee': 10,\n 'Cuviers 
Dwarf Caimans': 21, 'Dog': 3\n }\n\n # Exercise 2: make it pass\n words_with_even_len = dict()\n assert words_with_even_len == {\n 'Brown Recluse Spider': 20, 'Camels': 6, 'Cape Gannet Bird': 16, 'Chickens': 8, 'Chimpanzee': 10\n }\n\n # Exercise 3: make it pass\n assert 'Confluence'.startswith('Conf')\n # Filter words_with_len where key starts with 'C'\n words_with_prefix = dict()\n assert words_with_prefix == {\n 'Camels': 6, 'Cape Gannet Bird': 16, 'Chickens': 8, 'Chimpanzee': 10, 'Cuviers Dwarf Caimans': 21\n }\n","sub_path":"300 - Comprehentions/measure_words_length.py","file_name":"measure_words_length.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"129549657","text":"import unittest\nfrom news_parser.single_thread import *\nfrom news_parser.multi_thread import *\nimport xml.etree.ElementTree as ET\n\nfile = open(\"/Users/GreenUser/PycharmProjects/CALab1/fortest.txt\", 'r')\nasd = file.read()\nfor_test = ET.fromstring(asd)\nitem_for_test = for_test.find('.//item')\nitems = [txt for txt in for_test.findall('.//item')]\nfiltered_items = [i for i in items if contains_score(i)]\nurls = find_by_tag('url', \"/Users/GreenUser/PycharmProjects\"\n \"/CALab1/sportnews.xml\")\n\n\nclass test_single_thread(unittest.TestCase):\n def test_find_by_tag(self):\n self.assertEqual(find_by_tag('url', \"/Users/GreenUser\"\n \"/PycharmProjects\"\n \"/CALab1/sportnews.xml\"),\n ['http://sports.yahoo.com/'\n 'soccer//rss.xml',\n 'http://feeds.news.com.au/public'\n '/rss/2.0/fs_football_20.xml',\n 'http://www.espnfc.com/rss',\n 'http://www.espn.co.uk/rss/sport/story'\n '/feeds/0.xml?sport=3;type=2'])\n\n def test_contains_score(self):\n self.assertEqual(contains_score(item_for_test), False)\n\n def test_write_xml(self):\n self.assertEqual(write_xml(item_for_test, \"UT.xml\"), True)\n\n def test_multi_thread_parse(self):\n test = multi_thread_parse(urls, mock_data)\n result = filtered_items*4\n self.assertEqual(len(test), len(result))\n\n\nclass geventMock:\n def __init__(self):\n self.value = asd\n\n\ndef mock_data(urls):\n return [geventMock() for _ in range(len(urls))]\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(test_single_thread)\nunittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"unit_tests/unit_tests.py","file_name":"unit_tests.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"315652138","text":"import time\nimport multiprocessing\nfrom multiprocessing import Pool, cpu_count\nfrom tasks import get_prime_numbers\n\n\ndef main():\n with Pool(cpu_count() - 1) as pool:\n pool.starmap(get_prime_numbers, zip(range(1000, 16000)))\n pool.close()\n pool.join()\n\n\nif __name__ == \"__main__\":\n start_time = time.perf_counter()\n\n main()\n\n end_time = time.perf_counter()\n print(f\"Elapsed run time: {end_time - start_time} seconds.\")\n","sub_path":"amal_shaji/cpu-bound_parallel_1.py","file_name":"cpu-bound_parallel_1.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"580551160","text":"\"\"\" Parse Heimann defs and table and write to a format we can use. 
\"\"\"\n\nimport argparse\nimport ast\nimport itertools\nimport os\nfrom pprint import pprint\nimport re\n\nimport numpy as np\n\n# REGEXP for #define statements in defs.h\n# some amount of whitespace then #define then whitespace\n# identifier starts with uppercase then can be any word character\n# then whitespace\n# then (optionally) a token string which is -- at least so far -- numeric,\n# including hex\n# #define statements often end in // comments -- make sure to skip those.\nRE_DEF = re.compile(r'\s*#define\s+(?P<ident>[A-Z]\w*)\s+(?P<token>[0-9A-Fx]*)')\n# REGEXP for the IFDEF in both defs.h and table.c\nRE_IFDEF = re.compile(r'\s*#ifdef\s+(?P<ident>[A-Z]\w*)')\n# REGEXP for the ENDIF in both defs.h and table.c\nRE_ENDIF = re.compile(r'\s*#endif')\n# REGEXP for the CONST arrays in Table.c\nRE_CONST = re.compile(r'\s*const\s+unsigned\s+int\s+([A-Z]\w*)')\n\nDATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')\n\nPARSER = argparse.ArgumentParser(description=('Create data files for the '\n                                              'sensor constants and tables'))\nPARSER.add_argument('SampleCode_dir',\n                    help='Heimann SampleCode directory for the sensor type')\nPARSER.add_argument('device_name', nargs='?',\n                    help='Heimann device name string from defs.h')\nARGS = PARSER.parse_args()\n\n\ndef parse_defs(defsh):\n    \"\"\" Parse the defs.h file and create a dict of device_name's and their\n    constants. Also includes the _ALL \"device\" key for global constants. \"\"\"\n    all_defines = {'_ALL': {}}\n    current_device = None\n\n    with open(defsh) as fh:\n        for line in fh:\n            # Try to match this line against a #define\n            match = RE_DEF.match(line)\n            if match:\n                ident, token = match.groups()\n                if current_device is not None:\n                    current_device[ident] = _parse_int_bool(token)\n                else:\n                    all_defines['_ALL'][ident] = _parse_int_bool(token)\n\n                continue\n\n            # Wasn't a #define; try to match against an #ifdef\n            # If so, then \"open\" the device in our little state machine\n            match = RE_IFDEF.match(line)\n            if match:\n                ident = match.groups()[0]\n                if ident.startswith('HTPA'):\n                    current_device = {'_DEVICE_NAME': ident}\n                    all_defines[ident] = current_device\n\n    return all_defines\n\ndef parse_tables(tablec):\n    # set up a dict for tables.\n\n    with open(tablec) as fh:\n        tables = {}\n        current_device = None\n\n        for line in fh:\n            # Match for #ifdef in order to look for the device name\n            match = RE_IFDEF.match(line)\n            if match:\n                ident = match.groups()[0]\n                if ident.startswith('HTPA'):\n                    current_device = {}\n                    tables[ident] = current_device\n\n                continue\n\n            # Not #ifdef. Look for a constant definition\n            match = RE_CONST.match(line)\n            if match:\n                varname = match.groups()[0]\n                assert current_device is not None, \"const line without a current device\"\n\n                # Get everything after the = sign. For some definitions this is just\n                # the opening bracket; for some it's the entire array\n                tablelinestr = line.split('=')[1]\n                tablestrs = []\n\n                while True:\n                    # append the latest bit of the constant to our queue\n                    tablestrs.append(tablelinestr)\n                    # look for the end of the definition and break out of this loop\n                    if '};' in tablelinestr:\n                        break\n\n                    tablelinestr = next(fh)\n\n                # Create a python-ized version of the C table we've collected:\n                # Replace the {'s with ['s and get rid of the ;\n                tablestr = ' '.join(tablestrs).replace('{', '[').replace('}', ']')\\\n                    .replace(';', '')\n                # Fix tabs\n                # Some arrays are separated by tabs and not commas. 
Replace any\n # tab which is surrounded by digits\n tablestr = re.sub(r'(\\d)\\t(\\d)', r'\\1,\\2', tablestr)\n\n # literal_eval converts our table to a list (1d or 2d), including hex\n # values\n pylist = ast.literal_eval(tablestr)\n # we're assuming that the values in tables.c are all uint32s\n flattened = pylist if isinstance(pylist[0], int) \\\n else list(itertools.chain.from_iterable(pylist))\n assert max(flattened) <= 4294967295, \\\n 'List has value %s' % max(flattened)\n assert min(flattened) >= 0\n\n current_device[varname] = np.array(pylist, dtype='uint32')\n\n continue\n\n match = RE_ENDIF.match(line)\n if match:\n current_device = None\n\n return tables\n\ndef _parse_int_bool(val):\n if not val:\n return True\n\n return int(val, 0)\n\n\n\nif __name__ == '__main__':\n defsh = os.path.join(ARGS.SampleCode_dir, 'defs.h')\n defines = parse_defs(defsh)\n\n tablec = os.path.join(ARGS.SampleCode_dir, 'Table.c')\n tables = parse_tables(tablec)\n\n if not ARGS.device_name:\n tables_list_by_tablenum = {}\n\n for k, v in defines.items():\n if k != '_ALL':\n tablenum = v.get('TABLENUMBER')\n if tablenum not in tables_list_by_tablenum:\n tables_list_by_tablenum[tablenum] = []\n\n tables_list_by_tablenum[tablenum].append(k)\n\n pprint(tables_list_by_tablenum)\n else:\n device = ARGS.device_name\n\n if device not in tables or device not in defines:\n print('Defs or Table for {} not found'.format(device))\n else:\n device_defs = defines[device]\n table = tables[device]\n pprint(device_defs)\n pprint(table)\n\n # Ensure our parsed table sizes match the lengths defined\n assert device_defs['TABLENUMBER']\n assert table['TempTable'].shape == (device_defs['NROFADELEMENTS'], device_defs['NROFTAELEMENTS'])\n\n # NROF*ELEMENTS is used in defs.h to define the size of the array, and the\n # C code to loop through the array. However, there are instances where\n # the number of elements defined in defs.h is smaller than the defined\n # size. 
I guess that's OK.\n assert len(table['XTATemps']) <= device_defs['NROFTAELEMENTS']\n assert len(table['YADValues']) <= device_defs['NROFADELEMENTS']\n\n\n fname = os.path.join(DATA_DIR, device)\n np.savez_compressed(fname, table=table['TempTable'],\n ta_axes=table['XTATemps'], dk_axes=table['YADValues'],\n metadata=device_defs)\n\n print('{} saved'.format(fname))","sub_path":"thermografree/save_tables.py","file_name":"save_tables.py","file_ext":"py","file_size_in_byte":6399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"171159087","text":"from flask import Flask\nfrom flask import render_template\nimport RPi.GPIO as GPIO\n\n\napp = Flask(__name__)\nGPIO.setmode(GPIO.BCM)\n@app.route(\"/LED/ON\")\ndef ledon():\n GPIO.setup(24, GPIO.OUT)\n GPIO.output(24, GPIO.HIGH)\n return 'led test'\n@app.route(\"/\")\ndef index():\n return render_template(\"selected.html\")\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8886,debug=True)\n \n \n","sub_path":"webapp/ledwep2.py","file_name":"ledwep2.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"63218205","text":"#!/usr/bin/python\nimport subprocess\nimport sys\nimport getpass\nimport os\nimport signal\n\nimport ssh_cmd as ssh\nimport render_manager\nimport config\n\n'''\nParent PID - Host - Client - Render Engine Path -\nRender Files Path - Render Files - Log File - Render Arguments\n'''\nproject_path = os.path.dirname(os.path.realpath(__file__))\npID_instance = os.getpid()\nparent_pID = os.getppid()\n\n\n# Load system arguments\ntry:\n parent_pid = int(sys.argv[1])\n host = str(sys.argv[2])\n client = str(sys.argv[3])\n client_id = int(sys.argv[4])\n render_engine_path = str(sys.argv[5])\n log_file = str(sys.argv[6])\n render_arguments = str(sys.argv[7])\n render_files_path = str(sys.argv[8])\n render_files = sys.argv[9:]\n\nexcept:\n sys.exit()\n\n\ndef sigterm_handler(signal, frame):\n ssh.ssh_close(ssh_connection)\n render_manager.clean(database_path, client_id)\n sys.exit(0)\n\nsignal.signal(signal.SIGTERM, sigterm_handler)\n\n\n# If successful, load the settings\nsettings = config.Settings()\ndatabase_path = settings.render_database_file\nuser = getpass.getuser()\n\n# Start SSH Connection with Client\nssh_connection = ssh.ssh_start(client, user)\nchild_pID = subprocess.check_output([\"pgrep\", \"-P\", str(pID_instance)])\n\n# Add PID, Parent PID and SSH PID to database\nrender_manager.add_pid(database_path, client_id, pID_instance)\nrender_manager.add_pid(database_path, client_id, parent_pID)\nrender_manager.add_pid(database_path, client_id, child_pID)\n\n# Send command for every frame\nfor i, frame in enumerate(render_files):\n render_command = \"{0} -f {1} {2}\".format(render_engine_path,\n render_files_path + \"/\" +\n frame, \"-V 2 0 0 1 0\")\n update_render_db_command = (\"python {0}/render_manager.py {1}\".format(\n project_path,\n client_id) +\n \" None None None None {0}/{1}\".format(\n i + 1,\n len(render_files)))\n ssh.send_cmd(ssh_connection, update_render_db_command)\n ssh.send_cmd(ssh_connection, render_command)\n\n# Close SSH Connection with Client\nssh.ssh_close(ssh_connection)\n\n# Update the Render Database\nrender_manager.clean(database_path, client_id)\n","sub_path":"mantra.py","file_name":"mantra.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
+{"seq_id":"100818698","text":"from flask import Flask, render_template\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home():\n    s = \"<table>\"\n    data = open(\"Lincoln_Square_BID_Business_List.csv\", \"r\")\n    t = data.readline().split(',')\n    for b in t:\n        s = s + \"<th>\" + b + \"</th>\"\n    data.readline()\n    for line in data:\n        s = s + \"<tr>\"\n        t = line.split(',')\n        for b in t:\n            s = s + \"<td>\" + b + \"</td>\"\n        s = s + \"</tr>\"\n    data.close()\n    s = s + \"</table>\"\n    replacer(\"home.html\", s)\n    return render_template(\"home.html\")\n\n@app.route(\"/about\")\ndef about():\n    replacer(\"about.html\", \"<p>This is really just us messing around with css and tables, honestly.</p>
\")\n return render_template(\"about.html\")\n\ndef replacer(filename, replaceWith):\n with open(\"templates/\" + filename, \"wt\") as fout:\n with open(\"templates/data.html\", \"rt\") as fin:\n for line in fin:\n fout.write(line.replace(\"---REPLACE---\", replaceWith))\n return\n\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n","sub_path":"5/intro-proj1/jessica_fawn/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"77671647","text":"import requests\nfrom PIL import Image\nfrom io import BytesIO\n\n\ndef tkinter_user_prompt(img_url, print_func):\n \"\"\"Display captcha from given URL and ask user for input in GUI window.\n\n Arguments:\n img_url (str): URL of the image with CAPTCHA\n\n Returns:\n str: User answer to the CAPTCHA\n \"\"\"\n import tkinter as tk\n from PIL import ImageTk\n\n root = tk.Tk()\n root.focus_force()\n root.title(\"Opiš kód z obrázku\")\n # use width x height + x_offset + y_offset (no spaces!)\n root.geometry(\"300x140\")\n\n def disable_event():\n pass\n\n root.protocol(\"WM_DELETE_WINDOW\", disable_event)\n\n u = requests.get(img_url)\n raw_data = u.content\n\n im = Image.open(BytesIO(raw_data))\n photo = ImageTk.PhotoImage(im)\n label = tk.Label(image=photo)\n label.image = photo\n label.pack()\n\n entry = tk.Entry(root)\n entry.pack()\n entry.bind('', lambda event: root.quit())\n entry.focus()\n\n tk.Button(root, text='Send', command=root.quit).pack()\n\n root.mainloop() # Wait for user input\n value = entry.get()\n root.destroy()\n return value\n\n\nclass AutoReadCaptcha:\n def __init__(self, model_path, model_url, print_func=print):\n from urllib.request import urlretrieve\n import os\n\n def reporthook(blocknum, block_size, total_size):\n \"\"\"\n Credits to jfs from https://stackoverflow.com/questions/13881092/download-progressbar-for-python-3\n \"\"\"\n readsofar = blocknum * block_size\n if total_size > 0:\n percent = readsofar * 1e2 / total_size\n s = \"\\r%5.1f%% %*d / %d\" % (\n percent, len(str(total_size)), readsofar, total_size)\n print_func(s, end=\"\")\n if readsofar >= total_size: # near the end\n print_func(flush=True)\n else: # total size is unknown\n print_func(\"read %d\" % (readsofar,), flush=True)\n\n if not os.path.exists(model_path):\n print_func(f\"Downloading model from {model_url}\")\n # download into temp model in order to detect incomplete downloads\n model_temp_path = f\"{model_path}.tmp\"\n urlretrieve(model_url, model_temp_path, reporthook)\n print_func(\"Downloading of the model finished\")\n\n # rename temp model\n os.rename(model_temp_path, model_path)\n\n # due to multiprocessing the model model have to be loaded in each\n # process independently\n self.model_content = open(model_path, \"rb\").read()\n self.print_func = print_func\n\n def __call__(self, img_url, print_func):\n import tflite_runtime.interpreter as tflite\n import numpy as np\n\n print_func(\"Auto solving CAPTCHA\")\n\n interpreter = tflite.Interpreter(model_content=self.model_content)\n\n u = requests.get(img_url)\n raw_data = u.content\n\n img = Image.open(BytesIO(raw_data))\n img = np.asarray(img)\n\n # normalize to [0...1]\n img = (img / 255).astype(np.float32)\n\n # convert to grayscale\n r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n input = 0.299 * r + 0.587 * g + 0.114 * b\n\n # input has nowof shape (70, 175)\n # we modify dimensions to match model's input\n input = np.expand_dims(input, 0)\n input = 
np.expand_dims(input, -1)\n # input is now of shape (batch_size, 70, 175, 1)\n # output will have shape (batch_size, 4, 26)\n\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n interpreter.set_tensor(input_details[0]['index'], input)\n interpreter.invoke()\n\n # predict and get the output\n output = interpreter.get_tensor(output_details[0]['index'])\n # now get labels\n labels_indices = np.argmax(output, axis=2)\n\n available_chars = \"abcdefghijklmnopqrstuvwxyz\"\n\n def decode(li):\n result = []\n for char in li:\n result.append(available_chars[char])\n return \"\".join(result)\n\n decoded_label = [decode(x) for x in labels_indices][0]\n print_func(f\"CAPTCHA auto solved as '{decoded_label}'\")\n return decoded_label\n","sub_path":"uldlib/captcha.py","file_name":"captcha.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"376370302","text":"import logging\nimport os\n\nimport jinja2\n\nfrom slvcodec import entity, package, typs, package_generator, config\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_filetestbench(enty):\n '''\n Generate a testbench that reads inputs from a file, and writes outputs to\n a file.\n Args:\n `enty`: A resolved entity object parsed from the VHDL.\n '''\n # Generate a record type for the entity inputs (excluding clock).\n inputs = [p for p in enty.ports.values()\n if p.direction == 'in' and p.name not in entity.CLOCK_NAMES]\n input_names_and_types = [(p.name, p.typ) for p in inputs]\n input_record = typs.Record('t_input', input_names_and_types)\n # Generate a record type for the entity outputs.\n outputs = [p for p in enty.ports.values() if p.direction == 'out']\n output_names_and_types = [(p.name, p.typ) for p in outputs]\n output_record = typs.Record('t_output', output_names_and_types)\n # Generate declarations and definitions for the functions to convert\n # the input and output types to and from std_logic_vector.\n input_slv_declarations, input_slv_definitions = (\n package_generator.make_record_declarations_and_definitions(\n input_record))\n output_slv_declarations, output_slv_definitions = (\n package_generator.make_record_declarations_and_definitions(\n output_record))\n # Generate use clauses required by the testbench.\n use_clauses = '\\n'.join([\n 'use {}.{}.{};'.format(u.library, u.design_unit, u.name_within)\n for u in enty.uses.values()])\n use_clauses += '\\n' + '\\n'.join([\n 'use {}.{}_slvcodec.{};'.format(u.library, u.design_unit, u.name_within)\n for u in enty.uses.values() if u.library not in ('ieee', 'std') and '_slvcodec' not in u.design_unit])\n # Get the list of generic parameters for the testbench.\n generic_params = '\\n'.join(['{}: {};'.format(g.name, g.typ)\n for g in enty.generics.values()])\n # Combine the input and output record definitions with the slv conversion\n # functions.\n definitions = '\\n'.join([\n input_record.declaration(), output_record.declaration(),\n input_slv_declarations, input_slv_definitions,\n output_slv_declarations, output_slv_definitions])\n clk_names = [p.name for p in enty.ports.values()\n if (p.direction == 'in') and (p.name in entity.CLOCK_NAMES)]\n clk_connections = '\\n'.join(['{} => {},'.format(clk, clk) for clk in clk_names])\n connections = ',\\n'.join(['{} => {}.{}'.format(\n p.name, {'in': 'input_data', 'out': 'output_data'}[p.direction], p.name)\n for p in enty.ports.values() if p.name not in clk_names])\n 
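# Build the VHDL generic map for the DUT instance: each entity generic is wired to the testbench generic of the same name.\n    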
dut_generics = ',\\n'.join(['{} => {}'.format(g.name, g.name)\n for g in enty.generics.values()])\n # Read in the testbench template and format it.\n template_fn = os.path.join(os.path.dirname(__file__), 'templates',\n 'file_testbench.vhd')\n with open(template_fn, 'r') as f:\n filetestbench_template = jinja2.Template(f.read())\n filetestbench = filetestbench_template.render(\n test_name='{}_tb'.format(enty.identifier),\n use_clauses=use_clauses,\n generic_params=generic_params,\n definitions=definitions,\n dut_generics=dut_generics,\n dut_name=enty.identifier,\n clk_connections=clk_connections,\n connections=connections,\n )\n return filetestbench\n\n\ndef prepare_files(directory, filenames, top_entity):\n '''\n Parses VHDL files, and generates a testbench for `top_entity`.\n Returns a tuple of a list of testbench files, and a dictionary\n of parsed objects.\n '''\n entities, packages = entity.process_files(filenames)\n resolved_entity = entities[top_entity]\n new_fns = [\n os.path.join(config.vhdldir, 'read_file.vhd'),\n os.path.join(config.vhdldir, 'write_file.vhd'),\n os.path.join(config.vhdldir, 'clock.vhd'),\n ]\n # Make file testbench\n ftb = make_filetestbench(resolved_entity)\n ftb_fn = os.path.join(directory, '{}_tb.vhd'.format(\n resolved_entity.identifier))\n with open(ftb_fn, 'w') as f:\n f.write(ftb)\n new_fns.append(ftb_fn)\n resolved = {\n 'entities': entities,\n 'packages': packages,\n }\n return new_fns, resolved\n\n\ndef add_slvcodec_files(directory, filenames):\n '''\n Parses files, and generates helper packages for existing packages that\n contain functions to convert types to and from std_logic_vector.\n '''\n entities, packages = entity.process_files(filenames, must_resolve=False)\n combined_filenames = [os.path.join(config.vhdldir, 'txt_util.vhd'),\n os.path.join(config.vhdldir, 'slvcodec.vhd')]\n for fn in filenames:\n parsed = package.parsed_from_filename(fn)\n if fn not in combined_filenames:\n combined_filenames.append(fn)\n if parsed.packages and fn[-len('slvcodec.vhd'):] != 'slvcodec.vhd':\n package_name = parsed.packages[0].identifier\n slvcodec_pkg = package_generator.make_slvcodec_package(packages[package_name])\n slvcodec_package_filename = os.path.join(\n directory, '{}_slvcodec.vhd'.format(package_name))\n with open(slvcodec_package_filename, 'w') as f:\n f.write(slvcodec_pkg)\n combined_filenames.append(slvcodec_package_filename)\n return combined_filenames\n","sub_path":"slvcodec/filetestbench_generator.py","file_name":"filetestbench_generator.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"55052407","text":"#!/usr/bin/env python3\r\nimport sys\r\nfrom collections import Counter\r\nclass Person(object):\r\n \"\"\"\r\n 返回具有给定名称的 Person 对象\r\n \"\"\"\r\n def __init__(self, name):\r\n self.name = name\r\n def get_details(self):\r\n \"\"\"\r\n 返回包含人名的字符串\r\n \"\"\"\r\n return self.name\r\n def get_grade(self,fenshu):\r\n return self.fenshu\r\nclass Student(Person):\r\n \"\"\"\r\n 返回 Student 对象,采用 name, branch, year 3 个参数\r\n \"\"\"\r\n def __init__(self, name, branch, year):\r\n Person.__init__(self, name)\r\n self.branch = branch\r\n self.year = year\r\n def get_details(self):\r\n \"\"\"\r\n 返回包含学生具体信息的字符串\r\n \"\"\"\r\n return \"{} studies {} and is in {} year.\".format(self.name, self.branch, self.year)\r\n def get_grade(self,fenshu):\r\n ps=0\r\n fl=0\r\n for f,s in Counter(fenshu).most_common(4):\r\n if f =='A':\r\n ps+=s\r\n elif f =='B':\r\n 
ps+=s\r\n elif f =='C':\r\n ps+=s\r\n elif f =='D':\r\n fl+=s\r\n fs=(\"Pass:{}, Fail:{}\").format(ps,fl)\r\n return fs\r\nclass Teacher(Person):\r\n \"\"\"\r\n 返回 Teacher 对象,采用字符串列表作为参数\r\n \"\"\"\r\n def __init__(self, name, papers):\r\n Person.__init__(self, name)\r\n self.papers = papers\r\n def get_details(self):\r\n return \"{} teaches {}\".format(self.name, ','.join(self.papers))\r\n def get_grade(self,fenshu):\r\n fs=[]\r\n for f,s in Counter(fenshu).most_common(4):\r\n fs.append(f+':'+str(s))\r\n return \",\".join(fs)\r\nif __name__=='__main__':\r\n juese=sys.argv[1]\r\n if juese == 'teacher':\r\n teacher=Teacher(\"t1\",\"k1\")\r\n fenshu=sys.argv[2]\r\n print(teacher.get_grade(fenshu))\r\n elif juese== 'student':\r\n s=Student(\"susan\",\"CSE\",2009)\r\n print(s.get_grade(sys.argv[2]))\r\n","sub_path":"py3/tiaozhan4/student_teacher.py","file_name":"student_teacher.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"561850162","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.4-intel/egg/tumbledore/templatetags/tumbledore.py\n# Compiled at: 2013-01-29 13:42:58\nfrom django import template\nfrom django.core.urlresolvers import reverse\nfrom django.utils.safestring import mark_safe\nfrom django.template.base import FilterExpression\nfrom django.template.defaulttags import ForNode\nmodels = __import__('tumbledore', None, None, [], 2).models\nregister = template.Library()\nDEFAULT_NUMBER_OF_POSTS = 5\n\n@register.tag(name='tumblevar')\ndef do_captureas(parser, token):\n try:\n tag_name, args = token.contents.split(None, 1)\n except ValueError:\n raise template.TemplateSyntaxError(\"'tumblevar' node requires a variable name.\")\n\n nodelist = parser.parse(('endtumblevar', ))\n parser.delete_first_token()\n return CaptureasNode(nodelist, args)\n\n\nclass CaptureasNode(template.Node):\n\n def __init__(self, nodelist, varname):\n self.nodelist = nodelist\n self.varname = varname\n\n def render(self, context):\n output = mark_safe(self.nodelist.render(context))\n context[self.varname] = output\n return ''\n\n\n@register.tag(name='tumbleposts')\ndef do_tumbleposts(parser, token):\n \"\"\"\n Iterates given block over tumblelog posts.\n {% tumbleposts as post tumblelog_id=1 **kwargs %}\n {{ post.title }}\n {% endtumbleposts %}\n \"\"\"\n bits = token.contents.split()\n bits.reverse()\n bits = [ bit.strip(',') for bit in bits ]\n tag_name = bits.pop()\n as_name = bits.pop()\n var_name = bits.pop()\n order_by = None\n kwargs_list = [ bit for bit in bits if '=' in bit ]\n kwargs = {}\n for kwarg in kwargs_list:\n key, val = kwarg.split('=')\n try:\n val = int(val)\n except ValueError:\n pass\n\n if val in ('True', 'False'):\n val = bool(val)\n kwargs[key] = val\n\n if not var_name or '=' in var_name or not kwargs.get('tumblelog_id'):\n raise template.TemplateSyntaxError(\"'%s' tag requires at a minimum a post variable name and tumblelog_id.\" % tag_name)\n order_by = None\n limit = None\n if 'order_by' in kwargs:\n order_by = kwargs['order_by']\n del kwargs['order_by']\n if 'limit' in kwargs:\n limit = kwargs['limit']\n del kwargs['limit']\n object_list = models.TumblelogPost.objects.filter(**kwargs)\n if order_by:\n object_list = object_list.order_by(order_by)\n if limit:\n object_list = object_list[:limit]\n for obj in object_list:\n permalink = 
reverse('tumble_post', urlconf='tumbledore.urls', args=[obj.tumblelog.mount_on, obj.slug])\n        obj.__dict__.update(permalink=permalink)\n        if isinstance(obj.custom_data, dict):\n            obj.__dict__.update(**obj.custom_data)\n\n    sequence = FilterExpression('', parser)\n    sequence.filters = []\n    sequence.var = object_list\n    nodelist_loop = parser.parse(('empty', 'endtumbleposts'))\n    token = parser.next_token()\n    if token.contents == 'empty':\n        nodelist_empty = parser.parse(('endtumbleposts', ))\n        parser.delete_first_token()\n    else:\n        nodelist_empty = None\n    return ForNode([var_name], sequence, False, nodelist_loop, nodelist_empty)","sub_path":"pycfiles/tumbledore-0.1.10-py2.7/tumbledore.py","file_name":"tumbledore.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"558962508","text":"import datetime\nimport warnings\n\n\ndef create_header(author_list, place = \"Paris\"):\n    \"\"\"Creates the header text based on the authors list\n\n    Parameters\n    ----------\n    author_list : a list of dictionaries\n\n    Returns\n    ----------\n    header_text : str\n        the header text\n\n    Note\n    -----\n    date is updated at the function execution time\n    \"\"\"\n    today = datetime.date.today()\n    Lines = [\n        today.strftime(f\"{place}, le %d/%m/%Y, \\n\\n ## Auteurs : \"),]\n    for aut in author_list:\n        try:\n            first = aut['firstname']\n        except KeyError:\n            first = ''\n            warnings.warn('firstname missing')\n        try:\n            last = aut['lastname']\n        except KeyError:\n            last = ''\n            warnings.warn('lastname missing')\n        Lines.append(\"-\" + first + \" \" + last )\n    return \"\\n\".join(Lines)","sub_path":"reporter/header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"314617697","text":"import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Convolution2D, MaxPooling2D\n\n# https://github.com/fchollet/keras-resources\n# https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html\ndef buildModel():\n    model = Sequential()\n\n    model.add(Convolution2D(96, 55, 55, activation='relu', input_shape=(3, 227, 227)))\n    model.add(MaxPooling2D(pool_size=(96, 27, 27)))\n\n    model.add(Convolution2D(256, 27, 27, activation='relu'))\n    model.add(MaxPooling2D(pool_size=(256, 13, 13)))\n\n    model.add(Convolution2D(384, 13, 13, activation='relu'))\n    model.add(Convolution2D(384, 13, 13, activation='relu'))\n\n    model.add(Convolution2D(256, 13, 13, activation='relu'))\n    model.add(MaxPooling2D(pool_size=(256, 6, 6)))\n\n    model.add(Dense(4096, 1, 1))\n    model.add(Dense(4096, 1, 1))\n    model.add(Dense(10, activation='softmax'))\n\n    model.compile(loss=keras.losses.categorical_crossentropy,\n                  optimizer=keras.optimizers.SGD(lr=0.0001, momentum=0.9, decay=0.0005))\n\n    return model\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"244908942","text":"def la_translate(msg):\n    '''\n    if you had your eng keyboard on and you were typing in ukrainian or\n    vice versa, this program will correct your msg\n\n    >>> la_translate('Руддщб Ш фь пщштп ещ нщг тщц')\n    hello, i am going to you now\n    >>> la_translate('ghbdsn z nhj[b pfnhbvf.cz')\n    привіт я трохи затримаюся\n    '''\n    msg = list(msg.lower())\n    ukr = \"йцукенгшщзхїфівапролджєячсмитьбю\"\n    eng = 
\"qwertyuiop[]asdfghjkl;'zxcvbnm,.\"\n lg = input('Please, enter a language in which the msg will be la_translated: eng or ukr ')\n if lg == 'eng':\n for i in range(len(msg)):\n if msg[i] not in ukr:\n continue\n else:\n ind = ukr.index(msg[i])\n msg[i] = eng[ind]\n elif lg == 'ukr':\n for i in range(len(msg)):\n if msg[i] not in eng:\n continue\n else:\n ind = eng.index(msg[i])\n msg[i] = ukr[ind]\n print(''.join(msg))\n","sub_path":"la_translate.py","file_name":"la_translate.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"331329469","text":"#sudo pip install pydot==1.2.3\n#sudo pip install decorator==4.4.2\n#sudo pip install networkx=2.2.0\n\n#sudo pip install setuptools==20.7.0\n#pip 8.1.1\n\nfrom math import pow, sqrt\nimport argparse\nimport networkx as nx\nimport matplotlib.pyplot as plt\n#from drive import Driver\n\n\ndef subtract(s1,s2):\n #print(s1,s2)\n s1 = xy(s1)\n s2 = xy(s2)\n return sqrt(pow(s1[0]-s2[0],2)+pow(s1[1]-s2[1],2))\n\ndef xy(s1):\n s1 = s1[2:-2]\n s1 = s1.split(',')\n x = float(s1[0])\n y = float(s1[1])\n return x,y\n\n\nclass Map:\n def __init__(self):\n # read from file \n parser = argparse.ArgumentParser(description='Read in Dot file and 1-2 x y points')\n parser.add_argument('f', help='filename')\n parser.add_argument('e', type=float, nargs=2, help='end')\n parser.add_argument('-s','--start', type=float, nargs=2, help='start')\n args = parser.parse_args() # maybe\n filename = args.f\n G = nx.drawing.nx_pydot.read_dot(filename)\n nx.set_edge_attributes(G, values = 1, name = 'weight')\n g = nx.Graph()\n self.virtual = True\n if (args.start is None):\n self.driver = Driver()\n start = self.driver.start()\n self.virtual = False\n else:\n start = args.start\n beststart = 100\n beststartnode = 0\n s1 = \"\\\"\"+str(start)+\"\\\"\"\n bestend = 100\n bestendnode = 0\n e1 = \"\\\"\"+str(args.e)+\"\\\"\"\n for n in list(G.nodes(data=True)):\n g.add_node(n[0],label = n[1]['label'])\n if (subtract(n[1]['label'],s1) < beststart):\n beststart = subtract(n[1]['label'],s1)\n beststartnode = n\n if (subtract(n[1]['label'],e1) < bestend):\n bestend = subtract(n[1]['label'],e1)\n bestendnode = n\n for e in list(G.edges()):\n g.add_edge(e[0],e[1],weight=subtract(G.nodes[e[0]]['label'],G.nodes[e[1]]['label']))\n g.add_node(\"start\",label=s1)\n g.add_edge(\"start\",beststartnode[0],weight=beststart)\n g.add_node(\"end\",label=e1)\n g.add_edge(\"end\",bestendnode[0],weight=bestend)\n G = g\n pos=nx.spring_layout(G) # pos = nx.nx_agraph.graphviz_layout(G)\n nx.draw_networkx(G,pos)\n labels = nx.get_edge_attributes(G,'weight')\n nx.draw_networkx_edge_labels(G,pos,edge_labels=labels)\n #nx.draw(G)\n plt.show()\n self.graph = G\n\n def path(self):\n length, path = nx.single_source_dijkstra(self.graph,\"start\",target=\"end\",weight='weight')\n #print(length)\n #print(path)\n #for i in path:\n #print(self.graph.nodes[i]['label'])\n\n def drive(self):\n length, path = nx.single_source_dijkstra(self.graph,\"start\",target=\"end\",weight='weight')\n #print(length)\n #print(path)\n for i in path:\n #print(self.graph.nodes[i]['label'])\n x,y = xy(self.graph.nodes[i]['label'])\n self.driver.goto(x,y)\n\n\n\n\nm = Map()\nif m.virtual:\n m.path()\nelse:\n m.drive()\n\n\n","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"298712072","text":"import sys, 
math\n\ndef c(x):\n\t# x -> C(x)\n\t# may replaced by any coding scheme\n\treturn x\n\ndef dc(cx):\n\t# C(x) -> x\n\treturn cx\n\nclass LZ78:\n\tdef __init__(self, s):\n\t\tself.s = s\n\t\tself.encoding = []\n\t\tself.entry_to_index = {(): -1}\n\n\tdef encode(self):\n\t\tcur = 0\n\t\tMAX_LEN = len(self.s)\n\t\tbuffer = []\n\n\t\twhile True:\n\t\t\tif cur == MAX_LEN:\n\t\t\t\tcur_byte = 'EOF'\n\t\t\t\tbuffer.append(cur_byte)\n\t\t\t\ttp = tuple(buffer)\n\t\t\t\tlast_index = self.entry_to_index[tp[:-1]]\n\t\t\t\tself.encoding.append((last_index, c(cur_byte)))\n\t\t\t\tself.entry_to_index[tp] = len(self.encoding) - 1\n\t\t\t\tbreak\n\n\t\t\tcur_byte = self.s[cur]\n\t\t\tbuffer.append(cur_byte)\n\t\t\ttp = tuple(buffer)\n\n\t\t\tif tp in self.entry_to_index:\n\t\t\t\tcur += 1\n\t\t\t\tcontinue\n\n\t\t\tlast_index = self.entry_to_index[tp[:-1]]\n\t\t\tself.encoding.append((last_index, c(cur_byte)))\n\t\t\tself.entry_to_index[tp] = len(self.encoding) - 1\n\t\t\tbuffer.clear()\n\t\t\tcur += 1\n\n\tdef _find_entry(self, last_index, codeword):\n\t\trs = [dc(codeword)]\n\t\tif last_index == -1:\n\t\t\treturn rs\n\n\t\tprev_last_index, prev_codeword = self.encoding[last_index]\n\t\trs += self._find_entry(prev_last_index, prev_codeword)\n\t\treturn rs\n\n\n\tdef decode(self):\n\t\ts = []\n\t\tfor last_index, codeword in self.encoding:\n\t\t\ttmp = self._find_entry(last_index, codeword)\n\t\t\ttmp.reverse()\n\t\t\ts += tmp\n\t\treturn ''.join(s[:-1])\n\n\n\tdef save_file(self, path):\n\t\tfile = open(path, 'w')\n\t\tfor last_index, codeword in self.encoding:\n\t\t\tfile.write(str(last_index) + ' ' + str(codeword) + '\\n')\n\t\tfile.close()\n\n\nif __name__ == '__main__':\n\n\tfile = open('Introduction to Data Compression.txt','r', encoding='UTF-8')\n\ts = file.read()\n\tfile.close()\n\n\tencoder = LZ78(s)\n\tencoder.encode()\n\t\n\tprint('----- Statistics -----')\n\tprint('Original file size:', len(s), 'bytes')\n\tprint('# Encoding entries:', len(encoder.encoding), '\\n')\n\n\tprint('Size after compression (byte base):', len(encoder.encoding) * (math.ceil(math.log(len(encoder.encoding), 256)) + 1), 'bytes')\n\tprint('Compression rate:', len(encoder.encoding) * (math.ceil(math.log(len(encoder.encoding), 256)) + 1) / len(s), '\\n')\n\t\n\tcompression_size = 0\n\tfor _, codeword in encoder.encoding:\n\t\tcompression_size += math.log(len(encoder.encoding), 2) + len(codeword) * 8\n\n\tprint('Size after compression (bit base):', math.ceil(compression_size / 8), 'bytes')\n\tprint('Compression rate:', math.ceil(compression_size / 8) / len(s))\n\n\tnew_s = encoder.decode()\n\n\tfile = open('test_LZ78.txt', 'w',encoding='UTF-8')\n\tfile.write(new_s)\n\tfile.close()\n","sub_path":"LZ78.py","file_name":"LZ78.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"216707499","text":"#input\nnumbers = [1, 6, 8, 1, 2, 1, 5, 6]\n\nn = int(input(\"Enter your number you want to count: \"))\n\n#process\ncount = 0\nfor i in numbers:\n if n == i:\n count += 1\n\n#output\nprint(numbers) \n\nprint(count, \"times\")\n","sub_path":"Session 5/homework/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"368346862","text":"import numpy as np\nfrom lin_eq import *\n\ndef probA1():\n print('Task A.1:\\n ')\n A = matrix(4,3)\n dat = np.random.rand(4,3)*4\n A.view(dat)\n print('Starting matrix')\n A.show()\n Q,R = 
qr_gs_decomp(A)\n print('Is R upper triangular?')\n R.show()\n print('Is QTQ=1?')\n multiply(trans(Q),Q).show()\n print('Is QR=A?\\nA=')\n A.show()\n print('QR=')\n multiply(Q,R).show()\n\n\ndef probA2():\n print('\\nTask A.2:\\n')\n A = matrix(3,3)\n dat = np.random.rand(3,3)*4\n A.view(dat)\n print('Starting matrix A:')\n A.show()\n b = np.random.rand(3,1)*3\n print('Vector b:')\n for i in range(len(b)):\n print(float(b[i]))\n Q,R = qr_gs_decomp(A)\n print('\\nQ=')\n Q.show()\n print('R=')\n R.show()\n x = qr_gs_solve(Q,R,b)\n print('x=')\n for i in range(len(x)):\n print(float(x[i]))\n print('\\nAx=')\n b2 = multiplyv(A,x)\n for i in range(len(b2)):\n print(float(b2[i]))\n \n\ndef probB():\n print('Task B:\\n')\n A = matrix(4,4)\n dat = np.random.rand(4,4)*5-2.5\n A.view(dat)\n print('Starting matrix A:')\n A.show()\n Q,R = qr_gs_decomp(A)\n B = qr_gs_inverse(Q,R)\n print('Inverse matrix B:')\n B.show()\n print('Is AB=I?')\n print('AB=')\n multiply(A,B).show()\n print('BA=')\n multiply(B,A).show()\n\n","sub_path":"num_meth/linear_eq/problems.py","file_name":"problems.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"547968774","text":"'''\nscript for submitting jobs to runjob on fine-tune scibert.\n'''\nfrom __future__ import print_function\nfrom itertools import product\n\nimport os\nimport argparse\n\ndef main(debug):\n ## hardcord python file name and script folder name.\n COMMAND_TEMPLATE = 'python senBERT_finetune_cond.py ' if not debug else 'python senBERT_finetune_cond.py --debug '\n SCRIPT_FILE = 'scripts'\n file_dir = os.path.dirname(os.path.realpath(__file__))\n\n ## hyperparameters.\n lr_list = [5e-5,2e-5,5e-6]\n epoch_list = [5,10,15]\n cnt = 0\n combinations = list(product(*[lr_list,epoch_list]))\n for comb in combinations:\n comb = list(comb)\n output_dir = f'model-continued_sen_SciBERT_lr-{comb[0]}_epoch-{comb[1]}'\n command = COMMAND_TEMPLATE + (f'--learning_rate={comb[0]} --num_epochs={comb[1]} ')\n\n # print(command)\n os.makedirs(SCRIPT_FILE, exist_ok=True)\n bash_file = os.path.join(file_dir, SCRIPT_FILE, f'{output_dir}.sh')\n dest_file = os.path.join(file_dir, SCRIPT_FILE, f'{output_dir}.out')\n with open( bash_file, 'w' ) as OUT:\n OUT.write('source ~/.bashrc\\n')\n OUT.write('conda activate COVID_torch\\n')\n OUT.write(f'cd {file_dir}\\n')\n OUT.write(command)\n print(command)\n qsub_command = f'qsub -P medifor -q all.q -j y -o {dest_file} -l h_rt=24:00:00,h_vmem=10g,gpu=1 {bash_file}'\n print(qsub_command)\n os.system( qsub_command )\n cnt += 1\n print( 'Submitted #{}'.format(cnt))\n if debug:\n break\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Sci-sentence-bert on CORD_19.')\n parser.add_argument('--debug', action=\"store_true\", help=\"debug mode or not.\")\n args = parser.parse_args()\n main(args.debug)\n","sub_path":"fine_tune/sen_level/sentence-bert-nli-stsb/fine_tune_run_job.py","file_name":"fine_tune_run_job.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"576977280","text":"from scripts import tools\nimport os\n\n#INSTALL\npackages = ['i3',\n 'rxvt-unicode',\n 'rofi',\n 'scrot',\n 'feh',\n 'imagemagick',\n 'volumeicon', \n 'dunst',\n 'thunar',\n 'thunar-volman',\n 'thunar-archive-plugin',\n 'gvfs', \n 'tumbler',\n 'xclip',\n 'redshift']\ntools.pacaur(packages)\n\nwith tools.cd('~/Projects/'):\n 
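# Clone the helper-scripts repository while the working directory is ~/Projects (entered via tools.cd above).\n    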
tools.git_clone('git@anhtuann.com:anhtuann/useful-scripts.git')\n\n#CONFIGURATION\ntools.mkdir('~/.config/i3')\ntools.mkdir('~/.config/i3status')\ntools.mkdir('~/.config/volumeicon')\ntools.mkdir('~/.config/dunst')\ntools.link_conf('~/Projects/dotfiles/confs/i3_conf','~/.config/i3/config')\ntools.link_conf('~/Projects/dotfiles/confs/i3status_conf', '~/.config/i3status/config')\ntools.link_conf('~/Projects/dotfiles/confs/Xresources_conf', '~/.Xresources')\ntools.link_conf('~/Projects/dotfiles/confs/volumeicon_conf', '~/.config/volumeicon/volumeicon')\ntools.link_conf('~/Projects/dotfiles/confs/dunst_conf','~/.config/dunst/dunstrc')\ntools.link_conf('~/Projects/dotfiles/confs/redshift_conf', '~/.config/redshift.conf')\n\nscreenshots_dir = '~/Pictures/screenshots'\ntools.mkdir(screenshots_dir)\nwallpapers_dir = '~/Pictures/wallpapers'\nwallpaper = '~/Projects/dotfiles/statics/dayofthetentacle.jpg'\ntools.mkdir(wallpapers_dir)\ntools.bash_cmd(['cp', os.path.expanduser(wallpaper), os.path.expanduser(wallpapers_dir)])\ntools.link_conf('~/Projects/dotfiles/confs/fehbg_conf','~/.fehbg')\n\n","sub_path":"scripts/i3wm.py","file_name":"i3wm.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"357439052","text":"#!/usr/bin/env python\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport logging\nfrom datetime import datetime\nfrom point_selectors.unlabeled_selector import unlabeled_selector\nfrom score_functions.search_expected_utility import search_expected_utility\nfrom models.knn_model import knn_model_prob, knn_search\nfrom query_strategies.argmax import argmax\nfrom label_oracles.lookup_oracle import lookup_oracle\nfrom greedy_policy import greedy\nfrom two_step_policy import twoStep\nfrom ens_policy import ens\nfrom mf_ens_policy import mfEns\n\npd.set_option('display.max_columns', 30)\npd.set_option('display.max_rows', None)\nlogging.getLogger().setLevel(logging.INFO)\nlogging.basicConfig(format='%(message)s')\nlogging.info(\"{}\".format(datetime.now()))\n\n #loading Data: has x,y,labels\ndata_df = pd.read_csv(\"./data/toy_problem/toy_example_data_500.csv\",index_col=0)\n\ndef main():\n num_queries = 38\n alpha = 0.1\n k = 50\n visual = False\n repetition = 30\n policies = [\"greedy\", \"ens\", \"2-step\"]\n cost1 = 0.7\n cost2 = 1\n p1 = 0.7\n p2 = 1\n # initialization\n train_df = pd.DataFrame()\n num_points = data_df.shape[0]\n \n #initial point\n positive_labels = data_df.loc[data_df['labels']==1]\n positive_labels_counts = positive_labels['labels'].sum()\n print(positive_labels_counts)\n \n\n # making probabilistic model\n [neighbours, distances] = knn_search(data_df[['x','y']], k)\n similarities = 1/distances\n weights = pd.DataFrame(0, index=range(num_points), columns=range(num_points))\n for i in range(num_points):\n weights.iloc[i,neighbours.iloc[i].tolist()] = similarities.iloc[i].tolist()\n\n pos_count_ens = []\n first_train_point = []\n for idx in range(positive_labels_counts):\n train_df = positive_labels.sample()\n positive_labels.drop(train_df.index, inplace=True)\n logging.info(\"-------------------------------\")\n logging.info(train_df)\n logging.info(\"-------------------------------\")\n first_train_point.append(train_df.index.tolist()[0])\n\n logging.info(\"*************** ENS ***************\")\n pos_count_ens.append(ens(data_df, train_df, weights, alpha, visual, num_queries))\n\n logging.info(\"**********************************************\")\n 
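# Log which seed point this repetition started from and how many positives ENS recovered.\n        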
logging.info(\"first_train_point at iteration {}: {}\".format(idx,\n                                                     first_train_point[idx]))\n        logging.info(\"pos_count_ens at iteration {}: {}\".format(idx,\n                                                     pos_count_ens[idx]))\n        logging.info(\"**********************************************\")\n\n    logging.info(\"first_train_point : {}\".format(first_train_point))\n    logging.info(\"pos_count_ens : {}\".format(pos_count_ens))\n\n\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"compare-ens.py","file_name":"compare-ens.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"420062422","text":"#!/usr/bin/env python\n\n\"\"\"Usage:\n    pontoon image list [--with-ids]\n    pontoon image oses\n    pontoon image show <name>\n\nOptions:\n    --with-ids    Include ids in output. Useful for other software that uses\n                  Digital Ocean ids for input (like Packer).\n    -h --help     Show this page.\n\"\"\"\n\nfrom .. import ui\nfrom .. import Command\nfrom .. import ImageException\n\n\nclass ImageCommand(Command):\n\n    def list(self):\n        available = self.pontoon.image.list()\n        ui.message(\"Available images:\")\n        for s in available:\n            if self.args['--with-ids']:\n                ui.message(\" - %-10s %s\" % (str(s.id) + ':', s.name))\n            else:\n                ui.message(\" - %s\" % s.name)\n        return 0\n\n    def show(self):\n        img = self.pontoon.image.show(self.args['<name>'])\n        for k, v in img.__dict__.items():\n            ui.message(\" %s: %s\" % (k, v))\n\n    def oses(self):\n        available = self.pontoon.image.oses()\n        ui.message(\"Available Operating Systems:\")\n        for o in available:\n            ui.message(\" - %s\" % o)\n        return 0\n\n\ndef main():\n    try:\n        cmd = ImageCommand(str(__doc__))\n        exit(cmd.run())\n    except ImageException as e:\n        ui.message(str(e))\n        exit(1)\n\n    exit(0)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"pontoon/cmd/pontoon_image.py","file_name":"pontoon_image.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"15167494","text":"from MyScene import *\r\nimport os\r\n\r\nrunning = True\r\n\r\ndef handle_events():\r\n    global running\r\n    for event in p.event.get():\r\n\r\n        if event.type == p.KEYUP:\r\n            if event.key == p.K_RETURN:\r\n                running = False\r\n            if event.key == p.K_ESCAPE:\r\n                running = False\r\n        if event.type == p.QUIT:\r\n            running = False\r\n    return running\r\n\r\nwhile running:\r\n    screen.fill((0, 0, 0))\r\n    draw()\r\n    p.display.flip()\r\n\r\n    handle_events()\r\n\r\np.quit()","sub_path":"New folder (3)/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"365583076","text":"# \"I don't like the same numbers\" (remove consecutive duplicates)\n# The most upvoted solution\n# Compare i against the last value in the list: if equal, continue; if different, append ,,,,good\ndef solution(s):\n    a = []\n    for i in s:\n        if a[-1:] == [i]: continue\n        a.append(i)\n    return a\n\n# My solution\n\"\"\"\ndef solution(arr):\n    b =[]\n    b.append(arr[0])\n    for i in range(1,len(arr)):\n        if arr[i] != arr[i-1]:\n            b.append(arr[i])\n    \n    return b\n\"\"\"\n","sub_path":"lev1_Overlap.py","file_name":"lev1_Overlap.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"488951674","text":"import natural as f1\ndef POZ_Z_D(mas): # the function receives an integer as input\n    # Семёнов Михаил\n    # Sign of an integer\n    if mas[0] == '1': # the first character of the number is \"-\"\n        res = 1\n    elif mas[0] == '0': # the number is zero\n        res=0\n    else : # the number is positive\n        
res = 2\n return res\n\ndef ABS_Z_N(celoe):\n # Семёнов Михаил\n # Модуль целого числа\n if POZ_Z_D(celoe) == '-':\n return celoe[1:]\n else:\n return celoe\n\ndef SUB_ZZ_Z(list1, list2):\n # Семёнов Михаил\n # Вычитание целых чисел\n if POZ_Z_D(list1) == '+' and POZ_Z_D(list2) == '-':\n return f1.ADD_NN_N(list1, ABS_Z_N(list2))\n elif POZ_Z_D(list1) == '-' and POZ_Z_D(list2) !='-':\n if POZ_Z_D(list2) == '+':\n return ['-'] + f1.ADD_NN_N(ABS_Z_N(list1), ABS_Z_N(list2))\n else :\n return list1\n \n elif f1.COM_NN_D(ABS_Z_N(list1), ABS_Z_N(list2)) == 2 :\n if list2 != 0:\n return f1.SUB_NN_N(ABS_Z_N(list1), ABS_Z_N(list2))\n else :\n return list1\n elif POZ_Z_D(list1) == 0 :\n if POZ_Z_D(list2) == '-':\n return list2[1:]\n elif list2 == [0]:\n return list2\n else :\n return ['-'] + list2\n\n else:\n if POZ_Z_D(list1) == '+':\n return ['-'] + f1.SUB_NN_N(ABS_Z_N(list2), ABS_Z_N(list1))\n else :\n return f1.SUB_NN_N(ABS_Z_N(list2), ABS_Z_N(list1))\n \n \n \n\ndef ADD_ZZ_Z(b1, n1, list1, b2, n2, list2):\n #Дашкин Дамир\n #Сложение целых чисел\n str1 = \"\"\n str2 = \"\"\n for i in range(len(list1)):\n str1 = str1 + str(list1[i])\n for j in range(len(list2)):\n str2 = str2 + str(list2[j])\n if b1 == 1:\n str1 = \"-\" + str1\n num1 = int(str1)\n if b2 == 1:\n str2 = \"-\" + str2\n num2 = int(str2)\n if f1.POZ_Z_D(num1) == 2 and f1.POZ_Z_D(num2) == 2:\n res = f1.ADD_NN_N(num1, num2)\n if f1.POZ_Z_D(num1) == 1 and f1.POZ_Z_D(num2) == 1:\n mod1 = f1.ABS_Z_N(num1)\n mod2 = f1.ABS_Z_N(num2)\n res = f1.ADD_NN_N(mod1, mod2)\n res = f1.MUL_ZM_Z(res)\n else:\n mod1 = f1.ABS_Z_N(num1)\n mod2 = f1.ABS_Z_N(num2)\n if f1.COM_NN_D(mod1, mod2) == 2:\n if f1.POZ_Z_D(num1) == 1:\n res = f1.SUB_NN_N(mod1, mod2)\n res = f1.MUL_ZM_Z(res)\n else:\n res = f1.SUB_NN_N(mod1, mod2)\n if f1.COM_NN_D(mod1, mod2) == 1:\n if f1.POZ_Z_D(num2) == 1:\n res = f1.SUB_NN_N(mod2, mod1)\n res = MUL_ZM_Z(res)\n else:\n res = f1.SUB_NN_N(mod2, mod1)\n else:\n res = 0\n return res\n \ndef MOD_ZZ_Z(b1, n1, list1, b2, n2, list2):\n #Дашкин Дамир\n #Остаток от деления целых чисел\n str1 = \"\"\n str2 = \"\"\n for i in range(len(list1)):\n str1 = str1 + str(list1[i])\n for j in range(len(list2)):\n str2 = str2 + str(list2[j])\n if b1 == 1:\n str1 = \"-\" + str1\n num1 = int(str1)\n if b2 == 1:\n str2 = \"-\" + str2\n num2 = int(str2)\n q = DIV_ZZ_Z(num1, num2)\n k = MUL_ZZ_Z(q, num2)\n res = SUB_ZZ_Z(num1, k)\n return res\n\n\n","sub_path":"integer.py","file_name":"integer.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"489878501","text":"# 如何统计序列中元素出现频度\n\nfrom random import randint\n\n# 方法1\ndata = [randint(0, 20) for _ in range(30)]\n\nc = dict.fromkeys(data, 0)\n\nfor x in data:\n c[x] += 1\n\nprint(c)\n\n# 方法2\nfrom collections import Counter\n\nc2 = Counter(data)\n# 出现频度最高的元素\nprint(c2.most_common(3))\n\n# 方法3\n\nimport re\n\ntxt = open('2-2.txt').read()\n\ntxt = re.split('\\W+', txt)\n\nc3 = Counter(txt)\n\nprint(c3)\nprint(c3.most_common(10))\n","sub_path":"py_file/2-3.py","file_name":"2-3.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"7711846","text":"from image_generator import ImageGenerator\nfrom models import SSD300\nfrom utils.prior_box_creator import PriorBoxCreator\nfrom utils.prior_box_assigner import PriorBoxAssigner\nfrom utils.box_transformer import BoxTransformer\nfrom utils.XML_parser import XMLParser\nfrom utils.utils 
import split_data\nfrom utils.utils import read_image, resize_image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimage_shape = (300, 300, 3)\nmodel = SSD300(image_shape)\nbox_creator = PriorBoxCreator(model)\nprior_boxes = box_creator.create_boxes()\n\nlayer_scale, box_arg = 0, 780\nbox_coordinates = prior_boxes[layer_scale][box_arg, :, :]\nimage_path = '../images/'\nimage_key = '007040.jpg'\nbox_creator.draw_boxes(image_path + image_key, box_coordinates)\n\ndata_path = '../datasets/VOCdevkit/VOC2007/'\nground_truths = XMLParser(data_path+'Annotations/').get_data()\nprior_box_manager = PriorBoxAssigner(prior_boxes, ground_truths)\nassigned_boxes = prior_box_manager.assign_boxes()\nprior_box_manager.draw_assigned_boxes(image_path, image_shape[0:2], image_key)\nbatch_size = 7\ntrain_keys, validation_keys = split_data(assigned_boxes, training_ratio=.8)\n\nassigned_image_generator = ImageGenerator(assigned_boxes, batch_size,\n image_shape[0:2],\n train_keys, validation_keys,\n data_path+'JPEGImages/')\n\ntransformed_image = next(assigned_image_generator.flow(mode='demo'))[0]\ntransformed_image = np.squeeze(transformed_image[0]).astype('uint8')\noriginal_image = read_image(data_path+'JPEGImages/'+validation_keys[0])\noriginal_image = resize_image(original_image, image_shape[0:2])\nplt.figure(1)\nplt.subplot(121)\nplt.title('Original image')\nplt.imshow(original_image)\nplt.subplot(122)\nplt.title('Transformed image')\nplt.imshow(transformed_image)\nplt.show()\n\nbox_transfomer = BoxTransformer(assigned_boxes, ground_truths)\nencoded_boxes = box_transfomer.encode_boxes()\n\nimage_generator = ImageGenerator(encoded_boxes, batch_size,\n image_shape[0:2],\n train_keys, validation_keys,\n data_path+'JPEGImages/')\n\n\n\n\n\n","sub_path":"src/old_code/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"439420197","text":"import logging\n\n# Kenny loggins\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.WARN)\nlog_formatter = logging.Formatter('%(asctime)s {%(levelname)s}: %(message)s')\n\n# console log\nstream_handler = logging.StreamHandler()\nstream_handler.setLevel(logging.WARN)\nstream_handler.setFormatter(log_formatter)\n\n# set it all up\nlog.addHandler(stream_handler)","sub_path":"576/4/lib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"368039741","text":"# henrietta.natalia.main.py\nfrom _spy.vitollino.main import Cena, Texto, Elemento, STYLE\nfrom _spy.vitollino.main import INVENTARIO as inv\n\nfrom soraya.main import Bloco\nfrom browser import alert\n\n\nSTYLE[\"width\"] = 600\nSTYLE[\"height\"] = \"600px\"\n\nTEMPLO = \"http://3.bp.blogspot.com/-UsnGAupu3XM/VHvU2M5BHUI/AAAAAAAAdCE/UKbq_5dTM7k/s1600/IMG_6098.JPG\"\nCORREDOR = \"http://i.muyinteresante.com.mx/dam/sociedad/historia/17/03/8/rabbit-hole-700-year-old-secret-knights-templar-cave-network-8-58c006f4a30df__880.jpg.imgo.jpg\"\nOCEANO = \"https://freeclipartspot.com//storage/upload/ocean-clip-art/ocean-clip-art-51.jpg\"\nALGA = \"https://i.pinimg.com/originals/70/68/5f/70685fa634c3bb82a8eb5771a0a869ed.png\"\nCONCHA = \"http://www.mat.uc.pt/~picado/conchas/imagens/p10.png\"\nAQUARIO = \"https://www.tenstickers.pt/autocolantes-decorativos/img/preview/autocolante-decorativo-infantil-peixe-aquario-3634.png\"\nTRANSPARENTE = 
\"http://1.bp.blogspot.com/-eK24sreQNsg/Uvy1AT5iVSI/AAAAAAAAAGo/TRHh_nkqhVY/s1600/fundo-blog.png\"\nFLORESTA = \"https://st.depositphotos.com/1718692/2958/i/950/depositphotos_29580473-stock-photo-stones-and-tree-roots-in.jpg\"\nOCULOS = \"https://www.dvosky.com/media/catalog/product/cache/1/image/1200x1200/9df78eab33525d08d6e5fb8d27136e95/d/v/dvsk1003-preto-prata.png\"\nhistoria = \"eu friccionei a pedra e gerou fogo\"\nverbos_altos = [\"ger\", \"atrit\", \"roç\", \"direcion\", \"friccion\", \"elev\", \"decid\", \"faz\", \"concl\", \"us\",\n \"remanej\" ,\"erg\", \"suspend\", \"ate\", \"esfreg\", \"trisc\"] \nverbos_altos == 3\nverbos_medios = [\"bat\", \"gir\", \"colo\", \"manipul\", \"mov\", \"surg\", \"peg\", \"levant\", \"bat\"]\nverbos_medios == 2\n\nverbos_fracos = [\"rod\", \"bot\", \"sub\", \"pux\", \"form\", \"tent\", \"cli\", \"abaix\", \"mex\", \"encost\", \"rel\"] \nverbos_fracos == 1\nverbos = [ (3,verbos_altos),(2,verbos_medios),(1,verbos_fracos)]\n\nclass Estados:\n def __init__(self):\n floresta = Cena(FLORESTA)\n self.fantasma = Cena()\n floresta.vai()\n self.galhos = gag = Elemento(img=TRANSPARENTE,tit=\"galhos\", style=dict(\n left=28, top=130, width=60, height=\"60px\"))\n gag.entra(floresta)\n gag.vai = self.fogo_galhos\n self.pedra = aqua = Elemento(img=TRANSPARENTE,tit=\"pedras\", style=dict(\n left=500, top=300, width=60, height=\"60px\")) \n aqua.entra(floresta)\n aqua.vai = self.fogo_pedra\n self.oculos = ocu = Elemento(img=OCULOS, tit=\"OCULOS\", style=dict(\n left=28, top=130, width=60, height=\"60px\"))\n inv.bota(self.oculos)\n ocu.vai = self.fogo_oculos\n \n def fogo_pedra(self, *_):\n resposta=input(\"Voce fez fogo usando pedras! Como vc fez?\")\n self.pedra.entra(self.fantasma)\n alert(avaliar(resposta))\n def fogo_galhos(self, *_):\n resposta=input(\"voce fez fogo usando galhos! como vc fez?\")\n self.galhos.entra(self.fantasma)\n alert(avaliar(respostas))\n def fogo_oculos(self, *_):\n respostas=input(\"voce fez fogo usando oculos! 
como vc fez?\")\n self.oculos.entra(self.fantasma)\n alert(avaliar(respostas))\n\ndef avaliar(you):\n pontuacao = 0\n for peso,verbo in verbos:\n for prefixo in verbo:\n pontuacao += peso if prefixo in you else 0 \n return pontuacao\n \n\n \n \n \n\n\nif __name__ == \"__main__\":\n Estados()\n \n \n \n ","sub_path":"natalia/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"161254689","text":"def fn():\r\n n = int(input().strip())\r\n b = []\r\n q = []\r\n for i in range(1,10):\r\n q.append(i)\r\n while len(q) != 0:\r\n tmp = q.pop(0)\r\n if tmp > n:\r\n continue\r\n b.append(tmp)\r\n left = tmp % 10\r\n if left > 0:\r\n q.append(tmp * 10 + (left-1) )\r\n if left < 9:\r\n q.append(tmp * 10 + (left+1) )\r\n b.sort()\r\n print(\"0 \",end = '')\r\n for i in b:\r\n print(i,\"\",end = '')\r\n print()\r\n\r\nfor _ in range(int(input().strip())):\r\n fn()","sub_path":"python/jumping_numbers.py","file_name":"jumping_numbers.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"37244204","text":"from bs4 import BeautifulSoup\nimport requests\nimport time\nimport json\nimport sys\nimport os\nimport requests.packages.urllib3.util.ssl_\n#print(requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS)\nrequests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS = 'ALL'\n\nSITE = 'https://www.ptt.cc'\nSITE_SEED = '/bbs/'\nSITE_FOOT = '/index.html'\nBOARD = str(sys.argv[1])\n# full url\nSITE_URL = SITE + SITE_SEED + BOARD + SITE_FOOT\n\ndef get_page_number():\n html_text = session.get(SITE_URL, cookies={'over18': '1'}, verify = True)\n soup = BeautifulSoup(html_text.text.encode('utf-8'), 'html.parser')\n class_found = soup.find_all('a', {'class': 'btn wide'})\n #print (class_found[1]['href'])\n page_link = class_found[1]['href']\n start_index = page_link.find('index') + 5\n end_index = page_link.find('.html')\n num = page_link[start_index : end_index]\n page_number = int(num) + 1\n\n return page_number\n\ndef get_link(start_page, end_page):\n link_file = open(BOARD + '_article_links.txt', 'w')\n full = ''\n for index in range(start_page, end_page+1):\n page_url = SITE + SITE_SEED + BOARD + '/index' + str(index) + '.html'\n html_text = session.get(page_url, cookies={'over18': '1'}, verify = True)\n soup = BeautifulSoup(html_text.text.encode('utf-8'), 'html.parser')\n link_list = soup.find_all('div', {'class': 'title'})\n \n for link in link_list:\n if link.find('a') is not None:\n article_link = link.find('a')['href']\n link_file.write(SITE + article_link + '\\n')\n\n link_file.close()\n\ndef get_article():\n article_list = []\n with open(BOARD + '_article_links.txt') as fp:\n for line in fp:\n html_text = session.get(line.strip('\\n'), cookies={'over18': '1'}, verify = True)\n soup = BeautifulSoup(html_text.text.encode('utf-8'), 'html.parser')\n article_id = line.split('/')[5]\n end_index = article_id.find('.html')\n article_id = article_id[0 : end_index]\n print (article_id)\n\n author = ''\n title = ''\n post_time = ''\n article_meta = soup.find_all('div', {'class': 'article-metaline'})\n if article_meta:\n author = article_meta[0].find('span', {'class': 'article-meta-value'}).string if article_meta[0].find('span', {'class': 'article-meta-value'}) else author\n title = article_meta[1].find('span', {'class': 'article-meta-value'}).string if article_meta[1].find('span', {'class': 'article-meta-value'}) 
else title\n post_time = article_meta[2].find('span', {'class': 'article-meta-value'}).string if article_meta[2].find('span', {'class': 'article-meta-value'}) else post_time\n\n push_list = []\n push_num = 0\n boo_num = 0\n arrow_num = 0\n main_content = soup.find('div', {'id': 'main-content'})\n # find all pushes\n pushes = main_content.find_all('div', {'class': 'push'})\n for item in pushes:\n if not item.find('span', {'class': 'push-tag'}):\n continue\n push_tag = item.find('span', {'class': 'push-tag'}).string.strip(' ')\n push_user = item.find('span', {'class': 'push-userid'}).string\n push_content = item.find('span', {'class': 'push-content'}).getText()\n # remove ': '\n push_content = push_content[2:]\n # print (push_content)\n push_time = item.find('span', {'class': 'push-ipdatetime'}).string.strip('\\n')\n push_list.append({'push_tag': push_tag, 'push_user': push_user, 'push_content': push_content, 'push_time': push_time})\n if push_tag == '推':\n push_num += 1\n elif push_tag == '噓':\n boo_num += 1\n else:\n arrow_num += 1\n \n # remove all meta tags\n removed_elements = main_content.find_all('div', {'class': 'article-metaline'})\n for element in removed_elements:\n element.decompose()\n\n main_content.find('div', {'class': 'article-metaline-right'}).decompose()\n\n removed_elements = main_content.find_all('span', {'class': 'f2'})\n for element in removed_elements:\n element.decompose()\n\n # remove all pushes\n for item in pushes:\n item.decompose()\n\n # get article main content\n main_content = main_content.getText()\n\n article = {\n 'Board': BOARD,\n 'Title': title,\n 'Author': author,\n 'Post_time': post_time,\n 'Content': main_content,\n 'Push_num': push_num,\n 'Boo_num': boo_num,\n 'Arrow_num': arrow_num,\n 'Pushes': push_list\n }\n\n article_list.append(article)\n \n fp.close()\n\n file_name = BOARD + '_article_' + str(time.strftime(\"%Y%m%d\")) + '.json'\n article_file = open(file_name, 'w', encoding='utf-8')\n article_file.write(json.dumps(article_list, indent=4, ensure_ascii=False))\n article_file.close()\n\n\nif __name__ == \"__main__\":\n session = requests.session()\n # get total page number\n total_page = get_page_number()\n # print (total_page)\n\n start_page = int(sys.argv[2])\n if int(sys.argv[3]) == -1:\n end_page = total_page\n else:\n end_page = int(sys.argv[3])\n\n # find and store all article links\n get_link(start_page, end_page)\n\n # get and store all article content\n get_article()\n\n\n\n","sub_path":"ptt_crawler.py","file_name":"ptt_crawler.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"490788577","text":"import argparse\nimport torch\n\n\ndef str2bool(v):\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")\n\n\n# def save_load_dict(save_path, args=None, overwrite=False, verbose=True):\n# # saves a dictionary, 'args', as a json file. Or loads if it exists.\n\n# if os.path.exists(save_path) and not overwrite:\n# warnings.warn(\n# \"args file exists and overwrite is not set to True. 
Using existing args file.\"\n#         )\n\n#         # load args file\n#         with open(save_path, \"rb\") as f:\n#             args = json.load(f)\n#     else:\n#         # make a copy if the args file exists\n#         if os.path.exists(save_path):\n#             the_time = datetime.datetime.now().strftime(\"%Y-%m-%d-%H:%M:%S\")\n#             shutil.copyfile(save_path, \"{0}_{1}\".format(save_path, the_time))\n\n#         with open(save_path, \"w\") as f:\n#             json.dump(args, f, indent=4, sort_keys=True)\n\n#     return args\n\n\ndef get_activation(activation):\n    if activation is None or activation.lower() == \"none\":\n        return torch.nn.Sequential()\n\n    elif activation.lower() == \"relu\":\n        return torch.nn.ReLU(inplace=True)\n\n    elif activation.lower() == \"prelu\":\n        return torch.nn.PReLU()\n\n    elif activation.lower() == \"sigmoid\":\n        return torch.nn.Sigmoid()\n\n    elif activation.lower() == \"leakyrelu\":\n        return torch.nn.LeakyReLU(0.2, inplace=True)\n\n\ndef load_state(model, optimizer, path, gpu_id):\n\n    checkpoint = torch.load(path)\n\n    model.load_state_dict(checkpoint[\"model\"])\n    optimizer.load_state_dict(checkpoint[\"optimizer\"])\n\n\ndef save_state(model, optimizer, path, gpu_id):\n\n    checkpoint = {\"model\": model.state_dict(), \"optimizer\": optimizer.state_dict()}\n    torch.save(checkpoint, path)\n","sub_path":"geneselection/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"641300112","text":"# Module basics\nimport numpy\n# Create a one-dimensional array\n# numpy.array([element1, element2])\nx=numpy.array([\"22\",\"19\",\"9\",\"8\"])\n# Create a two-dimensional array\n# numpy.array([[element1, element2], [element1, element2], [element1, element2]])\ny=numpy.array([[\"hdsafg\",\"asjhgf\",\"878392\"],[\"jhyfsdaeku\",\"jhgsda\"],[\"素1\",\"元素2\"]])\n# Sort\nx.sort()\n#print(x)\n'''\nNote:\n# to take the max and min, the elements must be numbers\n# arrays within the same dimension must all have the same length\n'''\n# raises an error # y=numpy.array([[\"3\",\"10\",\"2\"],[\"9\",\"7\"],[\"5\",\"9\"]])\n# raises an error # y=numpy.array([[3,10,2],[9,17],[5,9]])\ny=numpy.array([[3,10,2],[9,17,8],[5,9,1]])\nxm=y.max()\n#print(xm)\nymin=y.min()\n#print(ymin)\n# Slicing\n# array[start_index : end_index + 1]\nx1=x[1:3]\n#print(x1)\nx2=x[:2]\n#print(x2)\nx3=x[1:]\n#print(x3)\n\n# pandas\nimport pandas as pda\n'''\nSeries: one-dimensional sequence\nDataFrame: two-dimensional table\n'''\na=pda.Series([9,2,8,1])\nb=pda.DataFrame([[\"3\",\"10\",\"2\"],[\"9\",\"7\",\"4\"],[\"5\",\"9\",\"1\"]])\n# Head of the data, first five rows by default\nb.head()\nb.head(2)\n# Tail of the data, last five rows by default\nb.tail()\nb.tail(2)\n# Basic summary statistics of the data\nb.describe()\n# Transpose\nb.T\n","sub_path":"模块使用基础.py","file_name":"模块使用基础.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"61719741","text":"\"\"\"Range Sum Query - Immutable\nGiven an integer array nums, handle multiple queries of the following type:\n\n1. Calculate the sum of the elements of nums between indices left and right inclusive where\n   left <= right.\n\nImplement the NumArray class:\n\n* NumArray(int[] nums) Initializes the object with the integer array nums.\n* int sumRange(int left, int right) Returns the sum of the elements of nums between indices\n  left and right inclusive (i.e. nums[left] + nums[left + 1] + ... 
+ nums[right]).\n\nExample 1:\n    Input:\n    [\"NumArray\", \"sumRange\", \"sumRange\", \"sumRange\"]\n    [[[-2, 0, 3, -5, 2, -1]], [0, 2], [2, 5], [0, 5]]\n    Output:\n    [null, 1, -1, -3]\n\nExplanation:\n    NumArray numArray = new NumArray([-2, 0, 3, -5, 2, -1]);\n    numArray.sumRange(0, 2); // return (-2) + 0 + 3 = 1\n    numArray.sumRange(2, 5); // return 3 + (-5) + 2 + (-1) = -1\n    numArray.sumRange(0, 5); // return (-2) + 0 + 3 + (-5) + 2 + (-1) = -3\n\nConstraints:\n    * 1 <= nums.length <= 10^4\n    * -10^5 <= nums[i] <= 10^5\n    * 0 <= left <= right < nums.length\n    * At most 10^4 calls will be made to sumRange.\n\"\"\"\n\nfrom typing import List\n\n\nclass NumArray:\n\n    def __init__(self, nums: List[int]):\n        self.prefix_sum = nums.copy()\n        for i in range(1, len(nums)):\n            self.prefix_sum[i] += self.prefix_sum[i-1]\n\n    def sumRange(self, left: int, right: int) -> int:\n        return self.get(right) - self.get(left - 1)\n\n    def get(self, i):\n        if i < 0 or i >= len(self.prefix_sum):\n            return 0\n        return self.prefix_sum[i]\n\n\nif __name__ == '__main__':\n    cases = [\n        ([-2, 0, 3, -5, 2, -1], [[0, 2], [2, 5], [0, 5]], [1, -1, -3])\n    ]\n\n    for case in cases:\n        num_array = NumArray(case[0])\n        for item in zip(case[1], case[2]):\n            assert num_array.sumRange(*item[0]) == item[1]\n\n","sub_path":"leetcode/303_range_sum_query_immutable.py","file_name":"303_range_sum_query_immutable.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"577116312","text":"# https://leetcode.com/problems/cheapest-flights-within-k-stops/description/\n# Bellman ford\ndef findCheapestPrice(n, flights, src, dst, K):\n    INF = 10000\n    mn = [INF]*n\n    mn[src] = 0\n\n    for k in range(K+1):\n        newmn = mn[:]\n        for flight in flights:\n            a = flight[0]\n            b = flight[1]\n            cost = flight[2]\n\n            newmn[b] = min(newmn[b],mn[a]+cost)\n        mn=newmn\n    if mn[dst] != INF:\n        return mn[dst]\n    return -1\n\nn = 3 \nedges = [[0,1,100],[1,2,100],[0,2,500]]\nsrc = 0 \ndst = 2 \nk = 0\nassert(findCheapestPrice(n, edges, src, dst, k) == 500)","sub_path":"tests/bellman_ford_test.py","file_name":"bellman_ford_test.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"583200492","text":"import numpy as np \n\n\ncorMat = np.load(open(\"corSparseMatrix\", \"rb\"))  # np.load requires a binary-mode file object\n\ndef gmlfilecreator(i):\t\n\t\n\tfile1 = open(\"graphFile%d.gml\"%i, \"a\") \n\t#you have to start with this\n\tfile1.write(\"graph\\n[\")\n\t#now based on the number of nodes you create the nodes in the graph\n\n\tfor size in range(357):\n\t\tfile1.write(\"\\n node \\n [\\n id %d\\n ]\"%size)\n\tfor source in range(357):\n\t\tfor target in corMat[i][source]:\n\t\t\tfile1.write(\"\\n edge \\n [\\n source %d\\n target %d\\n ]\"%(source, target)) \t\n\tfile1.write(\"\\n]\")\n\tfile1.close()\n","sub_path":"createGraphFormat.py","file_name":"createGraphFormat.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"507515743","text":"from __future__ import annotations\n\nfrom asyncio import CancelledError\nfrom itertools import chain\nfrom typing import TYPE_CHECKING, Any, List, Optional, cast\n\nfrom ....jsonrpc2.protocol import rpc_method\nfrom ....utils.async_event import async_tasking_event\nfrom ....utils.logging import LoggingDescriptor\nfrom ..has_extend_capabilities import HasExtendCapabilities\nfrom ..language import HasLanguageId, HasRetriggerCharacters, 
HasTriggerCharacters\nfrom ..lsp_types import (\n Position,\n ServerCapabilities,\n SignatureHelp,\n SignatureHelpContext,\n SignatureHelpOptions,\n SignatureHelpParams,\n TextDocumentIdentifier,\n)\nfrom ..text_document import TextDocument\n\nif TYPE_CHECKING:\n from ..protocol import LanguageServerProtocol\n\nfrom .protocol_part import LanguageServerProtocolPart\n\n\nclass SignatureHelpProtocolPart(LanguageServerProtocolPart, HasExtendCapabilities):\n\n _logger = LoggingDescriptor()\n\n def __init__(self, parent: LanguageServerProtocol) -> None:\n super().__init__(parent)\n\n @async_tasking_event\n async def collect(\n sender, document: TextDocument, position: Position, context: Optional[SignatureHelpContext] = None\n ) -> Optional[SignatureHelp]:\n ...\n\n def extend_capabilities(self, capabilities: ServerCapabilities) -> None:\n if len(self.collect):\n trigger_chars = [\n k\n for k in chain(\n *[\n cast(HasTriggerCharacters, e).__trigger_characters__\n for e in self.collect\n if isinstance(e, HasTriggerCharacters)\n ]\n )\n ]\n\n retrigger_chars = [\n k\n for k in chain(\n *[\n cast(HasRetriggerCharacters, e).__retrigger_characters__\n for e in self.collect\n if isinstance(e, HasRetriggerCharacters)\n ]\n )\n ]\n\n capabilities.signature_help_provider = SignatureHelpOptions(\n trigger_characters=trigger_chars if trigger_chars else None,\n retrigger_characters=retrigger_chars if retrigger_chars else None,\n )\n\n @rpc_method(name=\"textDocument/signatureHelp\", param_type=SignatureHelpParams)\n async def _text_document_signature_help(\n self,\n text_document: TextDocumentIdentifier,\n position: Position,\n context: Optional[SignatureHelpContext] = None,\n *args: Any,\n **kwargs: Any,\n ) -> Optional[SignatureHelp]:\n\n results: List[SignatureHelp] = []\n document = self.parent.documents[text_document.uri]\n for result in await self.collect(\n self,\n document,\n position,\n context,\n callback_filter=lambda c: not isinstance(c, HasLanguageId) or c.__language_id__ == document.language_id,\n ):\n if isinstance(result, BaseException):\n if not isinstance(result, CancelledError):\n self._logger.exception(result, exc_info=result)\n else:\n if result is not None:\n results.append(result)\n\n if len(results) > 0:\n # TODO: can we combine signature help results?\n if results[-1].signatures:\n return results[-1]\n\n return None\n","sub_path":"robotcode/language_server/common/parts/signature_help.py","file_name":"signature_help.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"538538259","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis file includes all the tests to test the api functionallity.\r\n\r\n\"\"\"\r\n\r\nfrom django.test import TestCase\r\nfrom django.test.client import Client\r\nfrom django.core.urlresolvers import reverse\r\nfrom django.contrib.auth.models import User\r\nfrom django.contrib.gis.geos import GEOSGeometry\r\nfrom django.core import mail\r\nfrom django.utils import simplejson as json\r\nfrom models import EmailConfirmation\r\nfrom models import EmailAddress\r\n\r\n \r\nclass EmailTest(TestCase):\r\n\r\n def setUp(self):\r\n self.client = Client()\r\n self.user = None\r\n \r\n user1 = User.objects.create_user('test_user','', 'test_pass')\r\n self.client.login(username='test_user', password='test_pass')\r\n\r\n\r\n\r\n \r\n def test_email_update(self):\r\n \"\"\"\r\n Test registering new email\r\n \"\"\"\r\n \r\n #case 1 - no email sent\r\n post_content = {\"value\" : 
\"\"}\r\n \r\n response = self.client.post(reverse('api_manage_email'),\r\n json.dumps(post_content),\r\n content_type='application/json')\r\n \r\n self.assertEquals(response.status_code,\r\n 400,\r\n \"trying to set empty email address\")\r\n\r\n def test_email_update_with_data(self):\r\n post_content = {\"value\" : \"test@aalto.fi\"}\r\n response = self.client.post(reverse('api_manage_email'),\r\n json.dumps(post_content),\r\n HTTP_X_REQUESTED_WITH='XMLHttpRequest',\r\n content_type='application/json')\r\n\r\n #Test if confirmation email is sent\r\n self.assertEquals(len(mail.outbox), 1, \"Confirmation email not sent\")\r\n\r\n #confirm the email\r\n\r\n emailAddress = EmailAddress.objects.get(email = \"test@aalto.fi\")\r\n emailConfirmation = EmailConfirmation.objects.get(email_address = emailAddress)\r\n\r\n response = self.client.get(reverse('api_emailconfirmation', args=[emailConfirmation.confirmation_key]))\r\n self.assertEquals(response.status_code,\r\n 200,\r\n \"the email address confirmation url is not working\")\r\n response = self.client.get(reverse('api_manage_email'))\r\n responsejson = json.loads(response.content)\r\n self.assertEquals(responsejson.get('email'),\r\n \"test@aalto.fi\",\r\n \"The email obtain using get is not ok\")\r\n\r\n #delete the email and test again the GET\r\n response = self.client.delete(reverse('api_manage_email'))\r\n self.assertEquals(response.status_code,\r\n 200,\r\n \"the email address delete not working\")\r\n response = json.loads(self.client.get(reverse('api_manage_email')).content)\r\n self.assertEquals(response.get('email'), \"\", \"The email obtain using GET after delete is not an empty string\")\r\n \r\n def test_email_fail_for_registration(self):\r\n \r\n user2 = User.objects.create_user('cristian1001','', 'cristi')\r\n EmailAddress.objects.add_email(user2, 'test2@test.com')\r\n user3 = User.objects.create_user('cristian1002','', 'cristi')\r\n EmailAddress.objects.add_email(user3, 'test3@test.com')\r\n \r\n post_content = {\"value\" : \"test2@test.com\", \"registration\" : True}\r\n response = self.client.post(reverse('api_manage_email'),\r\n json.dumps(post_content),\r\n content_type='application/json')\r\n self.assertEquals(response.status_code,\r\n 400,\r\n \"There email address has to be unique. 
Duplicates has been aceepted!\")\r\n \r\n user_list = User.objects.filter(email=\"test2@test.com\")\r\n \r\n self.assertEquals(len(user_list),\r\n 0,\r\n \"The user that tried to register with a duplicate email was not deleted!\")\r\n","sub_path":"email_rest/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"107792977","text":"import calculos\nimport time\nimport turtle\n\n#Travando\ndef grafico_percurso(lst):\n fturt = turtle.Turtle()\n fturt.hideturtle()\n fturt.up()\n cord_x, cord_y = ts.window_width()/2 - 100, ts.window_height()/-2 + 230\n fturt.goto(cord_x, cord_y)\n fturt.down()\n fturt.color(\"green\")\n fturt.goto(cord_x, cord_y+10)\n fturt.color(\"black\")\n fturt.write(0)\n fturt.color(\"green\")\n fturt.goto(cord_x, cord_y-10)\n fturt.color(\"black\")\n distancia_atual = 0\n i = 1\n km = 0\n while i != len(latitudes):\n distancia_atual += calculos.distancia(latitudes[i-1:i+1], longitudes[i-1:i+1])\n if distancia_atual >= 1:\n fturt.goto(cord_x+lst[i][0], cord_y+lst[i][1]+5)\n km += 1\n fturt.write(km)\n fturt.goto(cord_x+lst[i][0], cord_y+lst[i][1]-5)\n fturt.up()\n fturt.goto(cord_x+lst[i][0], cord_y+lst[i][1])\n fturt.down()\n distancia_atual = 0\n fturt.goto(cord_x+lst[i][0], cord_y+lst[i][1])\n i += 1\n fturt.color(\"red\")\n fturt.goto(cord_x+lst[-1][0], cord_y+lst[-1][1]+10)\n fturt.goto(cord_x+lst[-1][0], cord_y+lst[-1][1]-10)\n fturt.down()\n\ndef juncao_graficos(lst1, lst2, lst3):\n global pos_y_graf\n listas = [lst1, lst2, lst3]\n maximos = [max(lst1), max(lst2), max(lst3)]\n indice = 0\n for i in range(len(maximos)):\n if maximos[i] > maximos[indice]:\n indice = i\n escalas = calculos.escala(len(listas[indice]), max(listas[indice]))\n i = 0\n pen.color(\"blue\")\n while i < len(listas):\n if i != indice:\n grafico(listas[i], False, escalas)\n pen.color(\"red\")\n i += 1\n pen.color(\"black\")\n grafico(listas[indice])\n nome_listas = [\"ritmos\", \"altitudes\", \"bpms\"]\n pen.goto(ts.window_width()/-2 + 30, pos_y_graf+210)\n pen.down()\n pen.goto(ts.window_width()/-2 + 45, pos_y_graf+210)\n pen.up()\n pen.goto(ts.window_width()/-2 + 50, pos_y_graf+202)\n pen.write(nome_listas[indice])\n pen.color(\"blue\")\n pen.goto(ts.window_width()/-2 + 30, pos_y_graf+195)\n pen.down()\n pen.goto(ts.window_width()/-2 + 45, pos_y_graf+195)\n pen.color(\"black\")\n pen.up()\n pen.goto(ts.window_width()/-2 + 50, pos_y_graf+187)\n nome_listas.pop(indice)\n pen.write(nome_listas[0])\n pen.up()\n pen.goto(ts.window_width()/-2 + 30, pos_y_graf+180)\n pen.down()\n pen.color(\"red\")\n pen.goto(ts.window_width()/-2 + 45, pos_y_graf+180)\n pen.up()\n pen.goto(ts.window_width()/-2 + 50, pos_y_graf+172)\n pen.color(\"black\")\n pen.write(nome_listas[1])\n pen.up()\n pos_y_graf -= 50\n\ndef grafico(lst, avancar=True, escalas=[]):\n global pos_y_graf\n pos_x = ts.window_width()/-2 + 30\n\n if avancar:\n pen.goto(pos_x, pos_y_graf-10)\n pen.down()\n pen.goto(pos_x, pos_y_graf+210)\n pen.up()\n pen.goto(pos_x-10, pos_y_graf)\n pen.down()\n pen.goto(pos_x+320, pos_y_graf)\n pen.up()\n calculos.pontos_x(len(lst), pos_x, pos_y_graf, pen)\n calculos.pontos_y(max(lst), pos_x, pos_y_graf, pen)\n escs = calculos.escala(len(lst), max(lst))\n else:\n escs = escalas\n\n pen.goto(pos_x, pos_y_graf+lst[0]*escs[1])\n pen.down()\n i = 1 \n for x in lst[1:]:\n pen.goto(pos_x+i*escs[0], pos_y_graf+x*escs[1])\n i += 1\n pen.up()\n\n if avancar:\n pos_y_graf -= 250\n 
print(pos_y_graf)\n\ndef escrever(str, turt, jump=20):\n global pos_y\n turt.goto((ts.window_width()/2-300, pos_y))\n turt.write(str, font=(\"Arial\", 14, \"normal\"))\n pos_y -= jump \n\ndef infos_tempo():\n Tturt = turtle.Turtle()\n Tturt.hideturtle()\n Tturt.up()\n escrever(\"Data: {}\".format(time.strftime('%d-%m-%Y %H:%M:%S', time.localtime(tempos[0]))), Tturt)\n escrever(\"Duração: {}\".format(time.strftime(\"%H:%M:%S\", time.gmtime(calculos.duracao(tempos)))), Tturt, 40)\n return Tturt\n\ndef infos_resumo_corrida():\n Tturt = turtle.Turtle()\n Tturt.hideturtle()\n Tturt.up()\n distancia_total, tempo_total, alt_max, alt_min = calculos.resumo_corrida(dados, tempos, latitudes, longitudes, altitudes, False)\n escrever(\"Distância total percorrida: {0:.2f} km\".format(distancia_total), Tturt)\n escrever(\"Tempo total: {}\".format(time.strftime(\"%H:%M:%S\", time.gmtime(tempo_total))), Tturt)\n escrever(\"Ritmo: {0:.0f} mins/km\".format(calculos.ritmo(registros, latitudes, longitudes)), Tturt)\n if max(bpms) == 0:\n bpm_max = \"Indisponível\"\n bpm_min = \"Indisponível\"\n else:\n bpm_max = max(bpms)\n bpm_min = calculos.min_bpms(bpms)\n escrever(\"Bpm máxima {0:.0f}\".format(bpm_max), Tturt)\n escrever(\"Bpm mínima: {0:.0f}\".format(bpm_min), Tturt)\n escrever(\"Cadência: {0:.0f} passos/min\".format(calculos.cadencia(registros, passos)), Tturt)\n escrever(\"Altitude máxima: {0:.6f}\".format(alt_max), Tturt)\n escrever(\"Altitude mínima: {0:.6f}\".format(alt_min), Tturt, 40)\n return Tturt\n\ndef infos_km(turt):\n i = 1\n inicio = 0\n distancia_atual = 0\n while i != len(latitudes):\n distancia_atual += calculos.distancia(latitudes[i-1:i+1], longitudes[i-1:i+1])\n if distancia_atual >= 1:\n dif_altitudes = altitudes[i] - altitudes[inicio]\n tempo = registros[i] - registros[inicio]\n escrever(time.strftime(\"%H:%M:%S\", time.gmtime(tempo)), turt)\n escrever(\"Ritmo: {} mins/km\".format(tempo//distancia_atual), turt)\n escrever(\"Cadência: {} passos/min\".format(calculos.cadencia(registros[inicio:i+1], passos[inicio:i+1])), turt)\n escrever(\"Altitude: {}\".format(dif_altitudes), turt)\n escrever(\"Média ponderada de BPM: {}\".format(calculos.media_pond(bpms[inicio:i+1])), turt, 40)\n distancia_atual = 0\n inicio = i\n i += 1\n\ndados = []\n\narquivo = input(\"Digite o nome do arquivo: \")\n\nwith open(arquivo, \"r\") as infos:\n dados_atuais = ()\n for info in infos:\n if info[0] == \"#\":\n dados.append(dados_atuais)\n dados_atuais = ()\n else:\n dados_atuais = dados_atuais + (info[:len(info)-1],)\n\nregistros = calculos.find_dados(dados, \"r\")\nlongitudes = calculos.find_dados(dados, \"n\")\nlatitudes = calculos.find_dados(dados, \"l\")\naltitudes = calculos.find_dados(dados, \"a\")\nbpms = calculos.find_dados(dados, \"b\")\npassos = calculos.find_dados(dados, \"p\")\ntempos = [int(x) for x in str(dados[0][0]).split()]\nritmos = calculos.ritmos(registros, latitudes, longitudes)\n\nduracao = tempos[1] - tempos[0]\n\n#janela\nwindow = turtle.Screen()\nwindow.setup(width = 1.0, height = 1.0)\nts = turtle.getscreen()\npen = turtle.Turtle()\npen.hideturtle()\nwindow.delay(0)\npen.up()\n\npos_y = ts.window_height()/2 - 30\npos_y_graf = ts.window_height()/2 - 250 \n\n\"\"\"e = 0\nwhile e != 1:\n actions = []\n e = 1\"\"\"\n\ninfos_tempo()\ninfos_resumo_corrida()\ngrafico_percurso(calculos.percurso(latitudes, longitudes))\ngrafico(altitudes)\ngrafico(bpms)\njuncao_graficos(ritmos, altitudes, 
bpms)\n#infos_km(pen)\n\nwindow.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"548657865","text":"import utils\nimport numpy as np\nimport tensorflow as tf\n\n\nclass PolicyEstimator():\n \"\"\"\n Policy Function Neural Net. \n \"\"\"\n def __init__(self, state_dim, action_dim=6, learning_rate=0.001, scope=\"policy_estimator\"):\n with tf.variable_scope(scope):\n self.state = tf.placeholder(dtype=tf.float32, shape=[state_dim,], name=\"state\")\n self.action = tf.placeholder(dtype=tf.int32, name=\"action\")\n self.target = tf.placeholder(dtype=tf.float32, name=\"target\")\n\n self.hidden_layer_1 = tf.layers.dense(\n inputs=tf.expand_dims(self.state, 0),\n units=128,\n activation=tf.nn.relu\n )\n self.hidden_layer_2 = tf.layers.dense(\n inputs=tf.expand_dims(self.hidden_layer_1, 0),\n units=128,\n activation=tf.nn.relu\n )\n self.hidden_layer_3 = tf.layers.dense(\n inputs=tf.expand_dims(self.hidden_layer_2, 0),\n units=128,\n activation=tf.nn.relu\n )\n self.output_layer = tf.contrib.layers.fully_connected(\n inputs=self.hidden_layer_3,\n num_outputs=action_dim,\n activation_fn=None,\n weights_initializer=tf.zeros_initializer)\n\n self.action_probs = tf.squeeze(tf.nn.softmax(self.output_layer))\n self.picked_action_prob = tf.gather(self.action_probs, self.action)\n\n # Loss and train op\n self.loss = -tf.log(self.picked_action_prob) * self.target\n\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n self.train_op = self.optimizer.minimize(\n self.loss, global_step=tf.train.get_global_step())\n\n def predict(self, state, sess=None):\n sess = sess or tf.get_default_session()\n return sess.run(self.action_probs, { self.state: state })\n\n def update(self, state, target, action, sess=None):\n sess = sess or tf.get_default_session()\n feed_dict = { self.state: state, self.target: target, self.action: action }\n _, loss = sess.run([self.train_op, self.loss], feed_dict)\n return loss ","sub_path":"PolicyEstimator.py","file_name":"PolicyEstimator.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"537196911","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/5/2 17:23\n# @Author : RanyLra\n# @Wechat : RanY_Luck\n# @File : db.py\n\nimport pymysql\n\nfrom tools.read_file import ReadFile\n\n\nclass DB:\n mysql = ReadFile.read_config('$.database')\n\n def __init__(self):\n \"\"\"初始化连接Mysql\"\"\"\n self.connection = pymysql.connect(\n host=self.mysql.get('host', 'host'),\n port=self.mysql.get('port', 'port'),\n user=self.mysql.get('user', 'user'),\n password=self.mysql.get('password', 'password'),\n db=self.mysql.get('db_name', 'db_name'),\n charset=self.mysql.get('charset', 'utf8mb4'),\n cursorclass=pymysql.cursors.DictCursor\n )\n # def __init__(self):\n # \"\"\"初始化连接Mysql\"\"\"\n # self.connection = pymysql.connect(\n # host=self.mysql.get('host', 'localhost'),\n # port=self.mysql.get('port', 3306),\n # user=self.mysql.get('user', 'root'),\n # password=self.mysql.get('password', '123456'),\n # db=self.mysql.get('db_name', 'test'),\n # charset=self.mysql.get('charset', 'utf8mb4'),\n # cursorclass=pymysql.cursors.DictCursor\n # )\n\n def fetch_one(self, sql: str) -> object:\n \"\"\"查询数据,查一条\"\"\"\n with self.connection.cursor() as cursor:\n cursor.execute(sql)\n result = cursor.fetchone()\n # 使用commit解决查询数据出现概率查错问题\n 
self.connection.commit()\n return result\n\n def close(self):\n \"\"\"关闭数据库连接\"\"\"\n self.connection.close()\n\n\nif __name__ == '__main__':\n print(ReadFile.read_config('$.database'))\n DB()\n","sub_path":"tools/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"108813381","text":"import sys\nsys.path.append(\"../..\")\nimport game\nPRONF=2\nmax_val=-50\nmin_val=50\ncpt = 0 \n\ndef evaluation(jeu,coup, joueur,ev):\n\n return ev[0]*Score(jeu,coup,joueur)+ev[1]*CaseConsecutif(jeu,coup,joueur)+ev[2]*Piege(jeu,joueur)+ev[3]*Krou(jeu,coup,joueur)\n \ndef Score(jeu,coup,joueur):\n L=game.getScores(jeu)\n \n if joueur==1:\n return L[0]-L[1]\n else:\n return L[1]-L[0]\n \ndef CaseConsecutif(jeu,coup,joueur):\n cj = game.getCoupsJoues(jeu)\n i=-6\n\n while i<0:\n if len(cj)<15 and (len(cj)+i)<0 : #si in est plus en debut et si il n'y a plus de coup precedent\n return 0\n elif abs(cj[i][1]-coup[1])==1: #si c'est un voisin des deux dernier\n return 0\n i=i+2\n return 1\n\ndef Piege(jeu,joueur):\n L=game.getScores(jeu)\n eval=0\n j1=0\n j2=0\n\n \n if(L[0]+L[1]>48-15):\n for i in range(0,6):\n j1=game.getCaseVal(jeu,0,i)\n j2=game.getCaseVal(jeu,1,i)\n \n if(joueur==1):\n \"\"\"\n if game.getCaseVal(jeu,1,1)<=1 and game.getCaseVal(jeu,1,0)==1:\n eval=eval+2\n \"\"\"\n eval=eval+j1-j2\n \n else:\n \"\"\"\n if game.getCaseVal(jeu,0,4)<=1 and game.getCaseVal(jeu,0,5)==1:\n eval=eval+2\n \"\"\"\n eval=eval+j2-j1\n return eval\n\"\"\"\ndef Piege(jeu,coup,joueur):\n L=game.getScores(jeu)\n eval=0\n\n \n if(L[0]+L[1]>48-15):\n if(joueur==1):\n if game.getCaseVal(jeu,1,1)<=1 and game.getCaseVal(jeu,0,0)==1:\n if (coup[1]-game.getCaseVal(jeu,joueur-1,0)) >=0:\n return 2\n \n \n if game.getCaseVal(jeu,0,0)>=1:\n if coup[1]-game.getCaseVal(jeu,0,coup[1])<0:\n return 1\n else:\n return -1\n else:\n if game.getCaseVal(jeu,0,5)<=1 and game.getCaseVal(jeu,0,0)==1:\n if (5-coup[1]-game.getCaseVal(jeu,joueur-1,0)) >=0:\n return 5\n \n if game.getCaseVal(jeu,0,0)>=1:\n if 5-coup[1]-game.getCaseVal(jeu,0,coup[1])<0:\n return 1\n else:\n return -1\n return 0\n \n\"\"\" \ndef Krou(jeu,coup,joueur):\n eval=0\n for i in range(0,6):\n if game.getCaseVal(jeu,joueur-1,i)>=12:\n eval=eval+1\n return eval\n \n \n return case\n \n\n\ndef decision(jeu,ev): \n joueur = game.getJoueur(jeu)\n L= game.getCoupsValides(jeu)\n J=[]\n alpha=-5000000\n beta=5000000\n challenger=0\n \n i=0\n if(joueur==1):\n i=len(L)-1\n ok=0\n while(ok==0):\n l=estimation(game.getCopieJeu(jeu),L[i],1,joueur,alpha,beta,ev)\n if(l>alpha):\n alpha=l\n challenger=i\n if(joueur==1):\n i-=1\n if(i<0):\n ok=1\n else:\n i+=1\n if(i>=len(L)):\n ok=1\n #J.append(l)\n #jeu2=game.getCopieJeu(jeu)\n\n\n\n #while i< len (J):\n # if(J[challenger]=PRONF:\n return evaluation(jeu,coup,joueur,ev)\n \n else:\n L=game.getCoupsValides(jeu)\n \n if(joueur==game.getJoueur(jeu)):\n val = -5000000\n for i in L:\n jeu2=game.getCopieJeu(jeu)\n val = max(val, estimation(jeu2,i,prf+1,joueur,alpha,beta,ev))\n if val >= beta:\n return val+1\n alpha = max(val, alpha)\n return val\n else:\n val = 5000000\n for i in L:\n jeu2=game.getCopieJeu(jeu)\n val = min(val, estimation(jeu2,i,prf+1,joueur,alpha,beta,ev))\n if val <= alpha:\n return val-1\n beta = min(val, beta)\n return val\n \n \n\ndef saisieCoup(jeu,ev): \n coup=decision(jeu,ev)\n return 
coup\n\n\n\n","sub_path":"Awele/Joueurs/Alpha_Beta_Train.py","file_name":"Alpha_Beta_Train.py","file_ext":"py","file_size_in_byte":4397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"242053082","text":"from SimPEG import Survey\n\nclass BaseSrc(Survey.BaseSrc):\n \"\"\"\n Defining Base frequency source\n\n :param float maxFreq: maximum frequency of source wavelet. Default value is 30.0\n \"\"\"\n def __init__(self,rxList,**kwargs):\n self.maxFreq=30.0 # if 'maxFreq' is not in kwargs, then default value is 30.0\n super().__init__(rxList,**kwargs)\n\n\nif __name__ == \"__main__\":\n src = BaseSrc([],maxFreq=2.3)\n print(src.maxFreq)\n #print(src.__doc__)\n","sub_path":"simpegseis/Acoustic/FD/SrcFD.py","file_name":"SrcFD.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"513045723","text":"\"\"\"\nCommon tools for deploy run shell and the like.\n\"\"\"\nimport os\nimport sys\n\nfrom fabric.api import lcd, task, local\n\n\n@task\ndef test():\n \"\"\"Run the test suite\"\"\"\n local(\"python -m unittest discover\")\n with lcd('assets'):\n local('node_modules/grunt/bin/grunt jasmine')\n\n\n@task\ndef install():\n \"\"\"Install the node_modules dependencies\"\"\"\n local('git submodule update --init')\n with lcd('assets'):\n local('npm install')\n\n\n@task\ndef watch():\n \"\"\"Grunt watch development files\"\"\"\n with lcd('assets'):\n local('node_modules/grunt/bin/grunt concat copy less:dev watch')\n\n\n@task\ndef compile():\n \"\"\"Compile assets for production.\"\"\"\n with lcd('assets'):\n local('node_modules/grunt/bin/grunt copy less:prod min')\n\ndef setup_paths():\n \"\"\"Setup sys.path with everything we need to run.\"\"\"\n import google\n \n DIR_PATH = os.path.abspath(os.path.dirname(os.path.dirname(google.__file__)))\n \n EXTRA_PATHS = [\n os.getcwd(),\n DIR_PATH,\n os.path.join(DIR_PATH, 'lib', 'antlr3'),\n os.path.join(DIR_PATH, 'lib', 'django_1_4'),\n os.path.join(DIR_PATH, 'lib', 'fancy_urllib'),\n os.path.join(DIR_PATH, 'lib', 'ipaddr'),\n os.path.join(DIR_PATH, 'lib', 'jinja2'),\n os.path.join(DIR_PATH, 'lib', 'protorpc'),\n os.path.join(DIR_PATH, 'lib', 'markupsafe'),\n os.path.join(DIR_PATH, 'lib', 'webob_0_9'),\n os.path.join(DIR_PATH, 'lib', 'webapp2'),\n os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),\n os.path.join(DIR_PATH, 'lib', 'simplejson'),\n os.path.join(DIR_PATH, 'lib', 'google.appengine._internal.graphy'),\n ]\n \n sys.path = EXTRA_PATHS + sys.path\n\ndef read_app_config():\n \"\"\"Load the app.yaml file and add some things to env.\"\"\"\n setup_paths()\n from google.appengine.tools.dev_appserver import ReadAppConfig\n appinfo = ReadAppConfig('app.yaml')\n return appinfo\n\n@task\ndef deploy(version='', appid='.'):\n \"\"\"\n Deploy an application::\n \n Standard Deploy\n $ fab deploy\n \n Deploy a different version than in the app.yaml\n $ fab deploy:version=fabulous\n \n Deploy to a different appid\n $ fab deploy:appid=someotherapp\n \"\"\"\n compile()\n yaml_file = os.path.join(appid, 'app.yaml')\n assert os.path.isfile(yaml_file), \"Could not find app.yaml file\"\n version_str = '-V %s' % version if version else ''\n cmd = 'appcfg.py update %s %s' % (version_str, appid)\n local(cmd)\n\ndef prep_shell(prefix, appid=None, server=None):\n \"\"\"Setup the remote shell for either remote or local.\n \n * prefix: either 's~' or 'dev~' \n * appid: Use a different application id\n * server: Point to a different server\n 
\"\"\"\n\n appinfo = read_app_config()\n # TODO: assert 'remote_api' in appinfo.builtins\n \n if hasattr(appinfo, 'env_variables'):\n os.environ.update(appinfo.env_variables)\n \n if appid is not None:\n appinfo.application = appid\n \n if server is None:\n server = '%s.appspot.com' % appinfo.application\n \n application = '%s%s' % (prefix, appinfo.application)\n \n return application, server\n\n@task\ndef remote_shell(appid=None, server=None):\n \"\"\"\n Open a remote shell for this application\n \n The builtin 'remote_api' must be set to 'on' in your app.yaml file.\n \n $ fab remote_shell\n \n Use a different application id:\n $ fab remote_shell:different-id\n \n Point to a different server\n $ fab remote_shell:server=other-app.appspot.com\n \"\"\"\n \n application, server = prep_shell('s~', appid, server)\n \n from google.appengine.tools import remote_api_shell\n from google.appengine.tools import appengine_rpc\n \n remote_api_shell.remote_api_shell(server, application, \n remote_api_shell.DEFAULT_PATH, True, appengine_rpc.HttpRpcServer)\n\n@task\ndef local_shell(appid=None, server=None):\n \"\"\"\n Open a local shell for this application\n \n The builtin 'remote_api' must be set to 'on' in your app.yaml file.\n The default will use the remote api against your local dev server,\n which is at 'localhost:8080'\n \n $ fab local_shell\n \n Use a different application id:\n $ fab local_shell:different-id\n \n Point to a different server\n $ fab local_shell:server=localhost:8081\n \"\"\"\n server = server or 'localhost:8080'\n \n application, server = prep_shell('dev~', appid, server)\n \n from google.appengine.tools import remote_api_shell\n from google.appengine.tools import appengine_rpc\n \n remote_api_shell.remote_api_shell(server, application, \n remote_api_shell.DEFAULT_PATH, False, appengine_rpc.HttpRpcServer)\n\n@task\ndef runserver(use_sqlite='True', port=8080, clear_datastore=False):\n \"\"\"\n Run the development server.\n \n Helper command to run dev_appserver::\n \n $ fab runserver\n \n Clear the datastore\n $ fab runserver:clear_datastore=1\n \n Use a different port.\n $ fab runserver:port=8089\n \"\"\"\n cmd = 'dev_appserver.py -p %s ' % port\n if use_sqlite.lower() not in ['0', 'false', 'f']:\n cmd += '--use_sqlite '\n if clear_datastore:\n cmd += '-c '\n cmd += '.'\n local(cmd)","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"604562194","text":"# coding: utf-8\nfrom asyncio import AbstractEventLoop\nimport glob\nimport ntpath\nimport os\nimport typing\n\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.engine import create_engine\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.orm import sessionmaker\n\nfrom rolling.exception import ComponentNotPrepared\nfrom rolling.exception import NoZoneMapError\nfrom rolling.exception import RollingError\nfrom rolling.log import kernel_logger\nfrom rolling.map.legend import WorldMapLegend\nfrom rolling.map.legend import ZoneMapLegend\nfrom rolling.map.source import WorldMapSource\nfrom rolling.map.source import ZoneMap\nfrom rolling.map.source import ZoneMapSource\nfrom rolling.map.type.world import Sea\nfrom rolling.map.type.world import WorldMapTileType\nfrom rolling.map.type.zone import ZoneMapTileType\nfrom rolling.server.extension import ClientSideDocument\nfrom rolling.server.extension import ServerSideDocument\nfrom rolling.server.zone.websocket import 
ZoneEventsManager\n\n\nclass Kernel(object):\n def __init__(\n self,\n world_map_str: str = None,\n loop: AbstractEventLoop = None,\n tile_maps_folder: typing.Optional[str] = None,\n ) -> None:\n self._tile_map_legend: typing.Optional[ZoneMapLegend] = None\n self._world_map_legend: typing.Optional[WorldMapLegend] = None\n self._world_map_source: typing.Optional[WorldMapSource] = WorldMapSource(\n self, world_map_str\n ) if world_map_str else None\n self._tile_maps_by_position: typing.Optional[\n typing.Dict[typing.Tuple[int, int], ZoneMap]\n ] = None\n\n # Database stuffs\n self._client_db_session: typing.Optional[Session] = None\n self._client_db_engine: typing.Optional[Engine] = None\n\n self._server_db_session: typing.Optional[Session] = None\n self._server_db_engine: typing.Optional[Engine] = None\n\n # Zone websocket\n self._server_zone_events_manager = ZoneEventsManager(self, loop=loop)\n\n # Generate tile maps if tile map folder given\n if tile_maps_folder is not None:\n self._tile_maps_by_position: typing.Dict[\n typing.Tuple[int, int], ZoneMap\n ] = {}\n\n for tile_map_source_file_path in glob.glob(\n os.path.join(tile_maps_folder, \"*.txt\")\n ):\n tile_map_source_file_name = ntpath.basename(tile_map_source_file_path)\n row_i, col_i = map(\n int, tile_map_source_file_name.replace(\".txt\", \"\").split(\"-\")\n )\n kernel_logger.debug(\n 'Load tile map \"{}\"'.format(tile_map_source_file_name)\n )\n\n with open(tile_map_source_file_path, \"r\") as f:\n tile_map_source_raw = f.read()\n\n self._tile_maps_by_position[(row_i, col_i)] = ZoneMap(\n row_i, col_i, ZoneMapSource(self, tile_map_source_raw)\n )\n\n @property\n def server_zone_events_manager(self) -> ZoneEventsManager:\n if self._server_zone_events_manager is None:\n raise ComponentNotPrepared(\n \"self._server_zone_events_manager must be prepared before usage\"\n )\n\n return self._server_zone_events_manager\n\n @property\n def world_map_source(self) -> WorldMapSource:\n if self._world_map_source is None:\n raise ComponentNotPrepared(\n \"self._world_map_source must be prepared before usage\"\n )\n\n return self._world_map_source\n\n @world_map_source.setter\n def world_map_source(self, value: WorldMapSource) -> None:\n self._world_map_source = value\n\n @property\n def tile_maps_by_position(self) -> typing.Dict[typing.Tuple[int, int], ZoneMap]:\n if self._world_map_source is None:\n raise ComponentNotPrepared(\n \"self._tile_maps_by_position must be prepared before usage\"\n )\n\n return self._tile_maps_by_position\n\n @property\n def world_map_legend(self) -> WorldMapLegend:\n if self._world_map_legend is None:\n # TODO BS 2018-12-20: Consider it can be an external source\n self._world_map_legend = WorldMapLegend(\n {\n \"~\": \"SEA\",\n \"^\": \"MOUNTAIN\",\n \"ፆ\": \"JUNGLE\",\n \"∩\": \"HILL\",\n \"⡩\": \"BEACH\",\n \"⠃\": \"PLAIN\",\n },\n WorldMapTileType,\n default_type=Sea,\n )\n\n return self._world_map_legend\n\n @property\n def tile_map_legend(self) -> ZoneMapLegend:\n if self._tile_map_legend is None:\n # TODO BS 2018-12-20: Consider it can be an external source\n self._tile_map_legend = ZoneMapLegend(\n {\n \" \": \"NOTHING\",\n \"⡩\": \"SAND\",\n \"⁘\": \"SHORT_GRASS\",\n \"ൖ\": \"DRY_BUSH\",\n \"#\": \"ROCK\",\n \"⑉\": \"ROCKY_GROUND\",\n \"~\": \"SEA_WATER\",\n },\n ZoneMapTileType,\n )\n\n return self._tile_map_legend\n\n @property\n def client_db_session(self) -> Session:\n if self._client_db_session is None:\n raise ComponentNotPrepared(\"client_db_session is not created yet\")\n\n return 
self._client_db_session\n\n @property\n def server_db_session(self) -> Session:\n if self._server_db_session is None:\n raise ComponentNotPrepared(\"server_db_session is not created yet\")\n\n return self._server_db_session\n\n def get_tile_map(self, row_i: int, col_i: int) -> ZoneMap:\n try:\n return self.tile_maps_by_position[(row_i, col_i)]\n except KeyError:\n raise NoZoneMapError(\"No zone map for {},{} position\".format(row_i, col_i))\n\n def init_client_db_session(self) -> None:\n kernel_logger.info('Initialize database connection to \"client.db\"')\n self._client_db_engine = create_engine(\"sqlite:///client.db\")\n self._client_db_session = sessionmaker(bind=self._client_db_engine)()\n ClientSideDocument.metadata.create_all(self._client_db_engine)\n\n def init_server_db_session(self) -> None:\n kernel_logger.info('Initialize database connection to \"server.db\"')\n self._server_db_engine = create_engine(\"sqlite:///server.db\")\n self._server_db_session = sessionmaker(bind=self._server_db_engine)()\n ServerSideDocument.metadata.create_all(self._server_db_engine)\n\n def get_start_world_coordinates(self) -> typing.Tuple[int, int]:\n # FIXME BS 2019-01-10: hardcoded\n return 2, 1\n\n def get_start_zone_coordinates(\n self, world_row_i: int, world_col_i: int\n ) -> typing.Tuple[int, int]:\n # FIXME BS 2019-01-10: hardcoded\n return 29, 29\n","sub_path":"rolling/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"29008945","text":"## -*- Encoding: UTF-8 -*-\nimport urllib.request\nimport urllib.parse \n\nfrom tkinter import *\nfrom tkinter.simpledialog import *\nfrom tkinter import ttk\n\nimport json \n\nclass Vue():\n def __init__(self,parent):\n self.parent=parent\n self.root=Tk()\n self.cadreapp=Frame(self.root)\n self.canevas=Canvas(self.cadreapp,width=800,height=600)\n self.canevas.create_text(400,100,anchor=CENTER,text=\"Bienvenue a SaaS COMMANDE\")\n self.canevas.pack()\n self.listeclients=Listbox(self.canevas,width=50,height=10)\n self.canevas.create_window(400,300,anchor=CENTER,window=self.listeclients)\n self.btnclients=Button(self.canevas,text=\"Chercher nos client\",command=self.demanderclients)\n self.canevas.create_window(400,500,anchor=CENTER,window=self.btnclients)\n self.cadreapp.pack()\n \n def demanderclients(self):\n self.parent.demanderclients()\n \n def afficherclients(self,clients):\n self.listeclients.delete(0,END)\n for i in clients:\n self.listeclients.insert(END,i)\n \nclass Modele():\n def __init__(self,parent,nom,compagnie):\n self.parent=parent\n self.nom=nom\n self.compagnie=compagnie\n\n def demanderclients(self):\n self.clients=self.parent.requeteserveur(\"demanderclients\")\n self.clients.sort()\n listedeclients=[]\n for i in self.clients:\n listedeclients.append(i[0]+\", \"+i[1]+\", \"+i[2])\n self.parent.afficherclients(listedeclients)\n \n \nclass Controleur():\n def __init__(self):\n self.vue=Vue(self)\n print(sys.argv)\n self.urlserveur=sys.argv[1]\n usager=json.loads(sys.argv[2])\n self.modele=Modele(self,usager[0],usager[1])\n self.vue.root.mainloop()\n \n def requeteserveur(self,fonc):\n leurl=self.urlserveur+\"/requeteserveur\"\n params = {\"fonction\":fonc}\n reptext=self.appelserveur(leurl,params)\n rep=json.loads(reptext)\n return rep\n \n # fonction d'appel normalisee, utiliser par les methodes du controleur qui communiquent avec le serveur\n def appelserveur(self,url,params):\n query_string = 
urllib.parse.urlencode( params )\n data = query_string.encode( \"ascii\" )\n url = url + \"?\" + query_string \n rep=urllib.request.urlopen(url , data)\n reptext=rep.read()\n return reptext\n\n def demanderclients(self):\n self.modele.demanderclients()\n \n def afficherclients(self,clients):\n self.vue.afficherclients(clients)\nif __name__ == '__main__':\n c=Controleur()","sub_path":"_Backup/CodeProf/SaaS_serveur/SaaS_modules/SaaS_commande.py","file_name":"SaaS_commande.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"612726651","text":"from encounter import break_shield_trixie\nfrom fast import sleep, skip, accept, agree, fast\nfrom info import *\nfrom model import State, Pos, Pony\nfrom util import logboth\n\n# See README.md for description of the role of this file\n\n@logboth\ndef intercourse(st: State, square: Pos, wait=0.8, click=0):\n \"\"\"\n Manage clicking at the right time and places during the intercourse scene(s).\n Also update the state as needed.\n \"\"\"\n square.click(); sleep(6.6)\n pos_exclamation.click(); sleep(wait)\n skip(click); sleep(.4)\n pos_end.click(); sleep(5)\n pos_next.click(); sleep(2.6)\n st.location = home\n st.day += 1\n\n\n@logboth\ndef get_vinyl(st: State):\n vinyl_disk.touch(st); sleep(3.5)\n skip(2)\n forward.do()\n center.click()\n sleep(18.5)\n intercourse(st, square=pos_four)\n\n\n@logboth\ndef get_trixie1(st: State):\n assert(st.pony == Pony.HORN)\n st.assert_moon()\n trixie.touch(st) # Zoom\n break_shield_trixie(st)\n skip()\n pos_trixie_square.click()\n sleep(3.2)\n skip(5)\n sleep(3)\n skip(5)\n sleep(3)\n intercourse(st, square=pos_four, wait=3.2, click=5)\n\n\n@logboth\ndef get_pinkie_pie(st: State):\n cake_house.go(st)\n forward.do()\n sleep(10)\n intercourse(st, square=pos_four, wait=2)\n\n\n@logboth\ndef get_fluttershy(st: State):\n fluttershy_window.touch(st)\n skip(7); sleep(3.4)\n skip()\n fluttershy_lamp.touch(st); sleep(2.6)\n skip(2); sleep(.6)\n accept() # Why?\n skip(8); sleep(.6)\n accept() # Cheer up\n skip(2); sleep(.6)\n agree() # Let's fuck\n skip(4); sleep(.6)\n accept() # Want it?\n skip(4); sleep(3)\n intercourse(st, square=pos_four, wait=2.4, click=4)\n\n\n@logboth\ndef get_derpy(st: State):\n Location([forward, rotate_left]).go(st)\n center.click()\n skip(2)\n center.click()\n skip()\n sleep(4) # hearing\n center.click()\n skip(2)\n agree()\n skip()\n sleep(29) # saving derpy & intro\n intercourse(st, square=pos_three, wait=13.2, click=0)","sub_path":"pony_up.py","file_name":"pony_up.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"6117417","text":"import sys, os\nfrom PyQt4 import QtCore, QtGui\nimport random\n\nclass AddressBook(QtGui.QWidget):\n def __init__(self, parent=None):\n super(AddressBook, self).__init__(parent)\n \n mainLayout = QtGui.QHBoxLayout()\n \n label = QtGui.QLabel(\"Menu\")\n label.setFrameStyle(label.Box)\n label.setMinimumWidth(100)\n \n policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)\n label.setSizePolicy(policy)\n mainLayout.addWidget(label)\n \n\n self.styleComboBox = QtGui.QComboBox()\n self.styleComboBox.addItem(\"abc________\")\n self.styleComboBox.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents) \n mainLayout.addWidget(self.styleComboBox)\n \n mainLayout.addStretch(1)\n self.setLayout(mainLayout)\n\n self.setWindowTitle(\"Combo Sizing Policy\")\n \n 
self.styleComboBox.activated[str].connect(self.addStyle)\n self.acc = 'sos'\n \n def addStyle(self, stl):\n self.acc = '%sx%x' % (self.acc, random.randint(10, 10000))\n self.styleComboBox.addItem(self.acc)\n \nif __name__ == '__main__':\n import sys\n\n app = QtGui.QApplication(sys.argv)\n\n addressBook = AddressBook()\n \n addressBook.setGeometry(100, 100, 640, 480)\n addressBook.show()\n\n sys.exit(app.exec_())\n \n","sub_path":"pyqt_sizing/sizing.py","file_name":"sizing.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"190981931","text":"import numpy as np\nfrom playML.metrics import r2_score\n\nclass LinearRegression:\n\n def __init__(self):\n \"初始化 LinearRegression模型\"\n # coef:系数\n # interception:截距\n # theta:表示整个参数向量\n # 变量后带有_ 代表本类中提供的变量\n # 变量前带有_ 代表私有的变量 但都不是py强制的 是自己定义的规则\n self.coef_ = None\n self.interception_ = None\n self._theta = None\n\n def fit_normal(self,X_train,Y_train):\n assert X_train.shape[0] == Y_train.shape[0],\"数据尺寸大小必须一致\"\n\n # len方法返回向量的行数长度\n X_b = np.hstack([np.ones((len(X_train),1)),X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(Y_train)\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n return self\n\n def fit_gd(self, x_train, y_train, eta = 0.01, n_iters = 1e4,epsilon=1e-8):\n # x是特征 y是值 train是训练数据 test是测试数据\n # 使用梯度下降法训练模型\n # x一般是矩阵 y一般向量即可\n assert x_train.shape[0] == y_train.shape[0],\"数据纬度必须一致\"\n\n def J(theta, X_b, y):\n # 定义损失函数 MSE\n try:\n return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)\n except:\n return float('inf')\n\n def dJ(theta, X_b, y):\n # 定义梯度函数,返回梯度参数\n # res = np.empty(len(theta))\n # res[0] = np.sum(X_b.dot(theta) - y)\n # for i in range(1, len(theta)):\n # # 向量点乘会自动对应项相乘再累加\n # # X_b.dot(theta) 是向量类型\n # res[i] = (X_b.dot(theta) - y).dot(X_b[:, i])\n # return res * 2 / len(X_b)\n\n return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)\n\n def gradient_descent(X_b, y, initial_theta, eta,n_iters = 1e4,epsilon=1e-8):\n # eta 是下降速率\n theta = initial_theta\n i_iter = 0\n\n while i_iter < n_iters:\n gradient = dJ(theta, X_b, y)\n last_theta = theta\n theta = theta - eta * gradient\n\n if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):\n break\n\n i_iter += 1\n\n return theta\n\n X_b = np.hstack([np.ones((len(x_train),1)),x_train])\n # 从0开始搜索\n initial_theta = np.zeros(X_b.shape[1])\n self._theta = gradient_descent(X_b,y_train,initial_theta,eta,n_iters,epsilon)\n\n self.interception_ = self._theta[0]\n self.coef_ = self._theta[1:]\n\n return self\n\n def fit_sgd(self,x_train,y_train,n_iters = 5,t0 = 5,t1 = 50):\n # 随机梯度下降法\n # n_iters表明看整个样本几圈\n assert x_train.shape[0] == y_train.shape[0],\"数据维度必须一致\"\n assert n_iters >= 1,\"次数至少大于等于1\"\n\n def dJ_sgd(theta,x_b_i,y_i):\n return x_b_i.T.dot(x_b_i.dot(theta) - y_i) * 2\n\n def sgd(x_b,y,initial_theta,n_iters,t0=5,t1=50):\n\n def learning_theta(t):\n return t0 / (t + t1)\n\n theta = initial_theta\n m = len(x_b)\n\n # 这里因为不能保证随机梯度下降一直朝梯度下降方向,所以判断两者损失函数差的条件可以去掉\n for cur_iter in range(n_iters):\n # 为了保证看几圈同时每个数据项都被计算到,进行改进\n # 对原先索引进行乱序\n indexs = np.random.permutation(m)\n x_b_new = x_b[indexs]\n y_new = y[indexs]\n for i in range(m):\n # 现在能保证每一圈都可以看到每个数据项\n gradient = dJ_sgd(theta, x_b_new[i], y_new[i])\n # 一样的用法\n theta = theta - learning_theta(cur_iter * m + i) * gradient\n\n return theta\n\n x_b = np.hstack([np.ones((len(x_train),1)),x_train])\n initial_theta = np.zeros(x_b.shape[1])\n self._theta = sgd(x_b, y_train, 
\n\n        self.interception_ = self._theta[0]\n        self.coef_ = self._theta[1:]\n\n        return self\n\n\n    def predict(self,X_test):\n        # predict with the multivariate linear model\n        assert self.coef_ is not None and self.interception_ is not None,\"fit must be called first\"\n        assert X_test.shape[1] == len(self.coef_),\"the number of features must match the parameters used in training\"\n\n        X_b = np.hstack([np.ones((len(X_test),1)),X_test])\n        #print(\"theta shape is \",self._theta.shape)\n        # when multiplying a matrix and a vector, numpy treats the vector as a row or a column depending on which side it is on\n        return X_b.dot(self._theta)\n\n    def score(self,X_test,Y_test):\n        # score the model with R^2\n\n        y_predict = self.predict(X_test)\n        return r2_score(Y_test,y_predict)\n\n    def __repr__(self):\n        return \"LinearRegression()\"","sub_path":"playML/LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"398547407","text":"from datetime import timedelta, datetime\n\nfrom django.forms.models import model_to_dict\nfrom jsonrpc import jsonrpc_method\n\nfrom .common import api_v1_site\nfrom apps.organization.models import Location\nfrom apps.scheduling.models import Event, StandardReservation\n\n\n@jsonrpc_method('event.adjecents(String, String, String, String, Number, Array) -> Array', site=api_v1_site,\n                authenticated=True)\ndef event_adjecents(request, start_date, start_time, end_date, end_time, event=None, locations=None):\n    \"\"\"Returns the events which take place adjacent to the to-be-planned event.\"\"\"\n\n    start = datetime.strptime(\"%s %s\" % (start_date, start_time), \"%d-%m-%Y %H:%M\") - timedelta(minutes=15)\n    end = datetime.strptime(\"%s %s\" % (end_date, end_time), \"%d-%m-%Y %H:%M\") + timedelta(minutes=15)\n    realstart = datetime.strptime(\"%s %s\" % (start_date, start_time), \"%d-%m-%Y %H:%M\")\n    realend = datetime.strptime(\"%s %s\" % (end_date, end_time), \"%d-%m-%Y %H:%M\")\n\n    # Fetch all conflicting events with a fifteen-minute margin at both ends.\n    # Then take the truly conflicting events out, so that only the adjacent\n    # events remain.
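\n    # For example: for a request 14:00-15:00, an event ending at 13:50 or\n    # starting at 15:10 counts as adjacent, while one overlapping 14:30 conflicts.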
\n    if locations:\n        locations = Location.objects.filter(pk__in=locations)\n        events = Event.objects.none()\n        adjevents = Event.objects.none()\n        for location in locations:\n            events |= Event.conflicting_events(realstart, realend, location)\n            adjevents |= Event.conflicting_events(start, end, location)\n    else:\n        events = Event.conflicting_events(realstart, realend)\n        adjevents = Event.conflicting_events(start, end)\n\n    if event:\n        events = events.exclude(pk=event)\n\n    result = []\n    for event in adjevents:\n        if event not in events:\n            result.append(model_to_dict(event))\n\n    return result\n\n\n@jsonrpc_method('event.conflicts(String, String, String, String, Number, Array) -> Array', site=api_v1_site,\n                authenticated=True)\ndef event_conflicts(request, start_date, start_time, end_date, end_time, event_id=None, locations=None):\n    \"\"\"Returns the events which take place at the same time as the to-be-planned event.\"\"\"\n\n    start = datetime.strptime(\"%s %s\" % (start_date, start_time), \"%d-%m-%Y %H:%M\")\n    end = datetime.strptime(\"%s %s\" % (end_date, end_time), \"%d-%m-%Y %H:%M\")\n\n    if locations:\n        locations = Location.objects.filter(pk__in=locations.split(','))\n        events = Event.objects.none()\n        for location in locations:\n            events |= Event.conflicting_events(start, end, location)\n    else:\n        events = Event.conflicting_events(start, end)\n\n    if event_id:\n        events = events.exclude(pk=event_id)\n\n    result = []\n    for event in events:\n        result.append(model_to_dict(event))\n\n    return result\n\n\n@jsonrpc_method('event.conflicts_standard(String, String, String, String, Array) -> Array', site=api_v1_site,\n                authenticated=True)\ndef event_conflicts_standard(request, start_date, start_time, end_date, end_time, locations=None):\n    \"\"\"Returns the standard reservations which are booked at the same time as the to-be-planned event.\"\"\"\n\n    start = datetime.strptime(\"%s %s\" % (start_date, start_time), \"%d-%m-%Y %H:%M\")\n    end = datetime.strptime(\"%s %s\" % (end_date, end_time), \"%d-%m-%Y %H:%M\")\n    s_reservations = StandardReservation.objects.occuring_at(start, end)\n\n    if locations:\n        locations = Location.objects.filter(pk__in=locations.split(','))\n        s_reservations = s_reservations.filter(location__in=locations)\n\n    result = []\n    for event in s_reservations:\n        result.append(model_to_dict(event))\n\n    return result\n","sub_path":"api/v1/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"500340206","text":"import os\n\nfrom discord import Intents\nfrom discord.ext import commands\nfrom discord.ext.commands.context import Context\nfrom discord.guild import Guild\nfrom discord.member import Member\nfrom source.default import DefaultSource\nfrom source.interface import SourceInterface\n\nintents: Intents = Intents(guilds=True, messages=True)\nbot = commands.Bot(command_prefix='.', intents=intents)\nsource: SourceInterface = DefaultSource()\n\n\n@bot.command()\nasync def marshall(ctx):\n    await ctx.send('meow')\n\n\n@bot.command()\nasync def me(ctx: Context):\n    await ctx.send(about_me(ctx))\n\n\n@bot.event\nasync def on_guild_join(guild: Guild):\n    source.register_guild(guild.id)\n\n\n@bot.event\nasync def on_guild_remove(guild: Guild):\n    source.deactivate_guild(guild.id)\n\n\ndef about_me(ctx: Context) -> str:\n    if isinstance(ctx.author, Member):\n        ret_str = 'Meowmber: '\n        if ctx.author.nick 
is not None:\n            ret_str += ctx.author.nick\n        else:\n            ret_str += ctx.author.name\n    else:\n        ret_str = 'Meowser: ' + ctx.author.name\n    return ret_str\n\n\nif __name__ == '__main__':\n    bot.run(os.environ['BOT_TOKEN'])\n","sub_path":"listen.py","file_name":"listen.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"542247263","text":"import socket, datetime\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom random import randint\nfrom time import sleep\nfrom collections import OrderedDict\nimport RPi.GPIO as gpio\n\ndef send_pickle_stream(data, sock, target):\n\n    data_string = pickle.dumps(data)\n    sock.sendto(data_string, target)\n    \ndef prepare_packet_data(address, event_id, initial_time_stamp):\n\n\tnow_time_stamp = pd.Timestamp(np.datetime64(datetime.datetime.now()))\n\telapsed_time = now_time_stamp - initial_time_stamp\n\t\n\tdata = OrderedDict()\n\tdata[\"event_id\"] = event_id\n\tdata[\"timestamp\"] = now_time_stamp\n\tdata[\"timedelta\"] = elapsed_time\n\tdata[\"address\"] = address\n\n\t# note: incrementing event_id here would only rebind the local name,\n\t# so the counter is advanced by the caller instead\n\treturn data\n\nhost = '161.122.21.46'\nport = 60001\ndimension = 30*30 - 1\n\n\nrw_port = 2\nfifo_rd_port = 19\nfifo_empty_port = 20\nbuf_empty_port = 16\nfifo_full_port = 21\nneur_adr_ports = [14,15,18,23,24,25,8,7,12,16] ## NEUR_ADR[9:0]\n\nout_ports = [rw_port, fifo_rd_port]\nin_ports = [fifo_empty_port, fifo_full_port, neur_adr_ports]\n\ngpio.setmode(gpio.BCM)\nfor i in in_ports:\n    gpio.setup(i, gpio.IN)\nfor i in out_ports:\n    gpio.setup(i, gpio.OUT)\n\ngpio.output(rw_port, False)\n\n\n# Create a socket (SOCK_DGRAM means a UDP socket)\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ninitial_time_stamp = pd.Timestamp(np.datetime64(datetime.datetime.now()))\nevent_id = 0  # a plain int works on both Python 2 and 3 (long() is Python 2 only)\naddress = 0\ntry:\n    while True:\n        if gpio.input(fifo_empty_port) == False:\n            b9 = gpio.input(neur_adr_ports[0])\n            b8 = gpio.input(neur_adr_ports[1])\n            b7 = gpio.input(neur_adr_ports[2])\n            b6 = gpio.input(neur_adr_ports[3])\n            b5 = gpio.input(neur_adr_ports[4])\n            b4 = gpio.input(neur_adr_ports[5])\n            b3 = gpio.input(neur_adr_ports[6])\n            b2 = gpio.input(neur_adr_ports[7])\n            b1 = gpio.input(neur_adr_ports[8])\n            b0 = gpio.input(neur_adr_ports[9])\n            \n            address = 512*b9 + 256*b8 + 128*b7 + 64*b6 + 32*b5 + 16*b4 + 8*b3 + 4*b2 + 2*b1 + b0\n\n            gpio.output(fifo_rd_port, True)\n            gpio.output(fifo_rd_port, False)\n\n##            if gpio.input(fifo_full_port) == True:\n##                print 'Buffer is FULL.' 
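\n\n            # (added sketch) the unrolled bit arithmetic above is equivalent to a\n            # loop over the address ports, most significant bit first:\n            #   address = 0\n            #   for p in neur_adr_ports:\n            #       address = (address << 1) | gpio.input(p)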
\n    \n        data = prepare_packet_data(address, event_id, initial_time_stamp)\n        send_pickle_stream(data, sock, target=(host, port))\n        event_id += 1\n\t\t\n\t\t# send signal that nothing is firing\n##\t\tsleep(wait*0.001)\n##\t\taddress = -1\n##\t\tdata = prepare_packet_data(address,event_id,initial_time_stamp)\n##\t\tprint \"Client: Sending:\", data\n##\t\tsend_pickle_stream(data, sock, target= (host,port))\n\nfinally:\n    sock.close()\n","sub_path":"FPGA/NeuralNet-controller/2-SEND_SPIKES.py","file_name":"2-SEND_SPIKES.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"160846415","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    Syco Signer\n    ~~~~~~~~~~~~~~\n\n    :author: Daniel Lindh \n    :copyright: (c) 2014 System Console project\n    :license: see LICENSE for more details.\n\"\"\"\n\nimport os\nfrom datetime import datetime, timedelta\nimport time\nfrom collections import OrderedDict\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.sql.expression import text\nfrom flask import Flask, request, g, redirect, url_for, render_template, flash\n\n\n\n# create our little application :)\napp = Flask(__name__)\n\n# Load default config and override config from an environment variable\napp.config.update(dict(\n    DATABASE=\"mysql+mysqlconnector://root:root@127.0.0.1/syslog?charset=utf8\",\n    DEBUG=True,\n    SECRET_KEY='development key',\n    USERNAME='admin',\n    PASSWORD='default'\n))\n\n\n# Create mysql connection\nengine = create_engine(\n    app.config['DATABASE'],\n    convert_unicode=True, pool_size=50, pool_recycle=3600\n)\n\n\n@app.before_request\ndef connect_db():\n    \"\"\"Create a connection to the database before each request.\"\"\"\n    if not hasattr(g, 'con'):\n        g.con = engine.connect()\n\n\n@app.teardown_request\ndef close_db(ex):\n    \"\"\"Close database connection after each request.\"\"\"\n    if hasattr(g, 'con'):\n        g.con.close()\n        delattr(g, 'con')\n\n\n@app.route('/')\ndef signed():\n    args = {'REMOTE_USER': os.environ.get(\"REMOTE_USER\", 'Unknown')}\n    return render_template('signed.html', entries=signed_days(), **args)\n\n\ndef signed_days():\n    \"\"\"Return a dict with all signed and unsigned days since first sign.\n\n    days['2014-01-01'] = {'created': '2014-01-01', 'sign':... 
}\n\n    \"\"\"\n    days = unsigned_days()\n    cur = g.con.execute('SELECT * FROM signed ORDER BY id DESC')\n    for row in cur.fetchall():\n        key = row['created'].strftime('%Y-%m-%d')\n        days[key] = dict(row)\n    return days\n\n\ndef unsigned_days():\n    \"\"\"Return a dict with all days since first sign.\n    Value is a default dict.\n\n    days['2014-01-01'] = {'created': '2014-01-01' }\n    \"\"\"\n    cur = g.con.execute('SELECT min(signdate) as signdate FROM signed')\n    first_sign_date = cur.fetchone()['signdate']\n    if not first_sign_date:\n        first_sign_date = datetime.now()\n\n    def _key(x):\n        return (datetime.now() - timedelta(x)).strftime('%Y-%m-%d')\n\n    days = (datetime.now() - first_sign_date).days\n    unsigned = OrderedDict()\n    for x in range(days):  # range works on both Python 2 and 3; xrange is Python 2 only\n        unsigned[_key(x)] = {'created': _key(x)}\n\n    return unsigned\n\n\n@app.route('/log-entries/<date>')\ndef log_entries(date):\n    \"\"\"Takes a date with format 2013-01-23\"\"\"\n    # TODO: remove hardcoded date.\n    date = '2014-02-21'\n    day = datetime.strptime(date, '%Y-%m-%d')\n    cur = g.con.execute(\n        text(\n            'SELECT * FROM SystemEvents '\n            'WHERE ReceivedAt BETWEEN :from_date AND :to_date ORDER BY id DESC'\n        ),\n        from_date=day.strftime('%Y-%m-%d 00:00:00'),\n        to_date=day.strftime('%Y-%m-%d 23:59:59')\n    )\n\n    entries = cur.fetchall()\n    args = {\n        'REMOTE_USER': os.environ.get(\"REMOTE_USER\", 'Unknown')\n    }\n\n    return render_template('log-entries.html', entries=entries, **args)\n\n\n@app.route('/log-entries', methods=['POST'])\ndef add_entry():\n    # use the request-scoped connection; there is no get_db() helper in this module\n    g.con.execute(\n        text('INSERT INTO entries (title, text) VALUES (:title, :text)'),\n        title=request.form['title'], text=request.form['text'])\n    flash('New entry was successfully posted')\n    return redirect(url_for('signed'))\n\n\n@app.route('/exclude')\ndef exclude():\n    cur = g.con.execute('SELECT * FROM exclude ORDER BY id DESC')\n    entries = cur.fetchall()\n    args = {\n        'REMOTE_USER': os.environ.get(\"REMOTE_USER\", 'Unknown')\n    }\n\n    return render_template('exclude.html', entries=entries, **args)\n\n\n@app.route('/alert')\ndef alert():\n    cur = g.con.execute('SELECT * FROM alert ORDER BY id DESC')\n    entries = cur.fetchall()\n    args = {\n        'REMOTE_USER': os.environ.get(\"REMOTE_USER\", 'Unknown')\n    }\n\n    return render_template('alert.html', entries=entries, **args)\n\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"signer/signer.py","file_name":"signer.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"213297665","text":"from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom time import sleep\n\ndriver = webdriver.Firefox()\ndriver.get('http://www.google.com')\n\n# Move to an element and click it with an ActionChains sequence\nimages_button = driver.find_element_by_xpath(\".//*[@id='gbw']/div/div/div[1]/div[2]/a\")\nactions = ActionChains(driver)\nactions.move_to_element(images_button)\nactions.click(images_button)\nactions.perform()\nsleep(1)\n\ndriver.quit()\n","sub_path":"udemy/actions2.py","file_name":"actions2.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"177690401","text":"from majority_vote_example import *\n\nsc = StandardScaler()\n\nX_train_std = sc.fit_transform(X_train)\n\nfrom itertools import product\n\nall_clf = [pipe1, clf2, pipe3, mv_clf]\n\n# select min and max of x and y to delimit the boundary\n# of the graph\nx_min = X_train_std[:, 0].min() - 1\nx_max = X_train_std[:, 0].max() + 1\ny_min = X_train_std[:, 1].min() - 1\ny_max = X_train_std[:, 1].max() + 1
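\n\n# (added example) a tiny np.meshgrid call to show the shapes used below:\n#   np.meshgrid(np.arange(0, 0.3, 0.1), np.arange(0, 0.2, 0.1))\n# returns two 2x3 arrays: xx repeats [0.0, 0.1, 0.2] in every row and\n# yy repeats [0.0, 0.1] down every column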
\n\n# Note : we standardize also the Decision Tree because we are going to\n# display the decision boundary for all classifiers and we want to\n# have similar data so we can compare the graphs\n\n# np.arange creates an array from x_min to x_max in 0.1 steps\nxx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),\n\t\t\t\t\t np.arange(y_min, y_max, 0.1))\n\n# xx : here a 62x57 matrix with the same row being\n# [x_min, x_min+0.1, ..., ... x_max]\n\n## idem for yy but for the y axis. yy same dimension as xx\n\n# axarr is an array of axis object\n# f is the figure\nf, axarr = plt.subplots(nrows=2, ncols=2, # 2 graphs by rows/cols\n\t\t\t\t\t\tsharex='col', # each subplot col will share a X axis\n\t\t\t\t\t\tsharey='row', # each subplot row will share a Y axis\n\t\t\t\t\t\tfigsize=(14,10))\n\n# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy\n# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111\n# here : product([0, 1], [0, 1]) --> [(0, 0), (0, 1), (1, 0), (1, 1)]\nfor idx, clf, tt in zip(product([0, 1], [0, 1]),\n\t\t\t\t\t\tall_clf, clf_labels):\n\n\t# train each classifier\n\tclf.fit(X_train_std, y_train)\n\n\t# numpy.c_ : Translates slice objects to concatenation along the second axis.\n\t# np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]\n\t# return : array([[1, 2, 3, 0, 0, 4, 5, 6]])\n\n\t# ravel : Return a contiguous flattened array.\n\t# x = np.array([[1, 2, 3], [4, 5, 6]])\n\t# return : [1 2 3 4 5 6]\n\n\t# we actually predict a class label on every single pt of the\n\t# meshgrid !!!\n\t# it returns a 1D array : shape = [n_samples]\n\tZ = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n\t# reshape the prediction so it will be the same size\n\t# as the xx matrix : 62*57\n\t# note : size of Z before must be equal to 62*57...\n\tZ = Z.reshape(xx.shape)\n\n\taxarr[idx[0],idx[1]].contourf(xx, yy, Z, alpha=0.3)\n\n\t# scatter plot of class 0 (y_train==0) using 2 features (0 and 1)\n\taxarr[idx[0], idx[1]].scatter(X_train_std[y_train==0, 0],\n\t\t\t\t\t\t\t\t X_train_std[y_train==0, 1],\n\t\t\t\t\t\t\t\t c='blue',\n\t\t\t\t\t\t\t\t marker='^',\n\t\t\t\t\t\t\t\t s=50)\n\t# scatter plot of class 1 (y_train==1) using 2 features (0 and 1)\n\taxarr[idx[0], idx[1]].scatter(X_train_std[y_train==1, 0],\n\t\t\t\t\t\t\t\t X_train_std[y_train==1, 1],\n\t\t\t\t\t\t\t\t c='red',\n\t\t\t\t\t\t\t\t marker='o',\n\t\t\t\t\t\t\t\t s=50)\n\taxarr[idx[0], idx[1]].set_title(tt)\n\nplt.text(-4, -4,\n\t\t s='Sepal width [standardized]',\n\t\t ha='center', va='center', fontsize=12)\nplt.text(-12, 4,\n\t\t s='Petal length [standardized]',\n\t\t ha='center', va='center',\n\t\t fontsize=12, rotation=90)\n\nplt.show()\n\n\n# Note : to access individual parameters inside a GridSearch obj\n# we use .get_params() that we defined in majority_vote.py :\n# mv.clf_get_params()","sub_path":"ch7/EnsembleLearning/decision_region.py","file_name":"decision_region.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"286240514","text":"from django.urls import path\nfrom . 
import views as matching_views\n\nurlpatterns = [\n path('send_match_request/', matching_views.send_match_request, name='send_match_request'),\n path('accept_match_request/', matching_views.accept_match_request, name='accept_match_request'),\n path('delete_match/', matching_views.delete_match, name='delete_match'),\n path('reject_match_request/', matching_views.reject_match_request, name='reject_match_request'),\n path('view_matches', matching_views.view_matches, name='view_matches'),\n path('view_requests', matching_views.view_requests, name='view_requests'),\n path('view_request_details/', matching_views.view_request_details, name='view_request_details'),\n]","sub_path":"matching/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"41834213","text":"from django.conf.urls import patterns, url\n\n\nurlpatterns = patterns('',\n url(r'^profile/(?P\\w+)/', 'social.views.profile', name='profile'),\n url(r'^messages/', 'social.views.messages', name='messages'),\n url(r'^search/', 'social.views.search', name='search'),\n url(r'^settings/', 'social.views.settings', name='settings'),\n url(r'^help/', 'social.views.help', name='help'),\n url(r'^register/', 'social.views.register', name='register'),\n url(r'^login/', 'social.views.login_user', name='login'),\n url(r'^logout/', 'social.views.logout_user', name='logout'),\n url(r'^dashboard/', 'social.views.dashboard', name='dashboard'),\n url(r'^$', 'social.views.home', name='home'),\n)","sub_path":"social/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"58718532","text":"# -*- coding: utf-8 -*-\n\n#from bts.api import BTS\n#import json\n#import os\n\n\nclass BTSWallet(object):\n def __init__(self, client):\n self.bts_client = client\n self.pusher = None\n self.account = None\n self.balance = None\n client_info = self.bts_client.get_info()\n self.height = int(client_info[\"blockchain_head_block_num\"])\n self.accounts = None\n self.update_accounts()\n self.trx = None\n self.update_trx()\n\n def myPublish(self, topic, event):\n if self.pusher:\n self.pusher.emit(topic, event, namespace=\"\")\n\n def get_account(self):\n if self.account in self.accounts:\n return self.account\n else:\n return None\n\n def get_all_account(self):\n return self.accounts\n\n def set_account(self, account):\n if account in self.accounts:\n if account != self.account:\n self.account = account\n self.update_balance()\n return True\n else:\n return False\n\n def update_balance(self):\n account = self.get_account()\n if account is None:\n return None\n balance = self.bts_client.get_balance(account)\n if balance != self.balance:\n self.myPublish(\"balance\", balance)\n self.balance = balance\n\n def update_accounts(self):\n accounts = self.bts_client.list_accounts()\n if accounts != self.accounts:\n self.myPublish(\"account_list\", accounts)\n self.accounts = accounts\n\n def update_trx(self, start=0, end=-1):\n pass\n #trx = self.bts_client.get_trx()\n #if trx is not None:\n # self.trx.extend(trx)\n # self.myPublish(\"trx\", trx)\n\n def execute(self):\n client_info = self.bts_client.get_info()\n height_now = int(client_info[\"blockchain_head_block_num\"])\n if(height_now <= self.height):\n return\n\n self.update_balance()\n self.update_accounts()\n self.update_trx()\n self.height = 
height_now\n","sub_path":"btsbots_wallet/bts_wallet.py","file_name":"bts_wallet.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"243894128","text":"#!/usr/bin/env python\n# vim:fileencoding=utf-8\n#Author: Shinya Suzuki\n#Created: 2016-06-21\nimport numpy as np\nimport networkx as nx\nfrom functools import reduce\nfrom queue import Queue\n\ndef hmc_loss_score(true_matrix, pred_matrix, graph, root, label_list, cost_list, alpha=None, beta=None, average=\"micro\"):\n    label_list = list(label_list)\n    cost_list = np.array(cost_list)\n    validate_root(graph, root)\n    validate_coefficient(alpha, beta)\n    validate_average(average)\n    validate_list(label_list, cost_list)\n    loss = get_loss(true_matrix, pred_matrix, graph, label_list, cost_list, alpha, beta, average)\n    return loss\n\ndef get_loss(true_matrix, pred_matrix, graph, label_list, cost_list, alpha, beta, average):\n    true_matrix_filled = fill_parent_node(true_matrix, label_list, graph)\n    pred_matrix_filled = fill_parent_node(pred_matrix, label_list, graph)\n    c_matrix = remove_matrix_redunduncy(true_matrix_filled-pred_matrix_filled, label_list, graph)\n\n    if average == \"macro\":\n        if alpha is None or beta is None:\n            gamma = get_gamma(true_matrix_filled, average)\n            alpha, beta = get_alpha_beta(gamma)\n        fn_ci = np.where(c_matrix==1, np.c_[alpha]*cost_list, 0)\n        fp_ci = np.where(c_matrix==-1, np.c_[beta]*cost_list, 0)\n        loss_list = np.sum(fn_ci+fp_ci, axis=1)\n        loss = np.mean(loss_list)\n    elif average == \"micro\":\n        if alpha is None or beta is None:\n            gamma = get_gamma(true_matrix_filled, average)\n            alpha, beta = get_alpha_beta(gamma)\n        fn_ci = np.mean(np.where(c_matrix==1, cost_list, 0), axis=0)\n        fp_ci = np.mean(np.where(c_matrix==-1, cost_list, 0), axis=0)\n        loss = alpha * np.sum(fn_ci) + beta * np.sum(fp_ci)\n    return loss\n\ndef fill_parent_node(input_matrix, label_list, graph):\n    \"\"\"\n    If a child node is penalized, its parent node is penalized as well\n    \"\"\"\n    child_index = get_child_index_list(graph, label_list)\n\n    m = np.empty(input_matrix.shape, dtype=int)\n    for j in range(nx.dag_longest_path_length(graph)):\n        if j == 0:\n            matrix = input_matrix\n        else:\n            matrix = m\n\n        for i, c in enumerate(child_index):\n            if c == []:\n                v = matrix[:, i]\n            else:\n                v = np.where(np.any(matrix[:, c], axis=1)==1, 1, matrix[:,i])\n            m[:, i] = v\n    return m\n\ndef remove_matrix_redunduncy(matrix, label_list, graph):\n    \"\"\"\n    If a parent node is already penalized, this node's penalty is removed\n    \"\"\"\n    parent_index = get_parent_index_list(graph, label_list)\n\n    m = np.empty(matrix.shape, dtype=int)\n    for i, p in enumerate(parent_index):\n        if p == []:\n            v = matrix[:, i]\n        else:\n            v = np.where(np.any(matrix[:, p], axis=1)==1, 0, matrix[:,i])\n        m[:, i] = v\n    return m\n\ndef get_gamma(true_matrix, average):\n    if average == \"macro\":\n        n_one = np.sum(true_matrix, axis=1)\n        n_zero = true_matrix.shape[1] - n_one\n        gamma = n_zero / n_one\n    elif average == \"micro\":\n        n_one = np.count_nonzero(true_matrix)\n        n_zero = reduce(lambda x,y:x*y, true_matrix.shape) - n_one\n        gamma = n_zero / n_one\n    return gamma\n\ndef get_alpha_beta(gamma):\n    beta = 2 / (1 + gamma)\n    alpha = 2 - beta\n    return (alpha, beta)\n\ndef get_parent_index_list(graph, label_list):\n    \"\"\"\n    Return the parent indices in label_list.\n    To cope with nodes that have more than one parent, the result list is nested.\n    \"\"\"
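\n    # (added example) for bottom-up edges C->A, C->B, A->root, B->root and\n    # label_list [root, A, B, C] this returns [[], [0], [0], [1, 2]]:\n    # node C keeps the indices of both of its parents.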
\n    parent_index = []\n    for label in label_list:\n        tmp = [label_list.index(parent) for parent in graph.successors(label)]\n        parent_index.append(tmp)\n    return parent_index\n\ndef get_child_index_list(graph, label_list):\n    \"\"\"\n    Return the child node indices in label_list.\n\n    \"\"\"\n    child_index = []\n    for label in label_list:\n        tmp = []\n        for child in graph.predecessors(label):\n            try:\n                tmp.append(label_list.index(child))\n            except ValueError:\n                pass\n        child_index.append(tmp)\n    return child_index\n\ndef validate_root(graph, root):\n    if len(graph.successors(root)) != 0:\n        raise ValueError(\"Graph direction is wrong.\",\n                         \"This function requires bottom-up direction.\")\n    return 0\n\ndef validate_list(label_list, cost_list):\n    if len(label_list) != len(cost_list):\n        raise ValueError(\"label_list length doesn't match length of cost_list\")\n    return 0\n\ndef validate_coefficient(alpha, beta):\n    if (alpha is None and beta is not None):\n        raise ValueError(\"Alpha is None although beta is given\")\n    elif (alpha is not None and beta is None):\n        raise ValueError(\"Beta is None although alpha is given\")\n    return 0\n\ndef validate_average(average):\n    valid_average = [\"micro\", \"macro\"]\n    if average not in valid_average:\n        raise ValueError(\"Invalid input of average:{0}\".format(average))\n    return 0\n\ndef get_node_cost(graph, node, cost_dict):\n    \"\"\"\n    Get the cost of the input node.\n    If a parent node's cost has not been calculated yet, return 0;\n    the node is then recalculated later in the loop thanks to the DAG structure.\n    \"\"\"\n    cost = 0\n    ancestors = graph.successors(node)\n    if len(ancestors) == 0:\n        return 1\n    for ancestor in ancestors:\n        try:\n            cost += cost_dict[ancestor]/len(graph.predecessors(ancestor))\n        except KeyError:\n            return 0\n    return cost\n\ndef get_cost_dict(graph, root):\n    \"\"\"\n    Return a dictionary that holds the cost of each node in the graph\n    \"\"\"\n    cost_dict = {}\n    q = Queue()\n    cost_dict[root] = get_node_cost(graph, root, cost_dict)\n    for predecessor in graph.predecessors(root):\n        q.put(predecessor)\n    while not q.empty():\n        child = q.get()\n        cost_dict[child] = get_node_cost(graph, child, cost_dict)\n        child_predecessors = graph.predecessors(child)\n        for predecessor in child_predecessors:\n            q.put(predecessor)\n    return cost_dict\n\ndef get_cost_list(graph, root, label_list):\n    cost_dict = get_cost_dict(graph, root)\n    cost_list = [cost_dict[node] for node in label_list]\n    return cost_list\n\ndef main():\n    pass\n\nif __name__ =='__main__':\n    main()\n","sub_path":"hmc_loss/hmc_loss.py","file_name":"hmc_loss.py","file_ext":"py","file_size_in_byte":6234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"368986149","text":"import urllib.request, urllib.error,urllib.parse\nimport json\n\n\ndef sentiment_analysis(text):\n    APPLICATION_ID = 'ceb14481'\n    APPLICATION_KEY = 'c282071cd4dec2e9d2ecba28ed0e6cd7'\n    def call_api(endpoint, parameters):\n        url = 'https://api.aylien.com/api/v1/' + endpoint\n        headers = {\n            \"Accept\": \"application/json\",\n            \"Content-type\": \"application/x-www-form-urlencoded\",\n            \"X-AYLIEN-TextAPI-Application-ID\": APPLICATION_ID,\n            \"X-AYLIEN-TextAPI-Application-Key\": APPLICATION_KEY\n        }\n\n        opener = urllib.request.build_opener()\n        request = urllib.request.Request(url,urllib.parse.urlencode(parameters).encode('utf-8'), headers)\n        response = opener.open(request)\n        return json.loads(response.read().decode())\n\n    parameters = {\"text\": text}\n    sentiment = call_api(\"sentiment\",parameters)\n    return sentiment['polarity']\n
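\n# Example usage (hypothetical; needs valid AYLIEN credentials and network access):\n#   print(sentiment_analysis('What a great day'))  # -> 'positive'\n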
","sub_path":"text_process.py","file_name":"text_process.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"28484024","text":"import os\nimport ConfigParser\nfrom pp_utils import Monitor\n\n\nclass ResourceReader(object):\n    config = None\n\n    def __init__(self):\n        self.mon = Monitor()\n        self.mon.on()\n\n    def read(self, pp_dir, pp_home, pp_profile):\n        \"\"\"\n        looks for resources.cfg in the profile, then in pp_home, then in the pi_presents directory.\n        returns True if it finds the resources.cfg, otherwise returns False\n\n        :param pp_dir: the PiPresents directory\n        :param pp_home: the current pp_home directory\n        :param pp_profile: the current profile directory\n        \"\"\"\n        if not ResourceReader.config:\n            profile_config = os.path.join(pp_profile, \"resources.cfg\")\n            home_config = os.path.join(pp_home, \"resources.cfg\")\n            pp_config = os.path.join(pp_dir, 'pp_home', \"resources.cfg\")\n\n            # try inside profile\n            if os.path.exists(profile_config):\n                config_path = profile_config\n\n            # try inside pp_home\n            elif os.path.exists(home_config):\n                config_path = home_config\n\n            # try in the pi presents directory\n            elif os.path.exists(pp_config):\n                config_path = pp_config\n\n            else:\n                # throw an error if we can't find any config files\n                self.mon.err(self, \"resources.cfg not found at {0}, {1} or {2}\".format(profile_config, home_config, pp_config))\n                return False\n\n            ResourceReader.config = ConfigParser.ConfigParser()\n            ResourceReader.config.read(config_path)\n            self.mon.log(self, \"resources.cfg read from \" + config_path)\n        return True\n\n    def get(self, section, item):\n        if not ResourceReader.config.has_option(section, item):\n            return False\n        else:\n            return ResourceReader.config.get(section, item)\n","sub_path":"pp_resourcereader.py","file_name":"pp_resourcereader.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"45543713","text":"from PyPDF2 import PdfFileWriter, PdfFileReader\nfrom pdf2image import convert_from_path\nimport logging\nimport requests\nfrom io import BytesIO\nimport os\n\n\ndef get_logger(name):\n    \"\"\"\n    Add a StreamHandler to a logger if not already added and\n    return the logger\n\n    :param name: str\n    :return: logging.Logger object\n    \"\"\"\n    logger = logging.getLogger(name)\n    if not logger.handlers:\n        logger.propagate = 1  # propagate to parent\n        console = logging.StreamHandler()\n        logger.addHandler(console)\n        formatter = logging.Formatter(\n            '%(asctime)s - %(name)s [%(levelname)s] %(message)s')\n        console.setFormatter(formatter)\n    return logger\n\n\nLOGGER = get_logger(__name__)\nLOGGER.setLevel(logging.INFO)\n\n\ndef azure_ocr(pil_image):\n    \"\"\"\n    Return a string containing the text that has been OCRed\n    from a given page of a pdf.\n    :param pil_image: PIL Image\n    :return: str\n    \"\"\"\n    api_url = (\n        \"https://westeurope.api.cognitive.microsoft.com/vision/v2.0/ocr\"\n    )\n    header = {\n        'Ocp-Apim-Subscription-Key': 'YOUR_KEY',\n        'Content-Type': 'application/octet-stream'\n    }\n    params = {'language': 'it'}\n    try:\n        img = pil_image\n        bin_img = BytesIO()\n        img.save(bin_img, format='JPEG')\n        img.close()\n        img_data = bin_img.getvalue()\n        bin_img.close()\n        r = requests.post(\n            api_url,\n            params=params,\n            headers=header,\n            data=img_data\n        )\n        r.raise_for_status()\n        data = r.json()\n        text = ''\n        for item in data['regions']:\n            for line in item['lines']:\n                for word in line['words']:\n                    text += ' ' + word['text']\n        return text\n    except Exception as e:\n        LOGGER.error(e)\n        return ''
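\n\n# (added note) the v2.0 OCR response nests regions -> lines -> words; the loop\n# above flattens that hierarchy into a single space-separated string.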
\n\n\ndef pdf_splitter(stream_in):\n    \"\"\"\n    Split a PDF into N files where N is the number of pages.\n    :param stream_in: bytes\n    :return: list of absolute file paths\n    \"\"\"\n    pdf_files = []\n    reader = PdfFileReader(stream_in)\n    for page in range(reader.getNumPages()):\n        # a fresh writer per page, otherwise every output accumulates all previous pages\n        writer = PdfFileWriter()\n        writer.addPage(reader.getPage(page))\n        output_filename = 'tmp_{}.pdf'.format(page+1)\n        with open(output_filename, 'wb') as out:\n            writer.write(out)\n        LOGGER.info('Created: {}'.format(output_filename))\n        pdf_files.append(output_filename)\n    return pdf_files\n\n\ndef delete_files(file_list):\n    \"\"\"\n    Delete every file in a given list of paths\n    :param file_list: list of absolute file paths\n    :return: None\n    \"\"\"\n    try:\n        for filename in file_list:\n            LOGGER.info('Removed: {}'.format(filename))\n            os.remove(filename)\n    except OSError:\n        pass\n\n\ndef extract_text_from_unsearchable_pdf(stream_in):\n    \"\"\"\n    Return a string containing the text present in a given PDF\n    :param stream_in: bytes\n    :return: str\n    \"\"\"\n    file_list = pdf_splitter(stream_in)\n    text = \"\"\n    for single_pdf in file_list:\n        image_from_path = convert_from_path(single_pdf, fmt='jpg')[0]\n        text += azure_ocr(image_from_path)\n    delete_files(file_list)\n    return text\n","sub_path":"misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"553624354","text":"\nfrom random import randint\nfrom typing import List\n\nclass Node:\n    def __init__(self, d: int, next=None):\n        self.data: int = d\n        self.next: Node = next  # was hardcoded to None, which silently ignored the argument\n\n\n    def __str__(self):\n        return str(self.data)\n\n# singly linked list\nclass LinkedList:\n    'Simple singly linked list'\n    def __init__(self, values: List[int] = None) -> None:\n        self.head = None\n        self.tail = None\n        if values is not None:\n            [self.append_to_tail(value) for value in values]\n\n    def __str__(self) -> str:\n        current: Node = self.head\n        values = []\n        while(current.next != None):\n            values.append(str(current.data))\n            current = current.next\n        values.append(str(current.data))\n        return (' -> ').join(values)\n\n    def __len__(self):\n        result = 0\n        node = self.head\n        while node:\n            result += 1\n            node = node.next\n        return result\n    \n    def append_to_tail(self, d: int) -> None: \n        if self.head is None:\n            self.head = self.tail = Node(d)\n            return\n        self.tail.next = Node(d)\n        self.tail = self.tail.next\n        return \n    \n    def append_node_to_tail(self, n: Node) -> None:\n        if self.head is None:\n            self.head = self.tail = n\n            return\n        self.tail.next = n\n        end = n\n        while end.next:\n            end = end.next\n        self.tail = end\n        return\n    \n    def append_to_head(self, d: int) -> None:\n        if self.head is None:\n            self.tail = self.head = Node(d)\n            return\n        new_node = Node(d)\n        new_node.next = self.head\n        self.head = new_node\n\n\n    def length(self) -> int: \n        current = self.head\n        length = 0\n        while(current.next!=None):\n            length += 1\n            current = current.next\n        return length + 1\n\n    def generate(self, n, min_value, max_value):\n        'generates n random values between min_value and max_value and adds them to the list'\n        self.head = self.tail = None\n        for _i in range(n):\n            self.append_to_tail(randint(min_value, max_value))\n        return self\n\n    def contains(self, data: int):\n        'returns whether or not the list contains a given value'\n        current = self.head
\n        while current:\n            if current.data == data:\n                return True\n            current = current.next\n        return False\n\n\nimport unittest\n\nclass Test(unittest.TestCase):\n\n    def test_linked_list(self):\n        my_list = LinkedList([7])\n        self.assertEqual(my_list.length(), 1)\n        [my_list.append_to_tail(x) for x in [9,8]]\n        print(str(my_list))\n        self.assertEqual(my_list.length(), 3)\n\n    def test_linked_list_with_multiple_values_in_constructor(self):\n        my_new_list = LinkedList([4, 5, 6])\n        print(str(my_new_list))\n        self.assertEqual(my_new_list.length(), 3)\n\n    def test_linked_list_generator(self):\n        my_new_list = LinkedList()\n        my_new_list.generate(4, 1, 9)\n        print(str(my_new_list))\n        self.assertEqual(my_new_list.length(), 4)\n\n    def test_contains(self):\n        my_list = LinkedList([3, 4, 5])\n        self.assertEqual(my_list.contains(5), True)\n        self.assertEqual(my_list.contains(6), False)\n\n    def test_append_to_head(self):\n        my_list = LinkedList([3, 4, 5])\n        my_list.append_to_head(2)  # append_to_head takes a value and wraps it in a Node itself\n        self.assertEqual(True, my_list.contains(2))\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"chapter4/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"74288040","text":"import occ_grid\nimport controller\nimport movement\n\ndef placeEntities(grid, Robbers, Bank, moneys):\n    occ_grid.set_cell(grid, Robbers.position, occ_grid.GATHERER)\n    occ_grid.set_cell(grid, Bank.position, occ_grid.GENERATOR)\n    for x in range(0, len(moneys)):\n        occ_grid.set_cell(grid, moneys[x].position, occ_grid.RESOURCE)\n    \ndef isValidPosition(Robbers):\n    if 0 <= Robbers.position.x < 60 and 0 <= Robbers.position.y < 60:\n        return True\n    else:\n        return False\n\ndef updateEntities(grid, robbers, Bank, Money):\n    controller.controls(robbers, Money)\n    movement.robber_trail(grid)\n    controller.start(robbers)\n    movement.makeRobberGo(robbers, Money)\n    if isValidPosition(robbers):\n        occ_grid.set_cell(grid, robbers.position, occ_grid.GATHERER)","sub_path":"hw4/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"375608149","text":"# PygLatin Translation version 1.0\npyg = 'ay'\n# This created the \"ay\" suffix.\noriginal = input('Enter a word:')\n\n# This tests whether the person actually typed something that's not a numeral in the field.\nif len(original) > 0 and original.isalpha():\n    print (original)\n    # Now to the coding:\n    word = original.lower() # make it lowercase\n    first = word[0] # \"original\" is in here now\n    new_word = word+first+pyg\n    new_word = new_word[1:len(new_word)]\n    # this changed the \"word+first+pyg\" to the new deal.\n    print (new_word)\nelse:\n    # the translation only runs on valid input, so word[0] can no longer\n    # raise an IndexError on an empty string\n    print ('Please type in a word, would ya?')\n","sub_path":"Py files/ISALPHA.py","file_name":"ISALPHA.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"228599346","text":"#!/usr/local/bin/python\n\n# Just messing around with trying to create a password list\n# The intent was to be able to uppercase x number of letters in a word\n# I'm sure there is a way to do this with rules in john but I am struggling\n# with trying to figure out how to do this.\n\n\nwordlist = ['python', 'perfect', 'street', 'especially']\n\n\ndef to_uppercase(word):\n    converted = []\n    length = len(word)\n    for x in range(length):\n        for y in range(length):\n            mySet = list((x, y))\n            if x != y:
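\n                # (added note) keep the characters at the two chosen indices\n                # upper-cased and copy every other character through unchanged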
new = (\"\".join(c.upper() if i in mySet else c for i,\n c in enumerate(word)))\n converted.append(new)\n return(converted)\n\n\nfor word in wordlist:\n converted_words = to_uppercase(word)\n print(\"Word:{:<10} Length:{:<5} Converted:{}\".format(word,len(word),len(converted_words)))\n print(\" from: {} - {}\".format( converted_words[0], converted_words[-1]))\n","sub_path":"testpassword.py","file_name":"testpassword.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"607162438","text":"\"\"\"\nGiven an array of integers and an integer k, return true if and only if there are two distinct indices i and j in the array \nsuch that nums[i] = nums[j] and the difference between i and j is at most k.\n\"\"\"\n\nclass Solution:\n # @param {integer[]} nums\n # @param {integer} k\n # @return {boolean}\n def containsNearbyDuplicate(self, nums, k):\n N = len(nums)\n if N < 2 or k == 0:\n return False\n res = False\n odict = collections.OrderedDict()\n for x in nums:\n if len(odict) == k+1:\n odict.popitem(False)\n if x in odict:\n if odict[x] > 1:\n return False\n odict[x] += 1\n res = True\n else:\n odict[x] = 1\n return res\n\n \n","sub_path":"leetcode/Contains Duplicate II.py","file_name":"Contains Duplicate II.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"235861768","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 26 10:17:00 2019\n\n@author: BeccaYiu\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.metrics import roc_curve, auc\n\n\n##Pre-processing\n#Reading in and pre-processing data\ndf = pd.read_csv(\"Telco_churn.csv\")\ndf.TotalCharges = pd.to_numeric(df.TotalCharges, errors = 'coerce')\ndf.TotalCharges.fillna(0, inplace = True)\n#Binary value\nconvert_columns = ['Churn', \n 'Partner',\n 'Dependents',\n 'PhoneService',\n 'PaperlessBilling']\nfor item in convert_columns:\n df[item].replace(to_replace = \"Yes\", value = 1, inplace = True)\n df[item].replace(to_replace = \"No\", value = 0, inplace = True)\nuser_id = df.customerID #Retrieve ID\ndf = df.iloc[:, 1:]#Remove ID\ndf = pd.get_dummies(df)#Creating dummy variables for modeling\n\n\nlabels = pd.Series(df['Churn'])\nfeatures = df.drop('Churn', axis = 1)\nfeatures_name = list(features.columns)\nfeatures = pd.DataFrame(features)\n\n##Modeling\n#Splitting data\ntrain_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size = 0.25, random_state = 37)\n\n#Random forest\nrf = RandomForestClassifier()\n\n#Parameter-tuning\nn_estimators = [int(x) for x in np.linspace(100, 1000, num = 20)]\nmax_features = ['auto', 'sqrt']\nrandom_grid_rf = {'n_estimators': n_estimators,\n 'max_features': max_features}\n\nrf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid_rf, n_iter = 10, cv = 3, random_state = 42)\nrf_random.fit(train_features, train_labels)\nbest_rf_random = rf_random.best_estimator_\nbest_rf_random.fit(train_features, train_labels)\nrf_labels = best_rf_random.predict(test_features)\n\n#Evaluating\nfp_rate, tp_rate, thresholds = roc_curve(test_labels, rf_labels)\nroc_auc = auc(fp_rate, tp_rate)\n\n#Printing 
Results\nchurn_user = pd.DataFrame(rf_labels, index = test_labels.index)\nchurn_id = user_id.iloc[churn_user.index]\nprint(churn_id)\nprint(roc_auc)\n","sub_path":"prediction_model.py","file_name":"prediction_model.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"476006133","text":"from os.path import isdir, dirname, realpath, join\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\n\ndef random_string_gen(range1=12):\n import string\n import random\n return ''.join(random.choice(string.ascii_uppercase) for _ in range(range1))\n\n# # setup variables and functions\nrand_str = random_string_gen()\nfiles_path = join(dirname(realpath(__file__)), '')\nlist_files = ['00001.txt', '00001_1.txt', '00002_1.txt2', '00002_2.txt', '00002.txt',\n '00004.txt', '00002.pt', '00004.pt']\nmkdir_msg = 'Test of {} is postponed, since the random path exists.'\n# format a set-up path (attention, the parent path(s) of this might be hardcoded).\ntest_p_parent = join(files_path, 'test_mkdirs', rand_str, '')\nif isdir(test_p_parent):\n print(mkdir_msg.format('tests_general_testing_path'))\nelse:\n rand_str = random_string_gen(range1=22)\n test_p_parent = join(files_path, rand_str, '')\ntest_p = join(test_p_parent, 'testing', 'files', '')\n\n\ndef aux_require_file_existence(filenames, path):\n # Aux function: creates iteratively the files requested in the path.\n if not isdir(path):\n assert isdir(path) # # temp for debugging.\n return\n for file1 in filenames:\n # http://stackoverflow.com/a/12654798/1716869\n open(path + file1, 'a').close()\n\n\ndef bytes2str(info):\n \"\"\" Converts bytes to string. \"\"\"\n if isinstance(info, bytes):\n info = info.decode(\"utf-8\")\n return info\n\n# # grigoris, this file contains few common functions, reference variables that\n# # are required in all the files.\n","sub_path":"research_pyutils/tests/tests_base.py","file_name":"tests_base.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"169201180","text":"from datetime import datetime as dt\nimport re\nfrom .botmodule import BotModule\nfrom .twitchtools import parse_wrapper\n\nclass ConnectivityMonitor(BotModule):\n \"\"\"Module for monitor health of connection to chat\"\"\"\n\n def __init__(self, name):\n \"\"\"Init function for ConnectivityMonitor\"\"\"\n BotModule.__init__(self, name)\n self.config = self.config_manager.parse_file('twircbot/config/defaultConnectivityMonitor.config')\n self.uptime_string = '\\\\' + self.config['invoke_string'] + self.config['uptime_suffix']\n self.bornTime = dt.utcnow()\n self.last_data = dt.utcnow()\n\n @parse_wrapper\n def parse(self, data):\n \"\"\"Parse chat data and log it\"\"\"\n self.last_data = dt.utcnow()\n\n if (data.type == 'privmsg') or (data.type == 'whisper'):\n uptime_match = re.search(self.uptime_string, data.content)\n\n if uptime_match:\n uptime_message = 'Uptime: ' + self.pretty_timedelta(self.lifetime)\n self.reply(data, uptime_message)\n\n def check_timers(self):\n \"\"\"Function to check timers\"\"\"\n\n now = dt.utcnow()\n inputDelta = now - self.last_data\n self.lifetime = now - self.bornTime\n\n if inputDelta.seconds > self.config['reconnect_timer']:\n self.host.reconnect = True\n self.last_data = now\n else:\n self.host.reconnect = False\n\n if self.lifetime.seconds > self.config['stayalive_timer'] and self.config['stayalive_timer'] > 
0:\n            self.host.stayAlive = False\n\n    def pretty_timedelta(self, t_delta):\n        \"\"\"Turn a timedelta object into a nicely formatted string\"\"\"\n        (hours, hours_remainder) = (int(t_delta.seconds / 3600), t_delta.seconds % 3600)\n        (minutes, seconds) = (int(hours_remainder / 60), hours_remainder % 60)\n        \n        # Round to the nearest second (microseconds runs from 0 to 999999)\n        if t_delta.microseconds >= 500000:\n            seconds += 1\n\n        pretty_string = ''\n\n        if t_delta.days > 0:\n            pretty_string += '{0!s} days, '.format(t_delta.days)\n\n        # Make sure values are correctly padded\n        if hours >= 10:\n            hours = str(hours)\n        else:\n            hours = '0' + str(hours)\n\n        if minutes >= 10:\n            minutes = str(minutes)\n        else:\n            minutes = '0' + str(minutes)\n\n        if seconds >= 10:\n            seconds = str(seconds)\n        else:\n            seconds = '0' + str(seconds)\n\n        pretty_string += '{0}:{1}:{2} [hh:mm:ss]'.format(hours, minutes, seconds)\n\n        return pretty_string\n","sub_path":"twircbot/connectivitymonitor.py","file_name":"connectivitymonitor.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"152242904","text":"import sys,argparse,glob\n\nimport PrepareData\nimport PlotTools\nfrom Configuration import *\nfrom HelperTools import *\nfrom TrainDNN import TrainDNN,TrainRNN\nfrom LoadDNN import LoadDNN\nfrom KFold_xval import doKFold\nfrom ConfigClass import ConfigClass\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Deep Neural Network Training and Testing Framework')\n    group = parser.add_mutually_exclusive_group()\n\n    # Main Job to Run\n    group.add_argument('-c', '--ConvertRootToPD', action=\"store_true\", help=\"Takes flat ROOT Ntuples as input and outputs Pandas Dataframes\")\n    group.add_argument('-p', '--CreateTrainTestPD', action=\"store_true\", help=\"Mixing and shuffling of Pandas Dataframes Signal and Background MC samples\")\n    group.add_argument('-t', '--Train', action=\"store_true\", help=\"Train a DNN model\")\n    group.add_argument('-l', '--LoadTrainedModel', action=\"store_true\", help=\"Will load an existing trained model\")\n    group.add_argument('--ValidPlotsFromTrainTestDF', action=\"store_true\", help=\"plot input variables\")\n    group.add_argument('--ValidPlotsDataMC', action=\"store_true\", help=\"plot Data/MC variables\")\n    group.add_argument('-d', '--printParamSet', action=\"store_true\", help=\"Dump the available HyperParameter sets\")\n    group.add_argument('-D', '--printSetup', action=\"store_true\", help=\"Dump the setup\")\n\n    #Options on Convert ROOT Files to Pandas\n    parser.add_argument('--inROOTFiles', required='--ConvertRootToPD' in sys.argv, help='input ROOT file list, comma separated', type=str)\n    parser.add_argument('--InputMLNtuplePath', required='--ConvertRootToPD' in sys.argv, help='input ROOT file directory', type=str)\n\n    #Options on Create the mixed S+B Pandas file to be used as input for training and testing\n    parser.add_argument('--inSignalFiles', required=('--ValidPlotsDataMC' in sys.argv) or '--CreateTrainTestPD' in sys.argv or '-p' in sys.argv, default='None', help='input Signal file list, comma separated',type=str)\n    parser.add_argument('--inBackgrFiles', required=('--ValidPlotsDataMC' in sys.argv) or '--CreateTrainTestPD' in sys.argv or '-p' in sys.argv, default='None', help='input Background file list, comma separated',type=str)\n    parser.add_argument('--inDataFiles' , required=('--ValidPlotsDataMC' in sys.argv), default='None', help='input Data file list, comma separated',type=str)\n\n    #Options on Training
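\n    # Example invocation (hypothetical paths): train a binary classifier with\n    # parameter set 0:\n    #   python RunML.py -t -m binary -y 0 --PDPath ./pandas_dfs -N Mix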
\n    parser.add_argument('-y', '--hyperparamset', default=\"-1\", required='--Train' in sys.argv or '-t' in sys.argv, help=\"Selects the parameter set to train with. With -1 you get a random one out of all the permutations\",type=int)\n    parser.add_argument('-k', '--doKFold', action=\"store_true\", help=\"Do K-Fold validation\")\n    parser.add_argument('-M', '--modelPrefixName', default=\"llqqDNN\", help=\"output prefix of the model name\",type=str)\n    parser.add_argument('--useEqualSizeSandB', action=\"store_true\", help=\"Use exactly 50:50 N events for sig:bkg\")\n\n    #Options on Testing an existing Trained Model\n    parser.add_argument('-L', '--TrainedModelPath', required='--LoadTrainedModel' in sys.argv or '-l' in sys.argv, help=\"Will load an existing trained model from a specific path\",type=str)\n    parser.add_argument('--doConfusionMatrix', action=\"store_true\", help=\"\")\n    parser.add_argument('--doEfficiency', action=\"store_true\", help=\"\")\n    parser.add_argument('--doScore', action=\"store_true\", help=\"\")\n    parser.add_argument('--doROC', action=\"store_true\", help=\"\")\n    parser.add_argument('--unblind', action=\"store_true\", help=\"\")\n    parser.add_argument('--massPointToTest', help=\"When running the param network this defines which mass point in GeV will be tested\")\n    parser.add_argument('--testModelOnFullSamples', action=\"store_true\", help=\"\")\n\n    # general Common options\n    parser.add_argument('--PreselectionCommand', default=\"\", help='String which will be translated to a python command when creating the train/test PDs, filtering the initial PDs according to it. E.g inPanda[(inPanda.isMerged == 1 ) & (inPanda.isVBFevent == 0)]',type=str)\n    parser.add_argument('--PDPath', required=('--printParamSet' not in sys.argv and '-d' not in sys.argv and '-D' not in sys.argv), help='output directory containing Pandas DF', type=str)\n    parser.add_argument('-N', '--MixPD_TrainTestTag', required=('--ValidPlotsFromTrainTestDF' in sys.argv) or ('--CreateTrainTestPD' in sys.argv or '-p' in sys.argv) or ('--Train' in sys.argv or '-t' in sys.argv) or ('--LoadTrainedModel' in sys.argv or '-l' in sys.argv), default=\"Mix\", help=\"Name of the PD train/test file\",type=str)\n    parser.add_argument('-m', '--mode', required=('--Train' in sys.argv or '-t' in sys.argv) or ('--LoadTrainedModel' in sys.argv or '-l' in sys.argv), default=\"binary\", choices=['binary', 'multi', 'param', 'SimpleRNN', 'CNN', 'SVM'], help=\"Decide whether to run in binary or multi classification mode or in the parameterized DNN mode\")\n    parser.add_argument('-o', '--outBaseDir', default=\"ModelOutput\",help=\"directory to output the training/testing results\",type=str)\n\n    #Options on Validation\n    parser.add_argument('--VarPlotPath',required=('--ValidPlotsFromTrainTestDF' in sys.argv) or ('--ValidPlotsDataMC' in sys.argv), default=\"VarPlotPath\",help=\"directory to output the validation plots\",type=str)\n\n\n    #store the user defined options\n    args = parser.parse_args()\n    inROOTFiles = []\n\n    if args.ConvertRootToPD:\n        inROOTFiles = (args.inROOTFiles).split(',')\n\n    InputFilesSB = {}\n    if args.CreateTrainTestPD or args.ValidPlotsDataMC or args.LoadTrainedModel:\n        InputFilesSB = {\n            'Signal': (args.inSignalFiles).split(','),\n            'Background': (args.inBackgrFiles).split(','),\n            'Data': (args.inDataFiles).split(',')\n        }\n\n    setupClient = ConfigClass(\n        runMode = args.mode,\n        HyperParamSet = args.hyperparamset,\n        PrintParamSet = args.printParamSet,\n        printSetup = args.printSetup,\n        ValidPlotsFromTrainTestDF = 
args.ValidPlotsFromTrainTestDF,\n ValidPlotsDataMC = args.ValidPlotsDataMC,\n VariablesToPlot = VariablesToPlot,\n VarPlotPath = args.VarPlotPath,\n\n ConvertRootToPD = args.ConvertRootToPD,\n InputMLNtuplePath= args.InputMLNtuplePath,\n InputROOTFiles = inROOTFiles,\n rootBranchSubSample=rootBranchSubSample,\n\n CreateTrainTestPD = args.CreateTrainTestPD,\n InputFilesSB = InputFilesSB,\n MixPD_TrainTestTag = args.MixPD_TrainTestTag,\n\n Train = args.Train,\n DoKFold = args.doKFold,\n Dropout = Dropout,\n Params = Params,\n ScanParams = ScanParams,\n\n LoadTrainedModel = args.LoadTrainedModel,\n TrainedModelPath = args.TrainedModelPath,\n\n OutBaseDir = args.outBaseDir,\n modelPrefixName = args.modelPrefixName,\n useEqualSizeSandB = args.useEqualSizeSandB,\n\n PDPath = args.PDPath,\n InputDNNVariables = InputDNNVariables,\n massPoint = args.massPointToTest,\n MaskValue = -99.,\n PreselectionCuts = args.PreselectionCommand,\n doConfusionMatrix = args.doConfusionMatrix,\n doEfficiency = args.doEfficiency,\n doScore = args.doScore,\n doROC = args.doROC,\n unblind = args.unblind,\n testModelOnFullSamples = args.testModelOnFullSamples\n )\n\n if setupClient.ConvertRootToPD:\n print (Fore.BLUE+\"--------------------------\")\n print (Back.BLUE+' CONVERTING ROOT-->PANDAS ')\n print (Fore.BLUE+\"--------------------------\")\n print ('{:<45} {:<15}'.format(\"Input Flat Ntuples directory\",Fore.GREEN+setupClient.InputMLNtuplePath))\n print ('{:<45} {:<15}'.format('Output Pandas Dataframe directory',Fore.GREEN+ setupClient.PDPath), checkCreateDir(setupClient.PDPath) )\n print ('{:<45} {:<15}'.format('Branches to keep from ROOT file',Fore.GREEN+str(setupClient.rootBranchSubSample)))\n PrepareData.convertToPanda(setupClient)\n elif setupClient.CreateTrainTestPD:\n print (Fore.BLUE+\"--------------------------\")\n print (Back.BLUE+' CREATING TRAIN/TEST PDs ')\n print (Fore.BLUE+\"--------------------------\")\n print ('{:<45} {:<15}'.format('InputFilesSB',Fore.GREEN+str(InputFilesSB)))\n print ('{:<45} {:<15}'.format('I/O Pandas Dataframe directory',Fore.GREEN+ setupClient.PDPath), checkCreateDir(setupClient.PDPath) )\n print ('{:<45} {:<15}'.format('PD Train/Test Name Tag',Fore.MAGENTA + setupClient.MixPD_TrainTestTag))\n print ('{:<45} {:<15}'.format('PreselectionCuts',Fore.MAGENTA + setupClient.PreselectionCuts))\n PrepareData.preparePandas(setupClient)\n elif setupClient.Train:\n print (Fore.BLUE+\"--------------------------\")\n print (Back.BLUE+\" TRAIN A DNN MODEL \")\n print (Fore.BLUE+\"--------------------------\")\n print ('{:<45} {:<15}'.format('I/O Pandas Dataframe directory',Fore.GREEN+ setupClient.PDPath), checkCreateDir(setupClient.PDPath) )\n print ('{:<45} {:<15}'.format('Train Ouput top directory',Fore.GREEN + setupClient.OutBaseDir), checkCreateDir(setupClient.OutBaseDir) )\n print ('{:<45} {:<15}'.format('RunMode',Fore.GREEN + setupClient.runMode) )\n print ('{:<45} {:<15}'.format('PD Train Name Tag',Fore.GREEN + setupClient.MixPD_TrainTestTag))\n print ('{:<45} {:<15}'.format('Do KFold validation',(Fore.GREEN if setupClient.DoKFold else Fore.RED)+str(setupClient.DoKFold)) )\n pickModelParamSet(setupClient)\n if setupClient.runMode == 'SimpleRNN':\n modelMetricsHistory = TrainRNN(setupClient)\n PlotTools.plotTrainPerformance(setupClient,modelMetricsHistory)\n else:\n modelMetricsHistory = TrainDNN(setupClient)\n PlotTools.plotTrainPerformance(setupClient,modelMetricsHistory)\n if setupClient.DoKFold:\n doKFold(setupClient)\n elif setupClient.LoadTrainedModel:\n print 
(Fore.BLUE+\"--------------------------\")\n print (Back.BLUE+\" TEST A TRAINED DNN MODEL \")\n print (Fore.BLUE+\"--------------------------\")\n print ('{:<45} {:<15}'.format('I/O Pandas Dataframe directory',Fore.GREEN+ setupClient.PDPath), checkCreateDir(setupClient.PDPath) )\n print ('{:<45} {:<15}'.format('Test Ouput top directory',Fore.GREEN + setupClient.OutBaseDir), checkCreateDir(setupClient.OutBaseDir) )\n print ('{:<45} {:<15}'.format('RunMode',Fore.GREEN + setupClient.runMode) )\n print ('{:<45} {:<15}'.format('PD Test Name Tag',Fore.GREEN + setupClient.MixPD_TrainTestTag))\n print ('{:<45} {:<15}'.format('Load Model from path', Fore.GREEN+setupClient.TrainedModelPath) )\n print ('{:<45} {:<15}'.format('Create the Confusion Matrix', Fore.GREEN+str(setupClient.doConfusionMatrix)) )\n print ('{:<45} {:<15}'.format('Calculate Signal and Background Efficienies', Fore.GREEN+str(setupClient.doEfficiency)) )\n print ('{:<45} {:<15}'.format('Plot the NN-Score from Train/Test samples', Fore.GREEN+str(setupClient.doScore)) )\n print ('{:<45} {:<15}'.format('Create the ROC curve', Fore.GREEN+str(setupClient.doROC)) )\n print ('{:<45} {:<15}'.format('Data unblind', Fore.GREEN+str(setupClient.unblind)) )\n LoadDNN(setupClient)\n elif setupClient.ValidPlotsFromTrainTestDF:\n print (Fore.BLUE+\"--------------------------\")\n print (Back.BLUE+\" VALIDATION PLOTS FROM DF \")\n print (Fore.BLUE+\"--------------------------\")\n print ('{:<45} {:<15}'.format('Plots Save directory',Fore.GREEN + setupClient.VarPlotPath),checkCreateDir(setupClient.VarPlotPath) )\n PlotTools.plotDFs(setupClient)\n elif setupClient.ValidPlotsDataMC:\n print (Fore.BLUE+\"--------------------------\")\n print (Back.BLUE+\" DATA/MC PLOTS FROM DF \")\n print (Fore.BLUE+\"--------------------------\")\n print ('{:<45} {:<15}'.format('Validation Plots Save directory',Fore.GREEN + setupClient.VarPlotPath),checkCreateDir(setupClient.VarPlotPath) )\n PlotTools.plotDataMC(setupClient)\n elif args.printSetup:\n setupClient.printSetupParameters()\n elif args.printParamSet:\n printScanParamCombos(setupClient)\n\n print('')\n","sub_path":"RunML.py","file_name":"RunML.py","file_ext":"py","file_size_in_byte":12548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"36652758","text":"# Copyright 2017 Google Inc.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Data providers for deepvariant images.\n\ntf.slim datasets and data providers for standard DeepVariant datasets for\ntraining and evaluating germline calling accuracy.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\n\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom deepvariant import tf_utils\nfrom deepvariant.core import io_utils\nfrom deepvariant.protos import deepvariant_pb2\n\nslim = tf.contrib.slim\n\n# Number of classes represented in the data set. The three classes are\n# homozygous reference (0), heterozygous (1) and homozygous alternative (2).\nDEFAULT_NUM_CLASSES = 3\n\n\ndef make_training_batches(dataset, model, batch_size):\n \"\"\"Provides batches of pileup images from this dataset.\n\n Creates a DataSetProvider for dataset, extracts image, label, and\n truth_variant from it, preprocesses each image with model.preprocess_image()\n and finally batches these up.\n\n Args:\n dataset: a slim DataSet we want to turn into batches. Must provide data\n items \"image\", \"label\", and \"truth_variant\".\n model: a DeepVariantModel to use for preprocessing each image before\n batching.\n batch_size: the number of images in each batch.\n\n Returns:\n images: 4-D float Tensor of a batch of images with shape\n (batch_size, height, width, 3).\n labels: a 1-D integer Tensor shape (batch_size,) containing the labels for\n each image, in the same order.\n encoded_truth_variants: Tensor of strings with shape (batch_size,).\n Each element of this tensor is a byte-encoded learning.genomics.v1.Variant\n protobuf in the same order as images and one_hot_labels.\n \"\"\"\n data_provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n common_queue_capacity=2 * batch_size,\n common_queue_min=batch_size,\n reader_kwargs={\n 'options': io_utils.make_tfrecord_options(dataset.data_sources)\n })\n # Load the data.\n image, label, truth_variant = data_provider.get(\n ['image', 'label', 'truth_variant'])\n image = model.preprocess_image(image)\n return tf.train.shuffle_batch(\n [image, label, truth_variant],\n batch_size=batch_size,\n num_threads=4,\n capacity=5000,\n # redacted\n min_after_dequeue=min(1000, dataset.num_samples))\n\n\n# redacted\nclass DeepVariantDataSet(object):\n \"\"\"All of the information needed to create and use a DeepVariant dataset.\"\"\"\n\n def __init__(self,\n name,\n source,\n num_examples,\n num_classes=DEFAULT_NUM_CLASSES,\n tensor_shape=None):\n \"\"\"Creates a dataset.\n\n Args:\n name: str. The name of this dataset. Used to refer to this dataset on\n the command line.\n source: str or list[str]. A file path pattern or a comma-separated list of\n file path patterns pointing to TF.Example PIC images containing the data\n for this dataset.\n num_examples: A positive integer. The number of examples in this dataset.\n num_classes: A positive integer. The number of classes in the labels of\n this dataset. 
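An aside on `make_training_batches` above: it returns queue-backed tensors, so under TF1 semantics a session must start the slim provider's queue runners before evaluating them. A sketch assuming `dataset` is the slim dataset from `DeepVariantDataSet.get_slim_dataset()` and `model` provides `preprocess_image`, as the docstring describes:

```python
import tensorflow as tf  # already imported in this module

images, labels, variants = make_training_batches(dataset, model, batch_size=32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    # The DatasetDataProvider and shuffle_batch enqueue on background threads.
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    image_batch, label_batch = sess.run([images, labels])
    coord.request_stop()
    coord.join(threads)
```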
Currently defaults to DEFAULT_NUM_CLASSES.\n tensor_shape: None (whihc means we get the shape from the first example in\n source), or list of int [height, width, channel] for testing.\n \"\"\"\n self.name = name\n self.source = source\n self.num_examples = num_examples\n self.num_classes = num_classes\n if tensor_shape:\n self.tensor_shape = tensor_shape\n else:\n self.tensor_shape = tf_utils.get_shape_from_examples_path(source)\n\n def __str__(self):\n return ('DeepVariantDataSet(name={}, source={}, num_examples={}, '\n 'num_classes={}').format(self.name, self.source, self.num_examples,\n self.num_classes)\n\n def get_slim_dataset(self):\n \"\"\"Returns a Slim dataset for this dataset.\n\n Returns:\n A tf.slim.dataset.Dataset with the data in this DataSet.\n \"\"\"\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string),\n 'variant/encoded': tf.FixedLenFeature((), tf.string),\n 'truth_variant/encoded': tf.FixedLenFeature((), tf.string),\n 'image/format': tf.FixedLenFeature((), tf.string),\n 'label': tf.FixedLenFeature((1,), tf.int64),\n 'locus': tf.FixedLenFeature((), tf.string),\n }\n items_to_handlers = {\n 'image':\n slim.tfexample_decoder.Image(\n 'image/encoded', 'image/format', shape=self.tensor_shape),\n 'label':\n slim.tfexample_decoder.Tensor('label', shape=[]),\n 'locus':\n slim.tfexample_decoder.Tensor('locus', shape=[]),\n 'variant':\n slim.tfexample_decoder.Tensor('variant/encoded', shape=[]),\n 'truth_variant':\n slim.tfexample_decoder.Tensor('truth_variant/encoded', shape=[]),\n }\n\n # redacted\n # shuffled correctly in training.\n return slim.dataset.Dataset(\n data_sources=self.source.split(','),\n reader=tf.TFRecordReader,\n decoder=slim.tfexample_decoder.TFExampleDecoder(keys_to_features,\n items_to_handlers),\n num_samples=self.num_examples,\n items_to_descriptions=None)\n\n\ndef _get_dataset(name, path, num_examples, tensor_shape=None):\n \"\"\"Creates a dataset with a specified name a path to the source file.\n\n Args:\n name: String. The name of the dataset.\n path: String. The path to the source file of the dataset.\n num_examples: int. The number of examples in the dataset.\n tensor_shape: None, or list of int [height, width, channel] for testing.\n\n Returns:\n A DeepVariantDataSet where name is the specified name, and the source is the\n path.\n\n Raises:\n ValueError: if name and path are not specified.\n \"\"\"\n if not name:\n raise ValueError('Name must not be None', name)\n if not path:\n raise ValueError('Path must not be None', path)\n\n return DeepVariantDataSet(\n name=name,\n source=path,\n num_examples=num_examples,\n tensor_shape=tensor_shape)\n\n\ndef get_dataset(dataset_config_filename, tensor_shape=None):\n \"\"\"Creates a DeepVariantDataSet from the dataset config file.\n\n Args:\n dataset_config_filename: String. 
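A note on `tensor_shape` in `_get_dataset` above: passing it explicitly bypasses `tf_utils.get_shape_from_examples_path`, which otherwise reads the first example from disk; that is what makes the constructor usable in tests. A hypothetical construction (the path and shape are placeholders):

```python
# Build a dataset without touching the (nonexistent) tfrecord on disk.
test_dataset = _get_dataset(
    name='unit_test',
    path='/tmp/examples.tfrecord',
    num_examples=10,
    tensor_shape=[100, 221, 3])  # [height, width, channels]
slim_dataset = test_dataset.get_slim_dataset()
```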
Path to the dataset config pbtxt file.\n tensor_shape: None, or list of int [height, width, channel] for testing.\n\n Returns:\n A DeepVariantDataSet from the specified split in the dataset_config file.\n\n Raises:\n ValueError: if the dataset config doesn't have the necessary information.\n \"\"\"\n\n def read_dataset_config(dataset_config_pbtxt_filename):\n with tf.gfile.GFile(dataset_config_pbtxt_filename) as f:\n return text_format.Parse(f.read(),\n deepvariant_pb2.DeepVariantDatasetConfig())\n\n dataset_config = read_dataset_config(dataset_config_filename)\n\n if not dataset_config.name:\n raise ValueError('dataset_config needs to have a name')\n\n if not dataset_config.tfrecord_path:\n raise ValueError('The dataset in the config {} does not have a '\n 'tfrecord_path.'.format(dataset_config_filename))\n\n # redacted\n # of num_examples.\n if not dataset_config.num_examples:\n raise ValueError('The dataset in the config {} does not have a '\n 'num_examples.'.format(dataset_config_filename))\n\n return _get_dataset(\n dataset_config.name,\n dataset_config.tfrecord_path,\n dataset_config.num_examples,\n tensor_shape=tensor_shape)\n\n\ndef write_dataset_config_to_pbtxt(dataset_config, dataset_config_filename):\n \"\"\"Writes the dataset_config to a human-readable text format.\n\n Args:\n dataset_config: DeepVariantDatasetConfig. The config to be written out.\n dataset_config_filename: String. Path to the output pbtxt file.\n \"\"\"\n with tf.gfile.GFile(dataset_config_filename, mode='w') as writer:\n writer.write(text_format.MessageToString(dataset_config))\n","sub_path":"deepvariant/data_providers.py","file_name":"data_providers.py","file_ext":"py","file_size_in_byte":9471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"591218979","text":"from flask import Flask, flash, request, redirect, url_for, render_template, send_from_directory\nfrom flask import session\nfrom flask import jsonify\nfrom flask import json\nfrom werkzeug.utils import secure_filename\nimport os\n\nfrom user import *\nfrom course import *\n\nUPLOAD_FOLDER = 'static/uploads/'\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\n\napp = Flask(__name__, static_url_path='/public')\n\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\napp.secret_key = 'your secret key'\n\n\ndef allowed_file(filename):\n return '.' 
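A round trip through the two config helpers defined above, using the proto fields the module actually reads (`name`, `tfrecord_path`, `num_examples`); the path values are placeholders, and `tensor_shape` is passed so `get_dataset` does not try to read the hypothetical tfrecord:

```python
config = deepvariant_pb2.DeepVariantDatasetConfig()
config.name = 'training_set'
config.tfrecord_path = '/data/train.tfrecord'
config.num_examples = 100000

# Write the config as human-readable pbtxt, then load it back as a dataset.
write_dataset_config_to_pbtxt(config, '/tmp/train.pbtxt')
dataset = get_dataset('/tmp/train.pbtxt', tensor_shape=[100, 221, 3])
```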
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n\n if request.method == 'POST':\n\n user = str(request.form['user'])\n password = str(request.form['password'])\n\n print(\"login\")\n\n access = authUser(user, password)\n msg = \"credenciales incorrectas\"\n\n if access:\n session['loggedin'] = True\n session['id'] = access[0]\n session['username'] = access[1]\n\n return redirect(f\"/home\")\n else:\n return render_template(\"index.html\", msg=msg)\n\n\n return render_template(\"index.html\")\n\n\n@app.route(\"/home\")\ndef home():\n\n if 'loggedin' in session:\n\n Course = getAllCourses()\n\n\n return render_template(\"home.html\", username=session[\"username\"], course=Course)\n \n return redirect(f\"/\")\n\n@app.route(\"/logout\")\ndef logout():\n session.pop('loggedin', None)\n session.pop('id', None)\n session.pop('username', None)\n\n return redirect(f\"/\")\n\n@app.route(\"/catalogoCursos\", methods=['GET', 'POST'])\ndef catalogoCursos():\n\n if request.method == \"POST\":\n title = str(request.form['title'])\n category = int(request.form['category'])\n description = str(request.form['description'])\n\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n\n file = request.files['file']\n\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n # print(\"%s %s %s\"%(title, category, description))\n \n createCourse(title, category, description, \"public/uploads/\"+filename)\n\n\n return redirect(f\"/catalogoCursos\")\n\n Course = getAllCourses()\n\n\n return render_template(\"CatalogoCursos.html\", course=Course)\n\n\n\n\n\n@app.route(\"/deleteMaterial\")\ndef deleteMaterial():\n\n mid = str(request.args.get(\"id\"))\n deleteMaterialBy(mid)\n\n return redirect(f\"/material\")\n\n\n@app.route(\"/deleteModule\")\ndef deleteModule():\n\n mid = str(request.args.get(\"id\"))\n deleteModuleBy(mid)\n\n return redirect(f\"/modules\")\n\n\n\n\n@app.route(\"/deleteCourse\")\ndef deleteCourse():\n\n mid = str(request.args.get(\"id\"))\n deleteCourseBy(mid)\n\n return redirect(f\"/catalogoCursos\")\n\n\n@app.route('/material', methods=['GET', 'POST'])\ndef material():\n if request.method == 'POST':\n\n module = str(request.form['module'])\n title = str(request.form['title'])\n video = str(request.form['video'])\n\n print(\"%s %s %s\"%(module, title, video))\n done = createMaterial(module, title, video)\n print(done)\n\n return redirect(f\"/material\")\n\n Material = getAllMaterial()\n\n return render_template(\"Material.html\", material=Material)\n\n@app.route('/modules', methods=['GET', 'POST'])\ndef modules():\n if request.method == 'POST':\n\n course = str(request.form['course'])\n title = str(request.form['title'])\n price = str(request.form['price'])\n\n\n # print(\"%s %s %s\"%(course, title, price))\n done = createModule(course, title, price)\n # print(done)\n\n return redirect(f\"/modules\")\n\n Modules = getAllModules()\n\n return render_template(\"Modules.html\", modules=Modules)\n\n\n@app.route(\"/updateModules\", methods=['POST'])\ndef updateModules():\n\n if request.method == 'POST':\n\n title = str(request.form['utitle'])\n course = int(request.form['ucourse'])\n \n price = str(request.form['uprice'])\n uid = str(request.form['uid'])\n\n\n print(\"%s %s %s %s\"%(title, course, price, 
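An editorial note on the upload handling in this Flask app: the same block (presence check, empty-name check, `allowed_file`, `secure_filename`, save) is repeated verbatim in `catalogoCursos` and `updateCourse`. One way to factor it out; `save_upload` is a hypothetical helper, not existing app.py code:

```python
import os
from typing import Optional

from werkzeug.utils import secure_filename


def save_upload(files, upload_folder: str) -> Optional[str]:
    """Validate and persist an uploaded file; return its stored name or None."""
    file = files.get('file')
    if file is None or file.filename == '':
        return None
    if not allowed_file(file.filename):  # allowed_file() is defined above
        return None
    filename = secure_filename(file.filename)
    file.save(os.path.join(upload_folder, filename))
    return filename
```

A route would then call `save_upload(request.files, app.config['UPLOAD_FOLDER'])` and flash an error when it returns None.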
uid))\n updateModule(course, title, price, uid)\n\n return redirect(f\"/modules\")\n\n\n\n\n@app.route('/signup', methods=['GET', 'POST'])\ndef signup():\n if request.method == 'POST':\n\n nick_name = str(request.form['nick_name'])\n mail = str(request.form['mail'])\n password = str(request.form['pass'])\n\n done = createUser(nick_name, mail, password)\n \n print(done)\n\n return redirect(f\"/\")\n\n\n return render_template(\"signup.html\")\n\n\n@app.route(\"/category_course\", methods=['GET'])\ndef category_course():\n\n Catalog = getCategory_course()\n\n return jsonify( json.dumps([ obj.__dict__ for obj in Catalog] )), 200\n\n@app.route(\"/getCourseBy\", methods=['GET'])\ndef category_courseBy():\n\n categoryId = str(request.args.get(\"categoryId\"))\n\n print(categoryId)\n\n # Colection = []\n Colection = getCourseBy(categoryId)\n\n return jsonify( json.dumps([ obj.__dict__ for obj in Colection] )), 200\n\n\n@app.route(\"/getCourseE\", methods=['GET'])\ndef category_courseE():\n\n id = str(request.args.get(\"id\"))\n\n print(id)\n\n # Colection = []\n Colection = getCourseE(id)\n\n return jsonify( json.dumps([ obj.__dict__ for obj in Colection] )), 200\n\n\n\n\n@app.route(\"/getModuleBy\", methods=['GET'])\ndef get_ModulesBy():\n\n id = str(request.args.get(\"id\"))\n\n print(id)\n\n # Colection = []\n Colection = getModulesBy(id)\n\n return jsonify( json.dumps([ obj.__dict__ for obj in Colection] )), 200\n\n\n@app.route(\"/getMaterialBy\", methods=['GET'])\ndef get_MaterialsBy():\n\n id = str(request.args.get(\"id\"))\n\n print(id)\n\n # Colection = []\n Colection = getMaterialBy(id)\n\n return jsonify( json.dumps([ obj.__dict__ for obj in Colection] )), 200\n\n\n\n@app.route(\"/getModuleE\", methods=['GET'])\ndef category_moduleE():\n\n id = str(request.args.get(\"id\"))\n\n print(id)\n\n # Colection = []\n Colection = getModuleE(id)\n\n return jsonify( json.dumps([ obj.__dict__ for obj in Colection] )), 200\n\n\n\n@app.route('/users', methods=['GET', 'POST'])\ndef allUser():\n User = getAllUser()\n\n return render_template(\"User.html\", users=User)\n\n\n@app.route('/quiz', methods=['GET', 'POST'])\ndef quiz():\n # User = getAllUser()\n\n return render_template(\"Quiz.html\")\n\n\n\n@app.route(\"/newCourse\", methods=['GET', 'POST'])\ndef newCourse(): \n if request.method == 'POST':\n return redirect(f\"/\")\n\n return render_template(\"newCourse.html\") \n\n\n@app.route(\"/updateCourse\", methods=['GET', 'POST'])\ndef updateCourse():\n\n if request.method == 'POST':\n\n title = str(request.form['utitle'])\n category = int(request.form['ucategory'])\n description = str(request.form['udescription'])\n uid = str(request.form['uid'])\n\n # print(\"id:%s\\t t: %s c: %s d:%s\"%(id, title, category, description))\n\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n\n file = request.files['file']\n\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n print(\"Update Course\")\n\n print(filename)\n answer = updateCourseCatalog(category, title, description, uid, \"public/uploads/\"+filename)\n\n print(answer)\n return redirect(f\"/catalogoCursos\")\n\n\n return render_template(\"newCourse.html\") \n\n\nif __name__ == '__main__':\n # app.secret_key = 'super secret key'\n app.run(debug=True, host=\"0.0.0.0\", 
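Another pattern worth noting in these routes: several views re-implement the `if 'loggedin' in session` guard inline. The conventional Flask alternative is a decorator; this sketch is a suggestion rather than existing app.py code:

```python
from functools import wraps

from flask import redirect, session


def login_required(view):
    @wraps(view)
    def wrapped(*args, **kwargs):
        # Bounce anonymous users back to the login page.
        if 'loggedin' not in session:
            return redirect("/")
        return view(*args, **kwargs)
    return wrapped
```

Usage would mirror the existing `home` view: stack `@login_required` under `@app.route("/home")` and drop the inline session check.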
port=\"3000\")\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"168201477","text":"# BSD 2-Clause License\n#\n# Copyright (c) 2021-2023, Hewlett Packard Enterprise\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom smartsim import Experiment, status\n\n\"\"\"\nTest the launch of simple entity types with local launcher\n\"\"\"\n\n\ndef test_models(fileutils):\n exp_name = \"test-models-local-launch\"\n exp = Experiment(exp_name, launcher=\"local\")\n test_dir = fileutils.make_test_dir()\n\n script = fileutils.get_test_conf_path(\"sleep.py\")\n settings = exp.create_run_settings(\"python\", f\"{script} --time=5\")\n\n M1 = exp.create_model(\"m1\", path=test_dir, run_settings=settings)\n M2 = exp.create_model(\"m2\", path=test_dir, run_settings=settings)\n\n exp.start(M1, block=False)\n statuses = exp.get_status(M1)\n assert all([stat != status.STATUS_FAILED for stat in statuses])\n\n # start another while first model is running\n exp.start(M2, block=True)\n statuses = exp.get_status(M1, M2)\n assert all([stat == status.STATUS_COMPLETED for stat in statuses])\n","sub_path":"tests/test_local_multi_run.py","file_name":"test_local_multi_run.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"104897650","text":"from typing import (Optional, List)\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n def removeLeafNodes(self, root: Optional[TreeNode], target: int) -> Optional[TreeNode]:\n # Runtime: 75 ms, faster than 57.45% of Python3 online submissions for Delete Leaves With a Given Value.\n # Memory Usage: 14.6 MB, less than 88.64% of Python3 online submissions for Delete Leaves With a Given Value.\n\n if self._remove_leaf(root, target):\n return None\n\n return root\n\n def _remove_leaf(self, root, target):\n\n if not root:\n return False\n\n #print(f'root = {root.val}')\n\n if self._is_leaf(root) == False:\n #print(f'{root.val} is not leaf')\n if self._remove_leaf(root.left, 
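An aside on the `removeLeafNodes` solution in progress around this point: the helper-based version works, but the standard formulation is a single postorder traversal that returns the (possibly pruned) subtree root, with no separate leaf test. A sketch reusing the `TreeNode` and `Optional` already defined in this file:

```python
class SolutionPostorder:
    def removeLeafNodes(self, root: Optional[TreeNode], target: int) -> Optional[TreeNode]:
        if root is None:
            return None
        # Prune the children first, then check whether this node has
        # itself become a target-valued leaf.
        root.left = self.removeLeafNodes(root.left, target)
        root.right = self.removeLeafNodes(root.right, target)
        if root.left is None and root.right is None and root.val == target:
            return None
        return root
```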
target):\n                #print(f\"root.left = {root.left.val} is leaf, so remove\")\n                root.left = None\n            if self._remove_leaf(root.right, target):\n                #print(f\"root.right = {root.right.val} is leaf, so remove\")\n                root.right = None\n\n        if self._is_leaf(root) and root.val == target:\n            #print(f'found leaf, root.val = {root.val}')\n            return True\n\n        return False\n\n    def _is_leaf(self, root):\n        if root.right or root.left:\n            return False\n        else:\n            return True","sub_path":"tree/medium/DeleteLeavesWithaGivenValue/delete_leaves_with_a_given_value.py","file_name":"delete_leaves_with_a_given_value.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"567680233","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # AIT Development notebook\n# \n# \n# ## notebook structure\n# \n# |#|area name|cell num|description|edit or not|\n# |---|---|---|---|---|\n# | 1|flags set|1|Sets the flag that selects an AIT launch or a jupyter launch.|no edit|\n# | 2|ait-sdk install|1|Use only jupyter launch. Finds the ait-sdk wheel and installs it.|no edit|\n# | 3|create requirements and pip install|3|Use only jupyter launch. Creates requirements.txt and installs the packages it lists.|should edit (second cell: set the modules you use)|\n# | 4|import|2|Write the import statements for the modules you use; the bottom lines must not be edited.|should edit (first cell: import your modules)|\n# | 5|create manifest|1|Use only jupyter launch. Creates ait.manifest.json.|should edit|\n# | 6|create input|1|Use only jupyter launch. Creates ait.input.json.|should edit|\n# | 7|initialize|1|This cell initializes the AIT progress.|no edit|\n# | 8|functions|N|Measures, resources, and downloads are declared in ait.manifest.json. Define the functions that add them.|should edit|\n# | 9|main|1|Reads the data set or model and calls the functions defined in `functions-area`.|should edit|\n# |10|entrypoint|1|Calls the main function.|no edit|\n# |11|license attribute set|1|Use only notebook launch. Sets the license attributes.|should edit|\n# |12|prepare deploy|1|Use only notebook launch. 
Convert to python programs and create dag.py.|no edit|\n# \n# ## notebook template revision history\n# \n# ### 1.0.1 2020/10/21\n# \n# * add revision history\n# * separate `create requirements and pip install` editable and noeditable\n# * separate `import` editable and noeditable\n# \n# ### 1.0.0 2020/10/12\n# \n# * new cerarion\n\n# In[1]:\n\n\n#########################################\n# area:flags set\n# do not edit\n#########################################\n\n# Determine whether to start AIT or jupyter by startup argument\nimport sys\nis_ait_launch = (len(sys.argv) == 2)\n\n\n# In[2]:\n\n\n#########################################\n# area:ait-sdk install\n# do not edit\n#########################################\nif not is_ait_launch:\n # get ait-sdk file name\n from pathlib import Path\n from glob import glob\n import re\n\n def numericalSort(value):\n numbers = re.compile(r'(\\d+)')\n parts = numbers.split(value)\n parts[1::2] = map(int, parts[1::2])\n return parts\n latest_sdk_file_path=sorted(glob('../lib/*.whl'), key=numericalSort)[-1]\n\n ait_sdk_name = Path(latest_sdk_file_path).name\n \n # copy to develop dir\n import shutil\n current_dir = get_ipython().run_line_magic('pwd', '')\n shutil.copyfile(f'../lib/{ait_sdk_name}', f'{current_dir}/{ait_sdk_name}')\n\n # install ait-sdk\n get_ipython().system('pip install --upgrade pip')\n get_ipython().system('pip install --force-reinstall ./$ait_sdk_name')\n\n\n# In[3]:\n\n\n#########################################\n# area:create requirements and pip install\n# do not edit\n#########################################\nif not is_ait_launch:\n from ait_sdk.common.files.ait_requirements_generator import AITRequirementsGenerator\n requirements_generator = AITRequirementsGenerator()\n\n\n# In[4]:\n\n\n#########################################\n# area:create requirements and pip install\n# should edit\n#########################################\nif not is_ait_launch:\n requirements_generator.add_package('pandas')\n requirements_generator.add_package('seaborn')\n\n\n# In[5]:\n\n\n#########################################\n# area:create requirements and pip install\n# do not edit\n#########################################\nif not is_ait_launch:\n requirements_generator.add_package(f'./{ait_sdk_name}')\n requirements_path = requirements_generator.create_requirements(current_dir)\n\n get_ipython().system('pip install -r $requirements_path ')\n\n\n# In[6]:\n\n\n#########################################\n# area:import\n# should edit\n#########################################\n\n# import if you need modules cell\nimport pandas as pd\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nfrom os import makedirs, path\n\n\n# In[7]:\n\n\n#########################################\n# area:import\n# do not edit\n#########################################\n\n# must use modules\nimport shutil # do not remove\nfrom ait_sdk.common.files.ait_input import AITInput # do not remove\nfrom ait_sdk.common.files.ait_output import AITOutput # do not remove\nfrom ait_sdk.common.files.ait_manifest import AITManifest # do not remove\nfrom ait_sdk.develop.ait_path_helper import AITPathHelper # do not remove\nfrom ait_sdk.utils.logging import get_logger, log, get_log_path # do not remove\nfrom ait_sdk.develop.annotation import measures, resources, downloads, ait_main # do not remove\n# must use modules\n\n\n# In[8]:\n\n\n#########################################\n# area:create manifest\n# should edit\n#########################################\nif 
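A note on the `numericalSort` helper defined in the ait-sdk install cell above: it exists so that version components compare numerically, where plain string order would rank `0.10.0` below `0.2.0`. A worked check, assuming `numericalSort` as defined above (the wheel names are illustrative):

```python
wheels = ['../lib/ait_sdk-0.2.0-py3-none-any.whl',
          '../lib/ait_sdk-0.10.0-py3-none-any.whl']

# Lexicographic order is wrong here: '1' < '2' makes 0.10.0 sort before 0.2.0.
assert sorted(wheels)[-1].endswith('0.2.0-py3-none-any.whl')

# numericalSort splits out the digit runs and compares them as integers,
# so the newest wheel really is the last element.
assert sorted(wheels, key=numericalSort)[-1].endswith('0.10.0-py3-none-any.whl')
```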
not is_ait_launch:\n from ait_sdk.common.files.ait_manifest_generator import AITManifestGenerator\n \n manifest_genenerator = AITManifestGenerator(current_dir)\n manifest_genenerator.set_ait_name('dev_template_local_docker')\n manifest_genenerator.set_ait_description('AIT template (docker image regist to local)')\n manifest_genenerator.set_ait_author('AIST')\n manifest_genenerator.set_ait_email('')\n manifest_genenerator.set_ait_version('0.1')\n manifest_genenerator.set_ait_quality('https://airc.aist.go.jp/aiqm/quality/internal/Coverage_for_distinguished_problem_cases')\n manifest_genenerator.set_ait_reference('')\n manifest_genenerator.add_ait_inventories(name='iris_data', \n type_='dataset', \n description='アヤメの分類データです', \n format_=['csv'], \n schema='https://archive.ics.uci.edu/ml/datasets/iris')\n manifest_genenerator.add_ait_parameters(name='mean_column_name', \n type_='str', \n description='sepal.width\\nsepal.length\\npetal.width\\npetal.length', \n default_val='sepal.width')\n manifest_genenerator.add_ait_measures(name='mean', \n type_='float', \n description='mean of select column', \n structure='single',\n min='0')\n manifest_genenerator.add_ait_resources(name='pairplot', \n type_='picture', \n description='pairplot')\n manifest_genenerator.add_ait_downloads(name='Log', \n description='AIT実行ログ')\n manifest_path = manifest_genenerator.write()\n\n\n# In[9]:\n\n\n#########################################\n# area:create input\n# should edit\n#########################################\nif not is_ait_launch:\n from ait_sdk.common.files.ait_input_generator import AITInputGenerator\n input_generator = AITInputGenerator(manifest_path)\n input_generator.add_ait_inventories(name='iris_data',\n value='iris_data/tableconvert_csv_4nryby.csv')\n input_generator.set_ait_params(name='mean_column_name',\n value='petal.width')\n input_generator.write()\n\n\n# In[10]:\n\n\n#########################################\n# area:initialize\n# do not edit\n#########################################\n\nlogger = get_logger()\n\nait_manifest = AITManifest()\nait_input = AITInput(ait_manifest)\nait_output = AITOutput(ait_manifest)\n\nif is_ait_launch:\n # launch from AIT\n current_dir = path.dirname(path.abspath(__file__))\n path_helper = AITPathHelper(argv=sys.argv, ait_input=ait_input, ait_manifest=ait_manifest, entry_point_dir=current_dir)\nelse:\n # launch from jupyter notebook\n # ait.input.json make in input_dir\n input_dir = '/usr/local/qai/mnt/ip/job_args/1/1'\n current_dir = get_ipython().run_line_magic('pwd', '')\n path_helper = AITPathHelper(argv=['', input_dir], ait_input=ait_input, ait_manifest=ait_manifest, entry_point_dir=current_dir)\n\nait_input.read_json(path_helper.get_input_file_path())\nait_manifest.read_json(path_helper.get_manifest_file_path())\n\n### do not edit cell\n\n\n# In[11]:\n\n\n#########################################\n# area:functions\n# should edit\n#########################################\n\n@log(logger)\n@measures(ait_output, 'mean')\ndef calc_mean(iris_data, col_name):\n return iris_data.mean()[col_name]\n\n\n# In[12]:\n\n\n#########################################\n# area:functions\n# should edit\n#########################################\n\n@log(logger)\n@resources(ait_output, path_helper, 'pairplot', 'pairplot.png')\ndef save_pair_plot(iris_data, file_path: str=None) -> None:\n sn.pairplot(iris_data, hue='variety')\n plt.savefig(file_path)\n\n\n# In[13]:\n\n\n#########################################\n# area:functions\n# should 
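The `@measures` decorator above binds a function's return value to a measure declared in the manifest. For illustration, a hypothetical second measure; it would also require a matching `manifest_genenerator.add_ait_measures(name='stddev', ...)` call in the manifest cell, which this notebook does not contain:

```python
@log(logger)
@measures(ait_output, 'stddev')
def calc_stddev(iris_data, col_name):
    # Standard deviation of the selected column, analogous to calc_mean above.
    return iris_data.std()[col_name]
```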
edit\n#########################################\n\n@log(logger)\n@downloads(ait_output, path_helper, 'Log', 'ait.log')\ndef move_log(file_path: str=None) -> None:\n shutil.move(get_log_path(), file_path)\n\n\n# In[14]:\n\n\n#########################################\n# area:main\n# should edit\n#########################################\n\n@log(logger)\n@ait_main(ait_output, path_helper)\ndef main() -> None:\n\n # インベントリを読み込み\n iris_data = pd.read_csv(ait_input.get_inventory_path('iris_data'))\n \n calc_mean(iris_data, ait_input.get_method_param_value('mean_column_name'))\n save_pair_plot(iris_data)\n move_log()\n\n\n# In[15]:\n\n\n#########################################\n# area:entory point\n# do not edit\n#########################################\nif __name__ == '__main__':\n main()\n\n\n# In[16]:\n\n\n#########################################\n# area:license attribute set\n# should edit\n#########################################\nait_owner='AIST'\nait_creation_year='2020'\n\n\n# In[17]:\n\n\n#########################################\n# area:prepare deproy\n# do not edit\n#########################################\n\nif not is_ait_launch:\n from ait_sdk.deploy import prepare_deploy\n from ait_sdk.license.license_generator import LicenseGenerator\n \n current_dir = get_ipython().run_line_magic('pwd', '')\n prepare_deploy(ait_manifest, ait_sdk_name, current_dir, requirements_path, is_remote_deploy=False)\n \n # output License.txt\n license_generator = LicenseGenerator()\n license_generator.write('../top_dir/LICENSE.txt', ait_creation_year, ait_owner)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"ait_repository/ait/dev_template_local_docker_0.1/deploy/container/repository/my_ait.py","file_name":"my_ait.py","file_ext":"py","file_size_in_byte":10637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"601981138","text":"import numpy as np\r\nimport random\r\nimport math\r\nimport zmq\r\nimport time\r\nfrom scipy.spatial import distance\r\nfrom tdw.output_data import IsOnNavMesh, Images, Bounds\r\nfrom PIL import Image\r\nimport io\r\nimport os\r\nfrom threading import Thread\r\nfrom tdw.controller import Controller\r\nfrom typing import List, Tuple, Dict, Optional, Union\r\nimport socket\r\nfrom contextlib import closing\r\nfrom tdw.librarian import ModelRecord\r\nfrom pathlib import Path\r\nimport boto3\r\nfrom botocore.exceptions import ProfileNotFound, ClientError\r\nfrom subprocess import check_output, Popen, call\r\nimport re\r\nfrom psutil import pid_exists\r\nimport base64\r\n\r\n\r\nclass TDWUtils:\r\n \"\"\"\r\n Utility functions for controllers.\r\n\r\n Usage:\r\n\r\n ```python\r\n from tdw.tdw_utils import TDWUtils\r\n ```\r\n \"\"\"\r\n\r\n VECTOR3_ZERO = {\"x\": 0, \"y\": 0, \"z\": 0}\r\n\r\n # Cached values used during point cloud generation.\r\n __WIDTH: int = -1\r\n __HEIGHT: int = -1\r\n __CAM_TO_IMG_MAT: Optional[np.array] = None\r\n\r\n @staticmethod\r\n def vector3_to_array(vector3: Dict[str, float]) -> np.array:\r\n \"\"\"\r\n Convert a Vector3 object to a numpy array.\r\n\r\n :param vector3: The Vector3 object, e.g. `{\"x\": 0, \"y\": 0, \"z\": 0}`\r\n\r\n :return A numpy array.\r\n \"\"\"\r\n\r\n return np.array([vector3[\"x\"], vector3[\"y\"], vector3[\"z\"]])\r\n\r\n @staticmethod\r\n def array_to_vector3(arr: np.array) -> Dict[str, float]:\r\n \"\"\"\r\n Convert a numpy array to a Vector3.\r\n\r\n :param arr: The numpy array.\r\n\r\n :return A Vector3, e.g. 
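A quick round trip through the two converters above (values chosen so the float conversion is exact):

```python
import numpy as np  # numpy is already imported as np in this module

v = {"x": 1.0, "y": 0.5, "z": -2.0}
arr = TDWUtils.vector3_to_array(v)          # array([ 1. ,  0.5, -2. ])
assert isinstance(arr, np.ndarray)
assert TDWUtils.array_to_vector3(arr) == v  # back to the Vector3 dict
```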
`{\"x\": 0, \"y\": 0, \"z\": 0}`\r\n \"\"\"\r\n\r\n return {\"x\": float(arr[0]), \"y\": float(arr[1]), \"z\": float(arr[2])}\r\n\r\n @staticmethod\r\n def vector4_to_array(vector4: Dict[str, float]) -> np.array:\r\n \"\"\"\r\n Convert a Vector4 to a numpy array.\r\n\r\n :param vector4: The Vector4 object, e.g. `{\"x\": 0, \"y\": 0, \"z\": 0, \"w\": 0}`\r\n\r\n :return A numpy array.\r\n \"\"\"\r\n\r\n return np.array([vector4[\"x\"], vector4[\"y\"], vector4[\"z\"], vector4[\"w\"]])\r\n\r\n @staticmethod\r\n def array_to_vector4(arr: np.array) -> Dict[str, float]:\r\n \"\"\"\r\n Convert a numpy array to a Vector4.\r\n\r\n :param arr: The numpy array.\r\n\r\n :return A Vector4, e.g. `{\"x\": 0, \"y\": 0, \"z\": 0, \"w\": 0}`\r\n \"\"\"\r\n\r\n return {\"x\": arr[0], \"y\": arr[1], \"z\": arr[2], \"w\": arr[3]}\r\n\r\n @staticmethod\r\n def color_to_array(color: Dict[str, float]) -> np.array:\r\n \"\"\"\r\n Convert a RGB Color to a numpy array.\r\n\r\n :param color: The Color object, e.g. `{\"r\": 0, \"g\": 0, \"b\": 0, \"a\": 1}`\r\n\r\n :return A numpy array.\r\n \"\"\"\r\n\r\n return np.array([round(color[\"r\"] * 255), round(color[\"g\"] * 255), round(color[\"b\"] * 255)])\r\n\r\n @staticmethod\r\n def array_to_color(arr: np.array) -> Dict[str, float]:\r\n \"\"\"\r\n Convert a numpy array to a RGBA Color. If no A value is supplied it will default to 1.\r\n\r\n :param arr: The array.\r\n\r\n :return A Color, e.g. `{\"r\": 0, \"g\": 0, \"b\": 0, \"a\": 1}`\r\n \"\"\"\r\n\r\n return {\"r\": arr[0], \"g\": arr[1], \"b\": arr[2], \"a\": 1 if len(arr) == 3 else arr[3]}\r\n\r\n @staticmethod\r\n def get_random_point_in_circle(center: np.array, radius: float) -> np.array:\r\n \"\"\"\r\n Get a random point in a circle, defined by a center and radius.\r\n\r\n :param center: The center of the circle.\r\n :param radius: The radius of the circle.\r\n\r\n :return A numpy array. The y value (`arr[1]`) is always 0.\r\n \"\"\"\r\n\r\n alpha = 2 * math.pi * random.random()\r\n r = radius * math.sqrt(random.random())\r\n x = r * math.cos(alpha) + center[0]\r\n z = r * math.sin(alpha) + center[2]\r\n\r\n return np.array([x, 0, z])\r\n\r\n @staticmethod\r\n def get_magnitude(vector3: Dict[str, float]) -> float:\r\n \"\"\"\r\n Get the magnitude of a Vector3.\r\n\r\n :param vector3: The Vector3 object, e.g. `{\"x\": 0, \"y\": 0, \"z\": 0}`\r\n\r\n :return The vector magnitude.\r\n \"\"\"\r\n\r\n return np.linalg.norm(TDWUtils.vector3_to_array(vector3))\r\n\r\n @staticmethod\r\n def extend_line(p0: np.array, p1: np.array, d: float, clamp_y=True) -> np.array:\r\n \"\"\"\r\n Extend the line defined by p0 to p1 by distance d. Clamps the y value to 0.\r\n\r\n :param p0: The origin.\r\n :param p1: The second point.\r\n :param d: The distance of which the line is to be extended.\r\n :param clamp_y: Clamp the y value to 0.\r\n\r\n :return: The position at distance d.\r\n \"\"\"\r\n\r\n if clamp_y:\r\n p0[1] = 0\r\n p1[1] = 0\r\n\r\n # Get the distance between the two points.\r\n d0 = distance.euclidean(p0, p1)\r\n # Get the total distance.\r\n d_total = d0 + d\r\n\r\n return p1 + ((p1 - p0) * d_total)\r\n\r\n @staticmethod\r\n def get_distance(vector3_0: Dict[str, float], vector3_1: Dict[str, float]) -> float:\r\n \"\"\"\r\n Calculate the distance between two Vector3 (e.g. 
`{\"x\": 0, \"y\": 0, \"z\": 0}`) objects.\r\n\r\n :param vector3_0: The first Vector3.\r\n :param vector3_1: The second Vector3.\r\n\r\n :return The distance.\r\n \"\"\"\r\n\r\n return distance.euclidean(TDWUtils.vector3_to_array(vector3_0), TDWUtils.vector3_to_array(vector3_1))\r\n\r\n @staticmethod\r\n def get_box(width: int, length: int) -> List[Dict[str, int]]:\r\n \"\"\"\r\n Returns a list of x,y positions that can be used to create a box with the `create_exterior_walls` command.\r\n :param width: The width of the box.\r\n :param length: The length of the box.\r\n\r\n :return The box as represented by a list of `{\"x\": x, \"y\": y}` dictionaries.\r\n \"\"\"\r\n\r\n box = []\r\n for x in range(width):\r\n for y in range(length):\r\n if x == 0 or x == width - 1 or y == 0 or y == length - 1:\r\n box.append({\"x\": x, \"y\": y})\r\n return box\r\n\r\n @staticmethod\r\n def get_vector3(x, y, z) -> Dict[str, float]:\r\n \"\"\"\r\n :param x: The x value.\r\n :param y: The y value.\r\n :param z: The z value.\r\n\r\n :return: A Vector3: {\"x\": x, \"y\", y, \"z\": z}\r\n \"\"\"\r\n\r\n return {\"x\": x, \"y\": y, \"z\": z}\r\n\r\n @staticmethod\r\n def create_empty_room(width: int, length: int) -> dict:\r\n \"\"\"\r\n :param width: The width of the room.\r\n :param length: The length of the room.\r\n\r\n :return: A `create_exterior_walls` command that creates a box with dimensions (width, length).\r\n \"\"\"\r\n\r\n return {\"$type\": \"create_exterior_walls\", \"walls\": TDWUtils.get_box(width, length)}\r\n\r\n @staticmethod\r\n def create_room_from_image(filepath: str, exterior_color=(255, 0, 0), interior_color=(0, 0, 0)) -> List[dict]:\r\n \"\"\"\r\n Load a .png file from the disk and use it to create a room. Each pixel on the image is a grid point.\r\n\r\n :param filepath: The absolute filepath to the image.\r\n :param exterior_color: The color on the image marking exterior walls (default=red).\r\n :param interior_color: The color on the image marking interior walls (default=black).\r\n\r\n :return: A list of commands: The first creates the exterior walls, and the second creates the interior walls.\r\n \"\"\"\r\n\r\n exterior_walls = []\r\n interior_walls = []\r\n\r\n # Read the image.\r\n img = Image.open(filepath)\r\n pixels = img.load()\r\n col, row = img.size\r\n\r\n # Read each pixel as a grid point.\r\n for i in range(row):\r\n for j in range(col):\r\n pixel = pixels[i, j]\r\n if len(pixel) == 4:\r\n pixel = (pixel[0], pixel[1], pixel[2])\r\n if pixel == exterior_color:\r\n exterior_walls.append({\"x\": i, \"y\": col - j})\r\n elif pixel == interior_color:\r\n interior_walls.append({\"x\": i, \"y\": col - j})\r\n\r\n return [{\"$type\": \"create_exterior_walls\",\r\n \"walls\": exterior_walls},\r\n {\"$type\": \"create_interior_walls\",\r\n \"walls\": interior_walls}]\r\n\r\n @staticmethod\r\n def save_images(images: Images, filename: str, output_directory=\"dist\", resize_to=None, append_pass: bool = True) -> None:\r\n \"\"\"\r\n Save each image in the Images object.\r\n The name of the image will be: pass_filename.extension, e.g.: `\"0000\"` -> `depth_0000.png`\r\n The images object includes the pass and extension information.\r\n\r\n :param images: The Images object. Contains each capture pass plus metadata.\r\n :param output_directory: The directory to write images to.\r\n :param filename: The filename of each image, minus the extension. The image pass will be appended as a prefix.\r\n :param resize_to: Specify a (width, height) tuple to resize the images to. 
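The wall helpers above are typically consumed through a controller. A minimal sketch, assuming a TDW build is available for the controller to connect to; `get_box()` marks only the perimeter grid points as walls, so this creates an empty 12x12 box room:

```python
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils

c = Controller()
c.communicate(TDWUtils.create_empty_room(12, 12))
```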
This is slower than saving as-is.\r\n :param append_pass: If false, the image pass will _not_ be appended to the filename as a prefix, e.g.: `\"0000\"`: -> \"`0000.jpg\"`\r\n \"\"\"\r\n\r\n if not os.path.isdir(output_directory):\r\n os.makedirs(output_directory)\r\n\r\n for i in range(images.get_num_passes()):\r\n if append_pass:\r\n fi = images.get_pass_mask(i)[1:] + \"_\" + filename + \".\" + images.get_extension(i)\r\n else:\r\n fi = filename + \".\" + images.get_extension(i)\r\n\r\n if resize_to:\r\n TDWUtils.get_pil_image(images, i).resize((resize_to[0], resize_to[1]), Image.LANCZOS)\\\r\n .save(os.path.join(output_directory, fi))\r\n else:\r\n pass_mask = images.get_pass_mask(i)\r\n path = os.path.join(output_directory, fi)\r\n # The depth passes aren't png files, so we need to convert them.\r\n if pass_mask == \"_depth\" or pass_mask == \"_depth_simple\":\r\n # Save the image.\r\n Image.fromarray(TDWUtils.get_shaped_depth_pass(images=images, index=i)).save(path)\r\n # Every other pass can be saved directly to disk.\r\n else:\r\n with open(path, \"wb\") as f:\r\n f.write(images.get_image(i))\r\n\r\n @staticmethod\r\n def get_shaped_depth_pass(images: Images, index: int) -> np.array:\r\n \"\"\"\r\n The `_depth` and `_depth_simple` passes are a 1D array of RGB values, as oppposed to a png or jpg like every other pass.\r\n This function reshapes the array into a 2D array of RGB values.\r\n\r\n :param images: The `Images` output data.\r\n :param index: The index in `Images` of the depth pass. See: `Images.get_pass_mask()`.\r\n\r\n :return: A reshaped depth pass. Shape is: `(height, width, 3)`.\r\n \"\"\"\r\n\r\n return np.flip(np.reshape(images.get_image(index), (images.get_height(), images.get_width(), 3)), 0)\r\n\r\n @staticmethod\r\n def zero_padding(integer: int, width=4) -> str:\r\n \"\"\"\r\n :param integer: The integer being converted.\r\n :param width: The total number of digits in the string. If integer == 3 and width == 4, output is: \"0003\".\r\n\r\n :return A string representation of an integer padded with zeroes, e.g. 
converts `3` to `\"0003\"`.\r\n \"\"\"\r\n\r\n return str(integer).zfill(width)\r\n\r\n @staticmethod\r\n def get_pil_image(images: Images, index: int) -> Image:\r\n \"\"\"\r\n Converts Images output data to a PIL Image object.\r\n Use this function to read and analyze an image in memory.\r\n Do NOT use this function to save image data to disk; `save_image` is much faster.\r\n\r\n :param images: Images data from the build.\r\n :param index: The index of the image in Images.get_image\r\n\r\n :return A PIL image.\r\n \"\"\"\r\n\r\n return Image.open(io.BytesIO(images.get_image(index)))\r\n\r\n @staticmethod\r\n def get_random_position_on_nav_mesh(c: Controller, width: float, length: float, x_e=0, z_e=0, bake=True, rng=random.uniform) -> Tuple[float, float, float]:\r\n \"\"\"\r\n Returns a random position on a NavMesh.\r\n\r\n :param c: The controller.\r\n :param width: The width of the environment.\r\n :param length: The length of the environment.\r\n :param bake: If true, send bake_nav_mesh.\r\n :param rng: Random number generator.\r\n :param x_e: The x position of the environment.\r\n :param z_e: The z position of the environment.\r\n\r\n :return The coordinates as a tuple `(x, y, z)`\r\n \"\"\"\r\n\r\n if bake:\r\n c.communicate({'$type': 'bake_nav_mesh'})\r\n\r\n # Try to find a valid position on the NavMesh.\r\n is_on = False\r\n x, y, z = (0, 0, 0)\r\n while not is_on:\r\n # Get a random position.\r\n x = rng(-width / 2, width / 2) + x_e\r\n z = rng(-length / 2, length / 2) + z_e\r\n resp = c.communicate(\r\n {'$type': 'send_is_on_nav_mesh',\r\n 'position': {'x': x, 'y': 0, 'z': z},\r\n 'max_distance': 4.0\r\n })\r\n answer = IsOnNavMesh(resp[0])\r\n is_on = answer.get_is_on()\r\n x, y, z = answer.get_position()\r\n return x, y, z\r\n\r\n @staticmethod\r\n def set_visual_material(c: Controller, substructure: List[dict], object_id: int, material: str, quality=\"med\") -> List[dict]:\r\n \"\"\"\r\n :param c: The controller.\r\n :param substructure: The metadata substructure of the object.\r\n :param object_id: The ID of the object in the scene.\r\n :param material: The name of the new material.\r\n :param quality: The quality of the material.\r\n\r\n :return A list of commands to set ALL visual materials on an object to a single material.\r\n \"\"\"\r\n\r\n commands = []\r\n for sub_object in substructure:\r\n for i in range(len(sub_object[\"materials\"])):\r\n commands.extend([c.get_add_material(material, library=\"materials_\" + quality + \".json\"),\r\n {\"$type\": \"set_visual_material\",\r\n \"id\": object_id,\r\n \"material_name\": material,\r\n \"object_name\": sub_object[\"name\"],\r\n \"material_index\": i}])\r\n return commands\r\n\r\n @staticmethod\r\n def get_depth_values(image: np.array, depth_pass: str = \"_depth\", width: int = 256, height: int = 256, near_plane: float = 0.1, far_plane: float = 100) -> np.array:\r\n \"\"\"\r\n Get the depth values of each pixel in a _depth image pass.\r\n The far plane is hardcoded as 100. The near plane is hardcoded as 0.1.\r\n (This is due to how the depth shader is implemented.)\r\n\r\n :param image: The image pass as a numpy array.\r\n :param depth_pass: The type of depth pass. This determines how the values are decoded. Options: `\"_depth\"`, `\"_depth_simple\"`.\r\n :param width: The width of the screen in pixels. See output data `Images.get_width()`.\r\n :param height: The height of the screen in pixels. See output data `Images.get_height()`.\r\n :param near_plane: The near clipping plane. See command `set_camera_clipping_planes`. 
The default value in this function is the default value of the near clipping plane.\r\n :param far_plane: The far clipping plane. See command `set_camera_clipping_planes`. The default value in this function is the default value of the far clipping plane.\r\n\r\n :return An array of depth values.\r\n \"\"\"\r\n\r\n # Convert the image to a 2D image array.\r\n image = np.flip(np.reshape(image, (height, width, 3)), 0)\r\n if depth_pass == \"_depth\":\r\n depth_values = np.array((image[:, :, 0] + image[:, :, 1] / 256.0 + image[:, :, 2] / (256.0 ** 2)))\r\n elif depth_pass == \"_depth_simple\":\r\n depth_values = image[:, :, 0] / 256.0\r\n else:\r\n raise Exception(f\"Invalid depth pass: {depth_pass}\")\r\n # Un-normalize the depth values.\r\n return (depth_values * ((far_plane - near_plane) / 256.0)).astype(np.float32)\r\n\r\n @staticmethod\r\n def get_point_cloud(depth, camera_matrix: Union[np.array, tuple], vfov: float = 54.43222, filename: str = None, near_plane: float = 0.1, far_plane: float = 100) -> np.array:\r\n \"\"\"\r\n Create a point cloud from an numpy array of depth values.\r\n\r\n :param depth: Depth values converted from a depth pass. See: `TDWUtils.get_depth_values()`\r\n :param camera_matrix: The camera matrix as a tuple or numpy array. See: [`send_camera_matrices`](https://github.com/threedworld-mit/tdw/blob/master/Documentation/api/command_api.md#send_camera_matrices).\r\n :param vfov: The field of view. See: [`set_field_of_view`](https://github.com/threedworld-mit/tdw/blob/master/Documentation/api/command_api.md#set_field_of_view)\r\n :param filename: If not None, the point cloud data will be written to this file.\r\n :param near_plane: The near clipping plane. See command `set_camera_clipping_planes`. The default value in this function is the default value of the near clipping plane.\r\n :param far_plane: The far clipping plane. See command `set_camera_clipping_planes`. 
The default value in this function is the default value of the far clipping plane.\r\n\r\n :return: An point cloud as a numpy array of `[x, y, z]` coordinates.\r\n \"\"\"\r\n\r\n if isinstance(camera_matrix, tuple):\r\n camera_matrix = np.array(camera_matrix)\r\n camera_matrix = np.linalg.inv(camera_matrix.reshape((4, 4)))\r\n\r\n # Different from real-world camera coordinate system.\r\n # OpenGL uses negative z axis as the camera front direction.\r\n # x axes are same, hence y axis is reversed as well.\r\n # Source: https://learnopengl.com/Getting-started/Camera\r\n rot = np.array([[1, 0, 0, 0],\r\n [0, -1, 0, 0],\r\n [0, 0, -1, 0],\r\n [0, 0, 0, 1]])\r\n camera_matrix = np.dot(camera_matrix, rot)\r\n\r\n # Cache some calculations we'll need to use every time.\r\n if TDWUtils.__HEIGHT != depth.shape[0] or TDWUtils.__WIDTH != depth.shape[1]:\r\n TDWUtils.__HEIGHT = depth.shape[0]\r\n TDWUtils.__WIDTH = depth.shape[1]\r\n\r\n img_pixs = np.mgrid[0: depth.shape[0], 0: depth.shape[1]].reshape(2, -1)\r\n # Swap (v, u) into (u, v).\r\n img_pixs[[0, 1], :] = img_pixs[[1, 0], :]\r\n img_pix_ones = np.concatenate((img_pixs, np.ones((1, img_pixs.shape[1]))))\r\n\r\n # Calculate the intrinsic matrix from vertical_fov.\r\n # Motice that hfov and vfov are different if height != width\r\n # We can also get the intrinsic matrix from opengl's perspective matrix.\r\n # http://kgeorge.github.io/2014/03/08/calculating-opengl-perspective-matrix-from-opencv-intrinsic-matrix\r\n vfov = vfov / 180.0 * np.pi\r\n tan_half_vfov = np.tan(vfov / 2.0)\r\n tan_half_hfov = tan_half_vfov * TDWUtils.__WIDTH / float(TDWUtils.__HEIGHT)\r\n fx = TDWUtils.__WIDTH / 2.0 / tan_half_hfov # focal length in pixel space\r\n fy = TDWUtils.__HEIGHT / 2.0 / tan_half_vfov\r\n intrinsics = np.array([[fx, 0, TDWUtils.__WIDTH / 2.0],\r\n [0, fy, TDWUtils.__HEIGHT / 2.0],\r\n [0, 0, 1]])\r\n img_inv = np.linalg.inv(intrinsics[:3, :3])\r\n TDWUtils.__CAM_TO_IMG_MAT = np.dot(img_inv, img_pix_ones)\r\n\r\n points_in_cam = np.multiply(TDWUtils.__CAM_TO_IMG_MAT, depth.reshape(-1))\r\n points_in_cam = np.concatenate((points_in_cam, np.ones((1, points_in_cam.shape[1]))), axis=0)\r\n points_in_world = np.dot(camera_matrix, points_in_cam)\r\n points_in_world = points_in_world[:3, :].reshape(3, TDWUtils.__WIDTH, TDWUtils.__HEIGHT)\r\n points_in_cam = points_in_cam[:3, :].reshape(3, TDWUtils.__WIDTH, TDWUtils.__HEIGHT)\r\n if filename is not None:\r\n f = open(filename, 'w')\r\n for i in range(points_in_world.shape[1]):\r\n for j in range(points_in_world.shape[2]):\r\n if points_in_cam[2, i, j] < (far_plane - near_plane):\r\n f.write(f'{points_in_world[0, i, j]};{points_in_world[1, i, j]};{points_in_world[2, i, j]}\\n')\r\n return points_in_world\r\n\r\n @staticmethod\r\n def create_avatar(avatar_type=\"A_Img_Caps_Kinematic\", avatar_id=\"a\", position=None, look_at=None) -> List[dict]:\r\n \"\"\"\r\n This is a wrapper for `create_avatar` and, optionally, `teleport_avatar_to` and `look_at_position`.\r\n\r\n :param avatar_type: The type of avatar.\r\n :param avatar_id: The avatar ID.\r\n :param position: The position of the avatar. 
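How the two depth helpers above chain together: decode the `_depth` pass into metric depth, then back-project it to world-space points. A sketch assuming `resp` holds `Images` output data (with the `_depth` pass at index 0) and `CameraMatrices` output data, both previously requested from the build:

```python
from tdw.output_data import Images, CameraMatrices

images = Images(resp[0])
matrices = CameraMatrices(resp[1])

depth = TDWUtils.get_depth_values(images.get_image(0),
                                  width=images.get_width(),
                                  height=images.get_height())
# Back-project with the sensor's camera matrix; vfov defaults to 54.43222.
cloud = TDWUtils.get_point_cloud(depth, camera_matrix=matrices.get_camera_matrix())
```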
If this is None, the avatar won't teleport.\r\n :param look_at: If this isn't None, the avatar will look at this position.\r\n\r\n :return A list of commands to create theavatar.\r\n \"\"\"\r\n\r\n # Create the avatar.\r\n commands = [{\"$type\": \"create_avatar\",\r\n \"type\": avatar_type,\r\n \"id\": avatar_id}]\r\n\r\n # Teleport the avatar.\r\n if position:\r\n commands.append({\"$type\": \"teleport_avatar_to\",\r\n \"avatar_id\": avatar_id,\r\n \"position\": position})\r\n if look_at:\r\n commands.append({\"$type\": \"look_at_position\",\r\n \"avatar_id\": avatar_id,\r\n \"position\": look_at})\r\n return commands\r\n\r\n @staticmethod\r\n def _send_start_build(socket, controller_address: str) -> dict:\r\n \"\"\"\r\n This sends a command to the launch_binaries daemon running on a remote node\r\n to start a binary connected to the given controller address.\r\n\r\n :param socket: The zmq socket.\r\n :param controller_address: The host name or ip address of node running the controller.\r\n\r\n :return Build info dictionary containing build port.\r\n \"\"\"\r\n request = {\"type\": \"start_build\",\r\n \"controller_address\": controller_address}\r\n socket.send_json(request)\r\n build_info = socket.recv_json()\r\n return build_info\r\n\r\n @staticmethod\r\n def _send_keep_alive(socket, build_info: dict) -> dict:\r\n \"\"\"\r\n This sends a command to the launch_binaries daemon running on a remote node\r\n to mark a given binary as still alive, preventing garbage collection.\r\n\r\n :param socket: The zmq socket.\r\n :param build_info: A diciontary containing the build_port.\r\n\r\n :return a heartbeat indicating build is still alive.\r\n \"\"\"\r\n\r\n build_port = build_info[\"build_port\"]\r\n request = {\"type\": \"keep_alive\", \"build_port\": build_port}\r\n socket.send_json(request)\r\n heartbeat = socket.recv_json()\r\n return heartbeat\r\n\r\n @staticmethod\r\n def _send_kill_build(socket, build_info: dict) -> dict:\r\n \"\"\"\r\n This sends a command to the launch_binaries daemon running on a remote node to terminate a given binary.\r\n\r\n :param socket: The zmq socket.\r\n :param build_info: A diciontary containing the build_port.\r\n\r\n :return A kill_status indicating build has been succesfully terminated.\r\n \"\"\"\r\n\r\n build_port = build_info[\"build_port\"]\r\n request = {\"type\": \"kill_build\", \"build_port\": build_port}\r\n socket.send_json(request)\r\n kill_status = socket.recv_json()\r\n return kill_status\r\n\r\n @staticmethod\r\n def _keep_alive_thread(socket, build_info: dict) -> None:\r\n \"\"\"\r\n This is a wrapper around the keep alive command to be executed in a separate thread.\r\n\r\n :param socket: The zmq socket.\r\n :param build_info: A diciontary containing the build_port.\r\n \"\"\"\r\n while True:\r\n TDWUtils._send_keep_alive(socket, build_info)\r\n time.sleep(60)\r\n\r\n @staticmethod\r\n def launch_build(listener_port: int, build_address: str, controller_address: str) -> dict:\r\n \"\"\"\r\n Connect to a remote binary_manager daemon and launch an instance of a TDW build.\r\n\r\n Returns the necessary information for a local controller to connect.\r\n Use this function to automatically launching binaries on remote (or local) nodes, and to\r\n automatically shut down the build after controller is finished. 
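Following the `launch_build` docstring above (call it in a controller's constructor and hand the returned port to the parent class), a hypothetical subclass; the listener port default is an assumption, and this assumes the installed tdw version's `Controller.__init__` accepts a `launch_build` keyword:

```python
class RemoteBuildController(Controller):
    def __init__(self, build_address: str, listener_port: int = 5556):
        # Ask the remote binary_manager daemon to start a build for us.
        build_info = TDWUtils.launch_build(listener_port=listener_port,
                                           build_address=build_address,
                                           controller_address="localhost")
        # Connect to the port the daemon reserved; don't launch a local build.
        super().__init__(port=build_info["build_port"], launch_build=False)
```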
Call in the constructor\r\n of a controller and pass the build_port returned in build_info to the parent Controller class.\r\n\r\n :param listener_port: The port launch_binaries is listening on.\r\n :param build_address: Remote IP or hostname of node running launch_binaries.\r\n :param controller_address: IP or hostname of node running controller.\r\n\r\n :return The build_info dictionary containing build_port.\r\n \"\"\"\r\n\r\n context = zmq.Context()\r\n socket = context.socket(zmq.REQ)\r\n socket.connect(\"tcp://\" + build_address + \":%s\" % listener_port)\r\n build_info = TDWUtils._send_start_build(socket, controller_address)\r\n thread = Thread(target=TDWUtils._keep_alive_thread,\r\n args=(socket, build_info))\r\n thread.setDaemon(True)\r\n thread.start()\r\n return build_info\r\n\r\n @staticmethod\r\n def get_unity_args(arg_dict: dict) -> List[str]:\r\n \"\"\"\r\n :param arg_dict: A dictionary of arguments. Key=The argument prefix (e.g. port) Value=Argument value.\r\n\r\n :return The formatted command line string that is accepted by unity arg parser.\r\n \"\"\"\r\n\r\n formatted_args = []\r\n for key, value in arg_dict.items():\r\n prefix = \"-\" + key + \"=\"\r\n if type(value) == list:\r\n prefix += \",\".join([str(v) for v in value])\r\n else:\r\n prefix += str(value)\r\n formatted_args += [prefix]\r\n return formatted_args\r\n\r\n @staticmethod\r\n def find_free_port() -> int:\r\n \"\"\"\r\n :return a free port.\r\n \"\"\"\r\n\r\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\r\n s.bind((\"\", 0))\r\n return int(s.getsockname()[1])\r\n\r\n @staticmethod\r\n def get_unit_scale(record: ModelRecord) -> float:\r\n \"\"\"\r\n :param record: The model record.\r\n\r\n :return The scale factor required to scale a model to 1 meter \"unit scale\".\r\n \"\"\"\r\n\r\n bounds = record.bounds\r\n\r\n # Get the \"unit scale\" of the object.\r\n s = 1 / max(\r\n bounds['top']['y'] - bounds['bottom']['y'],\r\n bounds['front']['z'] - bounds['back']['z'],\r\n bounds['right']['x'] - bounds['left']['x'])\r\n return s\r\n\r\n @staticmethod\r\n def validate_amazon_s3() -> bool:\r\n \"\"\"\r\n Validate that your local Amazon S3 credentials are set up correctly.\r\n\r\n :return True if everything is OK.\r\n \"\"\"\r\n\r\n config_path = Path.home().joinpath(\".aws/config\")\r\n new_config_path = not config_path.exists()\r\n # Generate a valid config file.\r\n if new_config_path:\r\n config_path.write_text(\"[default]\\nregion = us-east-1\\noutput = json\")\r\n print(f\"Generated a new config file: {config_path.resolve()}\")\r\n try:\r\n session = boto3.Session(profile_name=\"tdw\")\r\n s3 = session.resource(\"s3\")\r\n s3.meta.client.head_object(Bucket='tdw-private', Key='models/windows/2018-2019.1/iron_box')\r\n return True\r\n except ProfileNotFound:\r\n print(f\"ERROR! Your AWS credentials file is not set up correctly.\")\r\n print(\"Your AWS credentials must have a [tdw] profile with valid keys.\")\r\n return False\r\n except ClientError as e:\r\n print(\"ERROR! Could not access bucket tdw-private. 
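A worked example of the unit-scale factor computed by `get_unit_scale` above, using a stand-in dict shaped like `ModelRecord.bounds` (the same keys the function reads; the extents are made up):

```python
bounds = {'top': {'y': 0.8}, 'bottom': {'y': 0.0},
          'front': {'z': 0.25}, 'back': {'z': -0.25},
          'right': {'x': 2.0}, 'left': {'x': 0.0}}

# The largest extent is 2.0 m along x, so scaling uniformly by
# 1 / 2.0 = 0.5 brings the model to "unit scale".
s = 1 / max(bounds['top']['y'] - bounds['bottom']['y'],
            bounds['front']['z'] - bounds['back']['z'],
            bounds['right']['x'] - bounds['left']['x'])
assert s == 0.5
```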
Make sure you have the right permissions.\")\r\n error_code = e.response['Error']['Code']\r\n print(e, error_code)\r\n return False\r\n\r\n @staticmethod\r\n def get_base64_flex_particle_forces(forces: list) -> str:\r\n \"\"\"\r\n :param forces: The forces (see Flex documentation for how to arrange this array).\r\n\r\n :return: An array of Flex particle forces encoded in base64.\r\n \"\"\"\r\n\r\n forces = np.array(forces, dtype=np.float32)\r\n return base64.b64encode(forces).decode()\r\n\r\n @staticmethod\r\n def color_to_hashable(color: Union[np.array, Tuple[int, int, int]]) -> int:\r\n \"\"\"\r\n :param color: The color as an RGB array or tuple, where each value is between 0 and 255.\r\n\r\n :return: A hashable integer representation of the color array.\r\n \"\"\"\r\n\r\n return (color[0] << 16) + (color[1] << 8) + color[2]\r\n\r\n @staticmethod\r\n def hashable_to_color(hashable: int) -> np.array:\r\n \"\"\"\r\n :param hashable: A hashable integer representing an RGB color.\r\n\r\n :return: A color as a numpy array of integers between 0 and 255: `[r, g, b]`\r\n \"\"\"\r\n\r\n return np.array([(hashable >> 16) & 255, (hashable >> 8) & 255, hashable & 255], dtype=int)\r\n\r\n @staticmethod\r\n def get_bounds_dict(bounds: Bounds, index: int) -> Dict[str, np.array]:\r\n \"\"\"\r\n :param bounds: Bounds output data.\r\n :param index: The index in `bounds` of the target object.\r\n\r\n :return: A dictionary of the bounds. Key = the name of the position. Value = the position as a numpy array.\r\n \"\"\"\r\n\r\n return {\"top\": np.array(bounds.get_top(index)),\r\n \"bottom\": np.array(bounds.get_bottom(index)),\r\n \"left\": np.array(bounds.get_left(index)),\r\n \"right\": np.array(bounds.get_right(index)),\r\n \"front\": np.array(bounds.get_front(index)),\r\n \"back\": np.array(bounds.get_back(index)),\r\n \"center\": np.array(bounds.get_center(index))}\r\n\r\n @staticmethod\r\n def get_bounds_extents(bounds: Bounds, index: int) -> np.array:\r\n \"\"\"\r\n :param bounds: Bounds output data.\r\n :param index: The index in `bounds` of the target object.\r\n\r\n :return: The width (left to right), length (front to back), and height (top to bottom) of the bounds as a numpy array.\r\n \"\"\"\r\n\r\n return np.array([np.linalg.norm(np.array(bounds.get_left(index)) - np.array(bounds.get_right(index))),\r\n np.linalg.norm(np.array(bounds.get_front(index)) - np.array(bounds.get_back(index))),\r\n np.linalg.norm(np.array(bounds.get_top(index)) - np.array(bounds.get_bottom(index)))])\r\n\r\n @staticmethod\r\n def get_closest_position_in_bounds(origin: np.array, bounds: Bounds, index: int) -> np.array:\r\n \"\"\"\r\n :param origin: The origin from which the distance is calculated.\r\n :param bounds: Bounds output data.\r\n :param index: The index in `bounds` of the target object.\r\n\r\n :return: The position on the object bounds that is closest to `origin`.\r\n \"\"\"\r\n\r\n object_bounds = TDWUtils.get_bounds_dict(bounds=bounds, index=index)\r\n\r\n # Get the closest point on the bounds.\r\n min_destination = \"\"\r\n min_distance = 10000\r\n for p in object_bounds:\r\n d = np.linalg.norm(origin - object_bounds[p])\r\n if d < min_distance:\r\n min_distance = d\r\n min_destination = p\r\n return object_bounds[min_destination]\r\n\r\n @staticmethod\r\n def get_angle(forward: np.array, origin: np.array, position: np.array) -> float:\r\n \"\"\"\r\n :param position: The target position.\r\n :param origin: The origin position of the directional vector.\r\n :param forward: The forward directional 
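The color hashing above is a plain 24-bit shift-and-or, so the round trip is exact; a worked example:

```python
h = TDWUtils.color_to_hashable((255, 128, 0))
# (255 << 16) + (128 << 8) + 0 = 16711680 + 32768 = 16744448 = 0xFF8000
assert h == 0xFF8000
assert list(TDWUtils.hashable_to_color(h)) == [255, 128, 0]
```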
vector.\r\n\r\n :return: The angle in degrees between `forward` and the direction vector from `origin` to `position`.\r\n \"\"\"\r\n\r\n # Get the normalized directional vector to the target position.\r\n p0 = np.array([origin[0], origin[2]])\r\n p1 = np.array([position[0], position[2]])\r\n d = p1 - p0\r\n d = d / np.linalg.norm(d)\r\n f = np.array([forward[0], forward[2]])\r\n\r\n dot = f[0] * d[0] + f[1] * d[1]\r\n det = f[0] * d[1] - f[1] * d[0]\r\n angle = np.arctan2(det, dot)\r\n angle = np.rad2deg(angle)\r\n return angle\r\n\r\n @staticmethod\r\n def get_angle_between(v1: np.array, v2: np.array) -> float:\r\n \"\"\"\r\n :param v1: The first directional vector.\r\n :param v2: The second directional vector.\r\n\r\n :return: The angle in degrees between two directional vectors.\r\n \"\"\"\r\n\r\n ang1 = np.arctan2(v1[2], v1[0])\r\n ang2 = np.arctan2(v2[2], v2[0])\r\n\r\n return np.rad2deg((ang1 - ang2) % (2 * np.pi))\r\n\r\n @staticmethod\r\n def rotate_position_around(position: np.array, angle: float, origin: np.array = None) -> np.array:\r\n \"\"\"\r\n Rotate a position by a given angle around a given origin.\r\n\r\n :param origin: The origin position. If None, the origin is `[0, 0, 0]`\r\n :param position: The point being rotated.\r\n :param angle: The angle in degrees.\r\n\r\n :return: The rotated position.\r\n \"\"\"\r\n\r\n if origin is None:\r\n origin = np.array([0, 0, 0])\r\n\r\n radians = np.deg2rad(angle)\r\n x, y = position[0], position[2]\r\n offset_x, offset_y = origin[0], origin[2]\r\n adjusted_x = (x - offset_x)\r\n adjusted_y = (y - offset_y)\r\n cos_rad = np.cos(radians)\r\n sin_rad = np.sin(radians)\r\n qx = offset_x + cos_rad * adjusted_x + sin_rad * adjusted_y\r\n qy = offset_y + -sin_rad * adjusted_x + cos_rad * adjusted_y\r\n\r\n return np.array([qx, position[1], qy])\r\n\r\n @staticmethod\r\n def euler_angles_to_rpy(euler_angles: np.array) -> np.array:\r\n \"\"\"\r\n Convert Euler angles to ROS RPY angles.\r\n\r\n :param euler_angles: A numpy array: `[x, y, z]` Euler angles in degrees.\r\n\r\n :return: A numpy array: `[r, p, y]` angles in radians.\r\n \"\"\"\r\n\r\n # Source: https://github.com/Unity-Technologies/URDF-Importer/blob/c41208565419b04907496baa93ad1b675d41dc20/com.unity.robotics.urdf-importer/Runtime/Extensions/TransformExtensions.cs#L85-L92\r\n return np.radians(np.array([-euler_angles[2], euler_angles[0], -euler_angles[1]]))\r\n\r\n\r\nclass AudioUtils:\r\n \"\"\"\r\n Utility class for recording audio in TDW using [fmedia](https://stsaz.github.io/fmedia/).\r\n\r\n Usage:\r\n\r\n ```python\r\n from tdw.tdw_utils import AudioUtils\r\n from tdw.controller import Controller\r\n\r\n c = Controller()\r\n\r\n initialize_trial() # Your code here.\r\n\r\n # Begin recording audio. 
Automatically stop recording at 10 seconds.\r\n AudioUtils.start(output_path=\"path/to/file.wav\", until=(0, 10))\r\n\r\n do_trial() # Your code here.\r\n\r\n # Stop recording.\r\n AudioUtils.stop()\r\n ```\r\n \"\"\"\r\n\r\n # The process ID of the audio recorder.\r\n RECORDER_PID: Optional[int] = None\r\n # The audio capture device.\r\n DEVICE: Optional[str] = None\r\n\r\n @staticmethod\r\n def get_system_audio_device() -> str:\r\n \"\"\"\r\n :return: The audio device that can be used to capture system audio.\r\n \"\"\"\r\n\r\n devices = check_output([\"fmedia\", \"--list-dev\"]).decode(\"utf-8\").split(\"Capture:\")[1]\r\n dev_search = re.search(\"device #(.*): Stereo Mix\", devices, flags=re.MULTILINE)\r\n assert dev_search is not None, \"No suitable audio capture device found:\\n\" + devices\r\n return dev_search.group(1)\r\n\r\n @staticmethod\r\n def start(output_path: Union[str, Path], until: Optional[Tuple[int, int]] = None) -> None:\r\n \"\"\"\r\n Start recording audio.\r\n\r\n :param output_path: The path to the output file.\r\n :param until: If not None, fmedia will record until `minutes:seconds`. The value must be a tuple of 2 integers. If None, fmedia will record until you send `AudioUtils.stop()`.\r\n \"\"\"\r\n\r\n if isinstance(output_path, str):\r\n p = Path(output_path).resolve()\r\n else:\r\n p = output_path\r\n\r\n # Create the directory.\r\n if not p.parent.exists():\r\n p.parent.mkdir(parents=True)\r\n\r\n # Set the capture device.\r\n if AudioUtils.DEVICE is None:\r\n AudioUtils.DEVICE = AudioUtils.get_system_audio_device()\r\n fmedia_call = [\"fmedia\",\r\n \"--record\",\r\n f\"--dev-capture={AudioUtils.DEVICE}\",\r\n f\"--out={str(p.resolve())}\",\r\n \"--globcmd=listen\"]\r\n # Automatically stop recording.\r\n if until is not None:\r\n fmedia_call.append(f\"--until={TDWUtils.zero_padding(until[0], 2)}:{TDWUtils.zero_padding(until[1], 2)}\")\r\n with open(os.devnull, \"w+\") as f:\r\n AudioUtils.RECORDER_PID = Popen(fmedia_call,\r\n stderr=f).pid\r\n\r\n @staticmethod\r\n def stop() -> None:\r\n \"\"\"\r\n Stop recording audio (if any fmedia process is running).\r\n \"\"\"\r\n\r\n if AudioUtils.RECORDER_PID is not None:\r\n with open(os.devnull, \"w+\") as f:\r\n call(['fmedia', '--globcmd=quit'], stderr=f, stdout=f)\r\n AudioUtils.RECORDER_PID = None\r\n\r\n @staticmethod\r\n def is_recording() -> bool:\r\n \"\"\"\r\n :return: True if the fmedia recording process still exists.\r\n \"\"\"\r\n\r\n return AudioUtils.RECORDER_PID is not None and pid_exists(AudioUtils.RECORDER_PID)\r\n\r\n\r\nclass QuaternionUtils:\r\n \"\"\"\r\n Helper functions for using quaternions.\r\n\r\n Quaternions are always numpy arrays in the following order: `[x, y, z, w]`.\r\n This is the order returned in all Output Data objects.\r\n\r\n Vectors are always numpy arrays in the following order: `[x, y, z]`.\r\n \"\"\"\r\n\r\n \"\"\":class_var\r\n The global up directional vector.\r\n \"\"\"\r\n UP = np.array([0, 1, 0])\r\n \"\"\":class_var\r\n The global forward directional vector.\r\n \"\"\"\r\n FORWARD: np.array = np.array([0, 0, 1])\r\n \"\"\":class_var\r\n The quaternion identity rotation.\r\n \"\"\"\r\n IDENTITY = np.array([0, 0, 0, 1])\r\n\r\n @staticmethod\r\n def get_inverse(q: np.array) -> np.array:\r\n \"\"\"\r\n Source: https://referencesource.microsoft.com/#System.Numerics/System/Numerics/Quaternion.cs\r\n\r\n :param q: The quaternion.\r\n\r\n :return: The inverse of the quaternion.\r\n \"\"\"\r\n\r\n x = q[0]\r\n y = q[1]\r\n z = q[2]\r\n w = q[3]\r\n\r\n ls = x * x + y * y + z 
* z + w * w\r\n inv = 1.0 / ls\r\n\r\n return np.array([-x * inv, -y * inv, -z * inv, w * inv])\r\n\r\n @staticmethod\r\n def multiply(q1: np.array, q2: np.array) -> np.array:\r\n \"\"\"\r\n Multiply two quaternions.\r\n Source: https://stackoverflow.com/questions/4870393/rotating-coordinate-system-via-a-quaternion\r\n\r\n :param q1: The first quaternion.\r\n :param q2: The second quaternion.\r\n :return: The multiplied quaternion: `q1 * q2`\r\n \"\"\"\r\n\r\n x1 = q1[0]\r\n y1 = q1[1]\r\n z1 = q1[2]\r\n w1 = q1[3]\r\n\r\n x2 = q2[0]\r\n y2 = q2[1]\r\n z2 = q2[2]\r\n w2 = q2[3]\r\n\r\n w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2\r\n x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2\r\n y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2\r\n z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2\r\n return np.array([x, y, z, w])\r\n\r\n @staticmethod\r\n def get_conjugate(q: np.array) -> np.array:\r\n \"\"\"\r\n Source: https://stackoverflow.com/questions/4870393/rotating-coordinate-system-via-a-quaternion\r\n\r\n :param q: The quaternion.\r\n\r\n :return: The conjugate of the quaternion: `[-x, -y, -z, w]`\r\n \"\"\"\r\n\r\n x = q[0]\r\n y = q[1]\r\n z = q[2]\r\n w = q[3]\r\n\r\n return np.array([-x, -y, -z, w])\r\n\r\n @staticmethod\r\n def multiply_by_vector(q: np.array, v: np.array) -> np.array:\r\n \"\"\"\r\n Multiply a quaternion by a vector.\r\n Source: https://stackoverflow.com/questions/4870393/rotating-coordinate-system-via-a-quaternion\r\n\r\n :param q: The quaternion.\r\n :param v: The vector.\r\n\r\n :return: A directional vector calculated from: `q * v`\r\n \"\"\"\r\n\r\n q2 = (v[0], v[1], v[2], 0.0)\r\n return QuaternionUtils.multiply(QuaternionUtils.multiply(q, q2), QuaternionUtils.get_conjugate(q))[:-1]\r\n\r\n @staticmethod\r\n def world_to_local_vector(position: np.array, origin: np.array, rotation: np.array) -> np.array:\r\n \"\"\"\r\n Convert a vector position in absolute world coordinates to relative local coordinates.\r\n Source: https://answers.unity.com/questions/601062/what-inversetransformpoint-does-need-explanation-p.html\r\n\r\n :param position: The position vector in world coordinates.\r\n :param origin: The origin vector of the local space in world coordinates.\r\n :param rotation: The rotation quaternion of the local coordinate space.\r\n\r\n :return: `position` in local coordinates.\r\n \"\"\"\r\n\r\n return QuaternionUtils.multiply_by_vector(q=QuaternionUtils.get_inverse(q=rotation), v=position - origin)\r\n\r\n @staticmethod\r\n def get_up_direction(q: np.array) -> np.array:\r\n \"\"\"\r\n :param q: The rotation as a quaternion.\r\n\r\n :return: A directional vector corresponding to the \"up\" direction from the quaternion.\r\n \"\"\"\r\n\r\n return QuaternionUtils.multiply_by_vector(q, QuaternionUtils.UP)\r\n\r\n @staticmethod\r\n def euler_angles_to_quaternion(euler: np.array) -> np.array:\r\n \"\"\"\r\n Convert Euler angles to a quaternion.\r\n\r\n :param euler: The Euler angles vector.\r\n\r\n :return: The quaternion representation of the Euler angles.\r\n \"\"\"\r\n\r\n roll = euler[0]\r\n pitch = euler[1]\r\n yaw = euler[2]\r\n cy = np.cos(yaw * 0.5)\r\n sy = np.sin(yaw * 0.5)\r\n cp = np.cos(pitch * 0.5)\r\n sp = np.sin(pitch * 0.5)\r\n cr = np.cos(roll * 0.5)\r\n sr = np.sin(roll * 0.5)\r\n\r\n w = cy * cp * cr + sy * sp * sr\r\n x = cy * cp * sr - sy * sp * cr\r\n y = sy * cp * sr + cy * sp * cr\r\n z = sy * cp * cr - cy * sp * sr\r\n return np.array([x, y, z, w])\r\n\r\n @staticmethod\r\n def quaternion_to_euler_angles(quaternion: np.array) -> np.array:\r\n \"\"\"\r\n Convert a 
quaternion to Euler angles.\r\n\r\n        :param quaternion: A quaternion as a numpy array.\r\n\r\n        :return: The Euler angles representation of the quaternion.\r\n        \"\"\"\r\n\r\n        x = quaternion[0]\r\n        y = quaternion[1]\r\n        z = quaternion[2]\r\n        w = quaternion[3]\r\n        ysqr = y * y\r\n\r\n        t0 = +2.0 * (w * x + y * z)\r\n        t1 = +1.0 - 2.0 * (x * x + ysqr)\r\n        ex = np.degrees(np.arctan2(t0, t1))\r\n\r\n        t2 = +2.0 * (w * y - z * x)\r\n        t2 = np.where(t2 > +1.0, +1.0, t2)\r\n\r\n        t2 = np.where(t2 < -1.0, -1.0, t2)\r\n        ey = np.degrees(np.arcsin(t2))\r\n\r\n        t3 = +2.0 * (w * z + x * y)\r\n        t4 = +1.0 - 2.0 * (ysqr + z * z)\r\n        ez = np.degrees(np.arctan2(t3, t4))\r\n\r\n        return np.array([ex, ey, ez])\r\n\r\n    @staticmethod\r\n    def get_y_angle(q1: np.array, q2: np.array) -> float:\r\n        \"\"\"\r\n        Source: https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles\r\n\r\n        :param q1: The first quaternion.\r\n        :param q2: The second quaternion.\r\n\r\n        :return: The angle between the two quaternions in degrees around the y axis.\r\n        \"\"\"\r\n\r\n        qd = QuaternionUtils.multiply(QuaternionUtils.get_conjugate(q1), q2)\r\n        return np.rad2deg(2 * np.arcsin(np.clip(qd[1], -1, 1)))\r\n","sub_path":"Python/tdw/tdw_utils.py","file_name":"tdw_utils.py","file_ext":"py","file_size_in_byte":43770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"496450723","text":"#!/usr/bin/env python\n\ncamera_angles = [\"[<>,<>,<>,1.0,<>]\"]\n\n\n\ndata = SEMCAD.GetData()\n\nmodel = data.GetModel()\nsimulation = data.GetActiveSimulation()\n\nSEMCAD.SetViewerMode()\norig_cameraeye = model.CameraEye\norig_cameratarget = model.CameraTarget\norig_cameraup = model.CameraUp\norig_scalingfactor = model.ScalingFactor\n\nSEMCAD.SetModelingMode()  # SEMCAD namespace assumed here, matching the SetViewerMode/SetSimulationMode calls\n\nSEMCAD.raw_input(\"Prompt >>> \")\n\nSEMCAD.SetSimulationMode()\n\nSEMCAD.IsRunning()\n\nSEMCAD.SetViewerMode()\n\nSEMCAD.UpdateView()\n\nSEMCAD.SaveScreenShotAs(\"filename\")\n\n#Simulations.List\n#Simulations.GetActiveSimulation\n\n#Simulation.ComputeVoxels()\n#Simulation.Run()\n#Simulation.ResetResults()\n","sub_path":"simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"376434599","text":"#!/usr/bin/env python\nimport glob\nimport os\n\n\n\ndef correctComs(globber,correctLine):\n\t# Make sure you are in the directory that you are calling the script from\n\tmainDir = os.getcwd()\n\n\n\t# Move into each folder\n\tfor t in glob.glob(globber):\n\n\t\t# Get base name of file\n\t\ttreeSet=str(t)\n\t\ttreeSetIndex = treeSet.find(\".nex\")\n\t\tfName = treeSet[:treeSetIndex]\n\t\t\n\t\t# Create path \n\t\tdirPath = os.path.join(mainDir,fName)\n\t\tos.chdir(dirPath)\n\n\t\tfor f in glob.glob('*AffManu.out'):\n\t\t\twith open(f) as file:\n\t\t\t\tif any(line == correctLine for line in file):\n\t\t\t\t\tprint(\"correct\")\n\t\t\t\telse:\n\t\t\t\t\tprint(f)\n\t\tos.chdir(mainDir)\n\n\nprint('1v100trees')\ncorrectLine1='Community 1 includes nodes: 0,1,2,3,4,5,6,7,8,9,\\n'\nglobber1='*1v100trees*.nex'\ncorrectComs(globber1,correctLine1)\n\nprint('1v500trees')\ncorrectLine2='Community 1 includes nodes: 0,1,\\n'\nglobber2='*1v500trees*.nex'\ncorrectComs(globber2,correctLine2)\n\nprint('1v1000trees')\ncorrectLine3='Community 1 includes nodes: 
0,\\n'\nglobber3='*1v1000trees*.nex'\ncorrectComs(globber3,correctLine3)\n","sub_path":"gnuParalellBatch/AccurateComs.py","file_name":"AccurateComs.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"105484783","text":"class Tower:\r\n def __init__(self, name):\r\n self.name = name\r\n self.disks = []\r\n\r\n def add_disk(self, disk):\r\n self.disks.append(disk)\r\n\r\n def pop_disk(self):\r\n return self.disks.pop()\r\n\r\n\r\ndef towers_of_hanoi(n, origin, dest, buffer):\r\n if n == 1:\r\n disk = origin.pop_disk()\r\n dest.add_disk(disk)\r\n #print(\"Move\", n, \"from\", origin, \"to\", dest)\r\n return\r\n\r\n towers_of_hanoi(n-1, origin, buffer, dest)\r\n #print(\"Move\", n, \"from\", origin, \"to\", dest)\r\n disk = origin.pop_disk()\r\n dest.add_disk(disk)\r\n towers_of_hanoi(n-1, buffer, dest, origin)\r\n\r\n\r\ndef main():\r\n n = 5\r\n #origin, buffer, dest = 'A', 'B', 'C'\r\n left = Tower(\"left\")\r\n centre = Tower(\"centre\")\r\n right = Tower(\"right\")\r\n\r\n for i in range(n, 0, -1):\r\n left.add_disk(i)\r\n\r\n towers_of_hanoi(n, left, right, centre)\r\n\r\n print(left.disks, centre.disks, right.disks)\r\n return\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"Chapter 8/8.6.py","file_name":"8.6.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"180925572","text":"from sklearn import svm\nimport numpy;\nfrom sklearn import datasets;\nimport matplotlib.pyplot as plt;\nXi, yi = datasets.make_blobs(500, random_state=1111);\nprint(yi[0:10]);\nXi = Xi.astype(numpy.float32)\nthe_data = Xi;\nK = 3;\n#print(the_data);\n\n\n\nclf = svm.SVC()\nclf.fit(Xi, yi);\n\n'''\nSVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',\n max_iter=-1, probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=False)\n'''\n\nassignments = clf.predict(Xi)\nprint(assignments);\n\n'''\nvectors = clf.support_vectors_\nprint(vectors);\n'''\n\n\nplt.scatter(*Xi.T, c='k', lw=0);\nplt.scatter(*Xi.T, c=yi, lw=0, vmax=K + 0.5, label='data');\nplt.legend(scatterpoints=3);\nplt.show()\n ","sub_path":"2_classify/2_train_and_classify/svm/example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"334190694","text":"import pymongo\r\nimport pyhocon\r\nimport re\r\n\r\ncon=pymongo.MongoClient('127.0.0.1',27017)\r\n\r\ndef get_parsers():\r\n coll=con['parseconfig']['event']\r\n coll1=con['parseconfig']['parsers']\r\n events=coll.find()[0]['event-builder']['events']\r\n #print(events)\r\n parsers=[]\r\n for name,e in events.items():\r\n #print(e)\r\n if e['output-type']=='kerberos-logon':\r\n #print(e)\r\n msgs=re.findall(\"\\'(\\w+(?:\\-\\w+)+)\\'\",e['input-message']['expression'])\r\n parsers += msgs\r\n parsers = set(parsers)\r\n for pf,p in coll1.find()[0].items():\r\n if isinstance(p,list):\r\n for parser in p:\r\n for f_parser in parsers:\r\n if f_parser==parser['Name']:\r\n for f in parser['Fields']:\r\n #print(f)\r\n if \"dest_ip\" in f:\r\n print(f_parser)\r\n\r\nget_parsers()\r\n","sub_path":"scripts/kerberos_logon.py","file_name":"kerberos_logon.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
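The `towers_of_hanoi` recursion in the 8.6.py record above follows the classic strategy: move the top n-1 disks to the buffer peg, move the largest disk to the destination, then move the n-1 disks back on top. A minimal standalone sketch of the resulting move count (illustrative only, not part of any dataset record; `hanoi_moves` is a name introduced here):

```python
# Count the moves made by the classic Towers of Hanoi recursion.
def hanoi_moves(n: int) -> int:
    if n == 1:
        return 1  # base case: a single disk moves once
    # n-1 disks to the buffer, one move for the largest disk, n-1 disks back
    return 2 * hanoi_moves(n - 1) + 1

assert hanoi_moves(5) == 2 ** 5 - 1  # the 5-disk case used in main(): 31 moves
```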
+{"seq_id":"135673552","text":"#!/usr/bin/env python3\n\nimport logging\nfrom functools import reduce\nfrom create_donor import *\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\ndatabase = SqliteDatabase('donation.db')\n\nclass Donor:\n\n def __init__(self, name, list_donations):\n self._name = name\n self._list_donations = list_donations\n self._donation_count = len(list_donations)\n self._amount = sum(list_donations)\n\n\n @property\n def name(self):\n return self._name\n\n @property\n def amount(self):\n return self._amount\n\n @amount.setter\n def amount(self, amount):\n self._amount = amount\n\n def add(self, donation_amount):\n self._amount +=donation_amount\n self._donation_count += 1\n self._list_donations.append(donation_amount)\n\n\n @property\n def donation_count(self):\n return self._donation_count\n\n @property\n def average(self):\n return self._amount / self._donation_count\n\n def get_letter_text(self, name, amount):\n msg = []\n msg.append('Dear {},'.format(name))\n msg.append('\\n\\n\\tThank you for your very kind donation of ${:.2f}.'.format(amount))\n msg.append('\\n\\n\\tIt will be put to very good use.')\n msg.append('\\n\\n\\t\\t\\t\\tSincerely,')\n msg.append('\\n\\t\\t\\t\\t-The Team\\n')\n return \"\".join(msg)\n\n def __lt__(self, other):\n return self._amount < other._amount\n\n def __gt__(self, other):\n return self._amount > other._amount\n\n def __eq__(self, other):\n return self._amount == other._amount\n\nclass Donations:\n\n def __init__(self):\n \"\"\"collection of donors\"\"\"\n self._donors = {}\n\n def insert_donor(self, donor):\n self._donors[donor.name] = donor\n\n def add_update(self, donor):\n \"\"\" add or update donor\"\"\"\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n # existing donor\n if donor.name in self._donors.keys():\n d =self._donors[donor.name]\n # update donation amount\n d.add(donor.amount)\n\n #find existing donor in database\n with database.transaction():\n existing_donor = Donor_Collection.get(Donor_Collection.person_name == donor._name)\n\n existing_donor.total_amount += donor._amount\n existing_donor.donation_count += 1\n existing_donor.average = existing_donor.total_amount/existing_donor.donation_count\n existing_donor.save()\n new_donation_amount = Donation_Amount.create(\n donation_amount = donor._amount,\n from_person = donor._name)\n new_donation_amount.save()\n logger.info('Database update successful')\n else:\n # new donor\n self._donors[donor.name] = donor\n #new donor\n with database.transaction():\n new_donor = Donor_Collection.create(\n person_name = donor._name,\n donation_count = donor._donation_count,\n total_amount = donor._amount)\n\n new_donor.save()\n\n for amount in donor._list_donations:\n new_donation_amount = Donation_Amount.create(\n donation_amount = amount,\n from_person = donor._name)\n new_donation_amount.save()\n\n logger.info('Database add successful')\n\n except Exception as e:\n logger.info('failed to add or update')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()\n\n def delete(self, donor_name):\n \"\"\" delete a donor from database\"\"\"\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n\n Donation_Amount.delete().where (Donation_Amount.from_person == donor_name).execute()\n\n aDonor = Donor_Collection.get(Donor_Collection.person_name == donor_name)\n logger.info(f'Trying to delete {aDonor.person_name}')\n aDonor.delete_instance()\n\n except Exception as e:\n 
logger.info(\"Delete failed because record doesn't exist: \")\n logger.error(\"error\", e)\n\n finally:\n database.close()\n\n\n @property\n def donors(self):\n return self._donors\n\n def generate_report(self):\n \"\"\"Get data from database and Generate report\"\"\"\n\n report = []\n report.append(\"--------------------------------------------------------------\")\n msg = \"{:20} | {:10} | {:5} | {:10}\".format('Donor Name', 'Total Given', 'Num Gifts', 'Average Gift')\n report.append(msg)\n report.append(\"--------------------------------------------------------------\")\n\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n try:\n query = Donor_Collection.select(Donor_Collection).order_by(Donor_Collection.total_amount.desc())\n\n for aDonor in query:\n average = aDonor.total_amount / aDonor.donation_count\n a_row = '{:20} $ {:>10.2f} {:>10d} $ {:>11.2f}'.format(aDonor.person_name,\n aDonor.total_amount,\n aDonor.donation_count,\n average)\n report.append(a_row)\n except Exception as e:\n logger.info(\"Record doesn't exist: \")\n logger.info(e)\n\n finally:\n database.close()\n\n return \"\\n\".join(report)","sub_path":"students/cindywaldron/hw8/mailroom/mailroom.py","file_name":"mailroom.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"92079791","text":"__author__ = 'sandro.lourenco'\nimport logging\n\nfrom tornado_json.gen import coroutine\nfrom tornado.web import MissingArgumentError\n\nfrom rest.utils import DoesNotExist\nfrom rest.utils import json_formats\nfrom rest import RestDBAPIHandler\n\nslog = logging.getLogger('restdb')\nalog = logging.getLogger('access')\n\nTF = ['true', 'false', 'True', 'False']\nEMPTY = ['', None, False, 0]\n\n\nclass KeyHandler(RestDBAPIHandler):\n \"\"\"\n AuthData\n \"\"\"\n\n __urls__ = [r\"/key/(?P[a-zA-Z0-9_]+)/?$\"]\n\n @coroutine\n def get(self, base):\n \"\"\"\n :param base:\n :return:\n \"\"\"\n try:\n user_id = self.get_argument('user', default=None, strip=True)\n id_estrangeiro = self.get_argument('cpf', default=None, strip=True)\n product = self.get_argument('product', default=None, strip=True)\n partner = self.get_argument('partner', default=None, strip=True)\n key = self.get_argument('key', default=None, strip=True)\n parceiro_id = self.get_argument(\n 'parceiro_id', default=None, strip=True)\n tabela = 'VIEW_SSO_USER_DATA'\n fields = (\n 'chave', 'cliente_id', 'nome', 'email', 'data_suspensao', 'data_cancelamento', 'produto_id',\n 'parceiro_id',\n 'id_estrangeiro')\n sql = \"select %s from %s \"\n\n db = self.db.get(base)\n\n if key is not None:\n sql = sql % (','.join(fields), tabela) + ' where chave=%s'\n qpar = [key]\n elif partner is not None:\n sql = sql % (','.join(fields),\n tabela) + ' where parceiro_id=%s and cliente_id=%s and data_cancelamento is Null and data_suspensao is Null'\n qpar = [partner, user_id]\n elif id_estrangeiro is not None and parceiro_id is not None:\n sql = sql % (','.join(fields),\n tabela) + ' where parceiro_id=%s and id_estrangeiro=%s and data_cancelamento is Null and data_suspensao is Null'\n qpar = [parceiro_id, id_estrangeiro]\n elif id_estrangeiro is not None:\n sql = sql % (','.join(fields),\n tabela) + ' where produto_id=%s and id_estrangeiro=%s and data_cancelamento is Null and data_suspensao is Null'\n qpar = [product, id_estrangeiro]\n elif product is not None and user_id is not None:\n sql = sql % (','.join(fields),\n tabela) + ' where produto_id=%s and cliente_id=%s and 
data_cancelamento is Null and data_suspensao is Null'\n qpar = [product, user_id]\n else:\n raise MissingArgumentError('data')\n cur = yield db.fexecute(sql, qpar, table=tabela, cnames=fields, expires=60)\n\n if cur and cur.value in ['', False, 0, None]:\n raise DoesNotExist\n\n self.success(json_formats(dict(zip(fields, cur.value))), True)\n\n except (DoesNotExist, TypeError):\n self.fail('Client Id not found', code=404)\n except MissingArgumentError as e:\n self.fail(e.log_message, code=e.status_code)\n except Exception as e:\n slog.error(e)\n self.error('General Oauth Error', code=500)\n\n\nclass KeyClientHandler(RestDBAPIHandler):\n \"\"\"\n AuthData\n \"\"\"\n\n __urls__ = [r\"/key/client/(?P[a-zA-Z0-9_]+)/?$\"]\n\n def get(self, base):\n \"\"\"\n\n :param base:\n :return:\n \"\"\"\n try:\n\n raise DoesNotExist\n\n except DoesNotExist:\n self.fail('Method unavailable', code=404)\n except MissingArgumentError as e:\n self.fail(e.log_message, code=e.status_code)\n except Exception as e:\n slog.error(e)\n self.error('General Oauth Error', code=500)\n\n\nclass KeyCountHandler(RestDBAPIHandler):\n \"\"\"\n AuthData\n \"\"\"\n\n __urls__ = [r\"/key/count/(?P[a-zA-Z0-9_]+)/?$\"]\n\n def get(self, base):\n \"\"\"\n\n :param base:\n :return:\n \"\"\"\n try:\n\n raise DoesNotExist\n\n except DoesNotExist:\n self.fail('Method unavailable', code=404)\n except MissingArgumentError as e:\n self.fail(e.log_message, code=e.status_code)\n except Exception as e:\n slog.error(e)\n self.error('General Oauth Error', code=500)\n\n\nclass KeyFHandler(RestDBAPIHandler):\n \"\"\"\n AuthData\n \"\"\"\n\n __urls__ = [r\"/fkey/(?P[a-zA-Z0-9_]+)/?$\"]\n\n def get(self, base):\n \"\"\"\n\n :param base:\n :return:\n \"\"\"\n try:\n\n raise DoesNotExist\n\n except DoesNotExist:\n self.fail('Method unavailable', code=404)\n except MissingArgumentError as e:\n self.fail(e.log_message, code=e.status_code)\n except Exception as e:\n slog.error(e)\n self.error('General Oauth Error', code=500)\n","sub_path":"restdb/Code/rest/keys/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"302844953","text":"import numpy as np\n\nimg_gt_path='../../npz_DSD_dataset/Arr1_mnistImgsForDSD_GTimgs_mask_GTlabel_(47x64)x32x32x1_unitLength1_dataset.npz'\nimg_pre_path='../../npz_DSD_dataset/codes/codes_DSD_codesAndImgForMetricsCal.npz'\n\nimg_gt=np.load(img_gt_path)['imagesGT']\n\nimg_pre=np.load(img_pre_path)['imagesNorm0_1']\nspot=np.where(np.isinf(img_gt))[0]\n\n# DSD already is 0-255\nimg_gt=np.delete(img_gt,spot,axis=0)\nimg_pre=np.delete(img_pre,spot,axis=0)\n\nimg_gt=img_gt[0:3000]\nimg_pre=img_pre[0:3000]\n\nmetri=abs(img_gt-img_pre).mean()\nprint(\"visual metrics:{}\".format(metri))","sub_path":"DSD/dual_diaeMnist_unitLeng1/visual_metrics_cal.py","file_name":"visual_metrics_cal.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"62095242","text":"import cProfile\nimport io\nimport os\nimport pstats\nimport sys\n\nimport numpy as np\n\nimport rot_inv_scattering.disk as disk\n\n\ndef doprofile(func, filename, *l):\n pr = cProfile.Profile()\n pr.enable() # début du profiling\n func(*l) # appel de la fonction\n pr.disable() # fin du profiling\n s = io.StringIO()\n ps = pstats.Stats(pr, stream=s).sort_stats(\"cumulative\")\n ps.print_stats()\n rem = os.path.normpath(os.path.join(os.getcwd(), \"..\", \"..\", 
\"..\"))\n res = s.getvalue().replace(rem, \"\")\n res = res.replace(sys.base_prefix, \"\").replace(\"\\\\\", \"/\")\n # ps.dump_stats(filename)\n return res\n\n\nεc = -1.1\nμc = 1\nk = 8\n\nN = 256\nT = 2\nX, Y = np.meshgrid(np.linspace(-T, T, num=N), np.linspace(-T, T, num=N))\n\nif len(sys.argv) > 1:\n print(\"Scattered field\")\n r = doprofile(disk.scattered_field, \"profiling.dat\", εc, μc, k, T, X, Y, \"xy\")\nelse:\n print(\"Total field\")\n r = doprofile(disk.total_field, \"profiling.dat\", εc, μc, k, T, X, Y, \"xy\")\nprint(r)\n","sub_path":"profiling.py","file_name":"profiling.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"513569149","text":"\"\"\"\n321. Create Maximum Number\nGiven two arrays of length m and n with digits 0-9 representing two numbers. Create the maximum number of length k <= m + n from digits of the two. The relative order of the digits from the same array must be preserved. Return an array of the k digits.\n\nNote: You should try to optimize your time and space complexity.\n\"\"\"\n\n# simple math problem\n# Runtime: 420 ms, faster than 69.02% of Python3 online submissions for Create Maximum Number.\n# Memory Usage: 14 MB, less than 53.99% of Python3 online submissions for Create Maximum Number.\nclass Solution:\n def maxNumber(self, nums1: List[int], nums2: List[int], k: int) -> List[int]:\n def getK(nums, k):\n n = len(nums)\n to_pop = n - k\n ans = []\n for num in nums:\n while len(ans) > 0 and num > ans[-1] and to_pop > 0:\n to_pop -= 1\n ans.pop()\n ans.append(num)\n return ans[:k]\n\n def getMax(nums1, nums2):\n ans = []\n while nums1 and nums2:\n if nums1 > nums2:\n ans.append(nums1.pop(0))\n else:\n ans.append(nums2.pop(0))\n if nums1:\n ans += nums1\n else:\n ans += nums2\n return ans\n\n n1 = len(nums1)\n n2 = len(nums2)\n ans = []\n for k1 in range(k+1):\n k2 = k - k1\n if k1 > n1 or k2 > n2:\n continue\n ans = max(ans, getMax(getK(nums1, k1), getK(nums2, k2)))\n return ans","sub_path":"Widen/LC321_Create_Maximum_Number.py","file_name":"LC321_Create_Maximum_Number.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"444874160","text":"# -*- encoding: utf-8 -*-\nimport sys\nr_input = sys.stdin.readline\n\nresult = 0 # 기차에 사람이 가장 많을 때 사람 수\ncnt = 0 # 현재 기차에 타고 있는 사람 수\n\nfor i in range(4):\n A, B = map(int, r_input().split()) # A명이 내리고 B명이 탑승\n cnt -= A\n cnt += B\n\n result = max(cnt, result)\n\nprint(result)\n","sub_path":"Algorithm/Baekjoon/02455 지능형 기차/2455.py","file_name":"2455.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"484659721","text":"# Title: electrolib.io.cachefile\n# Author: Oscar Benjamin\n# Date: 25 Jan 2010\n# Description: This module defines the CacheFile class. The CacheFile class is\n# a subclass of EEGFile that can be used as a wrapper around any read-mode\n# EEGFile object. CacheFile will keep all blocks of EEG data in memory as\n# they are read, ensuring that it is not necessary to seek backwards on files\n# that are read from pipes.\n\nfrom electrolib.eegfile import EEGFile\n\nclass CacheFile(EEGFile):\n \"\"\"The CacheFile class is a wrapper around any read-mode EEGFile subclass\n that stores the results of read operations in memory negating the need for\n further disk accesses. 
This is useful in the event that the input file is\n non-seekable stream (such as stdin) or disk-access is slowing the\n application down.\n \"\"\"\n\n\n def __init__(self, *args, **kwargs):\n EEGFile.__init__(self, *args, **kwargs)\n self._blocksegments = []\n self._blockrefs = {}\n self._last_block = 0\n\n\n def _get_blocks(self, n1, n2):\n \"\"\"Retrieve the nth block cacheing all previous blocks as necessary\"\"\"\n if n2 > self._last_block:\n self._cache_blocks(n2)\n return self._retrieve_blocks(n1, n2)\n\n\n def _cache_blocks(self, n2):\n \"\"\"Cache the first n blocks if not already done\"\"\"\n # Retrieve blocks\n n1 = self._last_block\n blockseg = EEGFile._get_blocks(self, n1, n2)\n block_ind = len(self._blocksegments)\n self._blocksegments.append((blockseg, n1))\n for n in range(n1, n2):\n self._blockrefs[n] = block_ind\n self._last_block = n2\n\n\n def _retrieve_blocks(self, n1, n2):\n nseg1 = self._blockrefs[n1]\n nseg2 = self._blockrefs[n2 - 1] + 1\n blist = []\n for bseg, noffset in self._blocksegments[nseg1:nseg2]:\n start = max(n1 - noffset, 0)\n end = min(n2 - noffset, len(bseg))\n blist.append(bseg[start:end])\n return blist[0].concatenate(blist)\n\n","sub_path":"electrolib/io/cachefile.py","file_name":"cachefile.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"219921328","text":"\nfrom rest_framework import mixins, status\nfrom rest_framework.response import Response\n\n\nclass ListInWorkoutMixin(mixins.ListModelMixin):\n \"\"\"List objects that belongs to a Workout.\"\"\"\n def list(self, request, *args, **kwargs):\n self.get_workout(workout_pk=kwargs['workout_pk'])\n return super().list(request, *args, **kwargs)\n\n\nclass RetrieveInWorkoutMixin(mixins.RetrieveModelMixin):\n \"\"\"Retrieve an object that belongs to a Workout.\"\"\"\n def retrieve(self, request, *args, **kwargs):\n self.get_workout(workout_pk=kwargs['workout_pk'])\n return super().retrieve(request, *args, **kwargs)\n\n\nclass UpdateInWorkoutMixin(mixins.UpdateModelMixin):\n \"\"\"Update an object that belongs to a Workout.\"\"\"\n def update(self, request, *args, **kwargs):\n self.get_workout(workout_pk=kwargs['workout_pk'])\n partial = kwargs.pop('partial', False)\n instance = self.get_object()\n\n # Verify update is valid and perform it\n serializer = self.get_serializer(instance, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n self.perform_update(serializer)\n\n # Return the object serialized\n instance = self.get_object()\n serializer = self.get_response_serializer(instance)\n return Response(serializer.data)\n\n\nclass DestroyInWorkoutMixin(mixins.DestroyModelMixin):\n \"\"\"Destroy an object that belongs to a Workout.\"\"\"\n def destroy(self, request, *args, **kwargs):\n self.get_workout(workout_pk=kwargs['workout_pk'])\n return super().destroy(request, *args, **kwargs)\n\n\nclass CreateInWorkoutMixin(mixins.CreateModelMixin):\n \"\"\"Create an object that belongs to a Workout.\"\"\"\n def create(self, request, *args, **kwargs):\n self.get_workout(workout_pk=kwargs['workout_pk'])\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n saved = self.perform_create(serializer)\n serializer = self.get_response_serializer(instance=saved)\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n \n def perform_create(self, serializer):\n return 
serializer.save()\n","sub_path":"wogether/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"459191481","text":"from tkinter import *\nimport math # modules for floor()\nfrom PIL import ImageTk, Image, ImageSequence # modules for Pillow Image\n\n## 변수 선언 ##\n\n_WINDOW_WIDTH = 1000\n_WINDOW_HEIGHT = 500\n_SCENE_SIZE = (_WINDOW_WIDTH, _WINDOW_HEIGHT)\n\n_BUTTON_SEARCH_WIDTH = 30\n_BUTTON_SEARCH_HEIGHT = 20\n_BUTTON_SEARCH_SIZE = (_BUTTON_SEARCH_WIDTH, _BUTTON_SEARCH_HEIGHT)\n\n_BUTTON_SEND_WIDTH = 60\n_BUTTON_SEND_HEIGHT = 20\n_BUTTON_SEND_SIZE = (_BUTTON_SEND_WIDTH, _BUTTON_SEND_HEIGHT)\n\n_LOL_INTRO_LABEL_WIDTH = 350\n_LOL_INTRO_LABEL_HEIGHT = 350\n_LOL_INTRO_LABEL_SIZE = (_LOL_INTRO_LABEL_WIDTH, _LOL_INTRO_LABEL_HEIGHT)\n\n_LOL_SCENE_WIDTH = 1230\n_LOL_SCENE_HEIGHT = 750\n_LOL_SCENE_SIZE = (_LOL_SCENE_WIDTH, _LOL_SCENE_HEIGHT)\n\n################\n\n# 인트로 씬을 위한 애니메이터 클래스\n# 씬 전개 순서는 학교 로고 - 개발자 학생 로고 - 어플리케이션 이름 순서\nclass IntroSceneAnimator:\n global _WINDOW_HEIGHT, _WINDOW_WIDTH, _SCENE_SIZE\n\n def __init__(self):\n global parser\n\n # 윈도우 설정\n self.animationFlag = True\n self.frame = 0.0\n self.window = Tk()\n self.window.resizable(False, False)\n # 드래그바와 닫기 메뉴 가리기\n self.window.overrideredirect(True)\n\n self.Load_data()\n\n # 중앙 배치를 위한 오프셋 계산\n _WINDOW_OFFSET_X = int(self.window.winfo_screenwidth() / 2 - _WINDOW_WIDTH / 2)\n _WINDOW_OFFSET_Y = int(self.window.winfo_screenheight() / 2 - _WINDOW_HEIGHT / 2)\n\n # 스크린 중앙 배치\n setGeometry = \"{0}x{1}+{2}+{3}\".format(_WINDOW_WIDTH, _WINDOW_HEIGHT, _WINDOW_OFFSET_X, _WINDOW_OFFSET_Y)\n self.window.geometry(setGeometry)\n\n self.window.wm_iconbitmap('DNF.ico')\n self.window.title(\"useful\")\n\n # 애니메이션 동안 블렌딩을 위한 캔버스와 이미지 로딩\n self.canvas = Canvas(self.window, bg = \"white\", width = _WINDOW_WIDTH, height = _WINDOW_HEIGHT, bd = 0)\n self.canvas.create_image(500, 250, image=self.img_logoblack_raw, tags=\"introscene\")\n self.canvas.place(x = 0, y = 0)\n if self.animationFlag:\n self.canvas.after(0, self.Animate)\n else:\n return\n\n def Animate(self):\n # 애니메이션 재생 속도 조절\n animSpeed = 3\n self.frame += 0.016 * animSpeed\n\n if math.floor(self.frame) == 0: # 1초\n self.img_blended = self.Get_BlendedImageFromImages(self.img_logoblack, self.img_kpulogo, self.frame - math.floor(self.frame))\n elif 1 <= math.floor(self.frame) <= 2:\n self.img_blended = self.img_kpulogo_raw\n elif math.floor(self.frame) == 3:\n self.img_blended = self.Get_BlendedImageFromImages(self.img_kpulogo, self.img_minsulogo, self.frame - math.floor(self.frame))\n elif 4 <= math.floor(self.frame) <= 5:\n self.img_blended = self.img_minsulogo_raw\n elif math.floor(self.frame) == 6:\n self.img_blended = self.Get_BlendedImageFromImages(self.img_minsulogo, self.img_applogo, self.frame - math.floor(self.frame))\n elif 7 <= math.floor(self.frame) <= 10:\n self.img_blended = self.img_applogo_raw\n else:\n self.animationFlag = False\n print(\"\\x1b[1;34mIntroScene Ended\\x1b[0;m\")\n self.window.destroy()\n return\n\n self.canvas.delete(\"introscene\")\n\n self.canvas.create_image(500, 250, image = self.img_blended, tags = \"introscene\")\n\n self.canvas.after(16, self.Animate)\n\n\n ## 내부 동작을 위한 함수 ####################################################\n\n def Get_ImageFromFile(self, in_filePath = \"\", in_ImgSize = tuple()):\n # 파일 경로에 저장되어 있는 이미지를 읽어와 리사이징하고 RGBA 형식으로 변환해 리턴한다.\n if in_filePath == \"\":\n self.Print_Errors(\"Empty FilePath\")\n return\n\n 
if in_ImgSize == ():\n self.Print_Errors(\"Empty size\")\n return\n\n image = Image.open(in_filePath)\n image_resized = image.resize(in_ImgSize)\n image_converted = image_resized.convert(\"RGBA\")\n if image_converted:\n return image_converted\n else:\n self.Print_Errors(\"Image Loading Failure\")\n return\n\n def Get_BlendedImageFromImages(self, in_img1, in_img2, in_alpha = 0.0) :\n # 입력받은 이미지를 블렌딩한다.\n image_blended = Image.blend(in_img1, in_img2, in_alpha)\n image_result = ImageTk.PhotoImage(image_blended)\n\n if image_result:\n return image_result\n else:\n self.Print_Errors(\"Image Blending Failure\")\n return\n\n def Get_ImageFromFile_COMPLETE(self, in_filePath = \"\", in_ImgSize = tuple()):\n # 파일 경로에 저장되어 있는 이미지를 읽어와 리사이징하고 RGBA 형식으로 변환해 완전한 PhotoImage 개체로 리턴한다.\n if in_filePath == \"\":\n self.Print_Errors(\"Empty FilePath\")\n return\n\n if in_ImgSize == ():\n self.Print_Errors(\"Empty size\")\n return\n\n image = Image.open(in_filePath)\n image_resized = image.resize(in_ImgSize)\n image_converted = image_resized.convert(\"RGBA\")\n image_result = ImageTk.PhotoImage(image_converted)\n\n if image_result:\n return image_result\n else:\n self.Print_Errors(\"Image Loading Failure\")\n return\n\n def Print_Errors(self, in_errorText = \"\"):\n # 에러 입력을 출력한다.\n print(\"\\x1b[1;91mProcessing Error\\x1b[0;m: [{0}]\".format(in_errorText))\n\n def Load_data(self):\n # 이미지 데이터들을 로딩한다.\n\n # image loading & resizing\n self.img_logoblack = self.Get_ImageFromFile(\"./sceneimage/logoblack.png\", _SCENE_SIZE)\n self.img_kpulogo = self.Get_ImageFromFile(\"./sceneimage/logo.png\", _SCENE_SIZE)\n self.img_minsulogo = self.Get_ImageFromFile(\"./sceneimage/minsulogo.png\", _SCENE_SIZE)\n self.img_applogo = self.Get_ImageFromFile(\"./sceneimage/applogo.png\", _SCENE_SIZE)\n\n # 바로 원본으로 사용할 이미지 개체\n self.img_logoblack_raw = self.Get_ImageFromFile_COMPLETE(\"./sceneimage/logoblack.png\", _SCENE_SIZE)\n self.img_kpulogo_raw = self.Get_ImageFromFile_COMPLETE(\"./sceneimage/logo.png\", _SCENE_SIZE)\n self.img_minsulogo_raw = self.Get_ImageFromFile_COMPLETE(\"./sceneimage/minsulogo.png\", _SCENE_SIZE)\n self.img_applogo_raw = self.Get_ImageFromFile_COMPLETE(\"./sceneimage/applogo.png\", _SCENE_SIZE)\n\n print(\"\\x1b[1;32mIntroScene Image Loading COMPLETE\\x1b[0;m\")\n\nclass ButtonDrawer:\n # 버튼 이미지를 가지고 있는 클래스\n\n def __init__(self, in_mainWindow):\n self.window = in_mainWindow\n self.Load_data()\n pass\n\n def Print_Errors(self, in_errorText = \"\"):\n # 에러 입력을 출력한다.\n print(\"\\x1b[1;91mProcessing Error\\x1b[0;m: [{0}]\".format(in_errorText))\n\n def Get_ImageFromFile_COMPLETE(self, in_filePath = \"\", in_ImgSize = tuple()):\n # 파일 경로에 저장되어 있는 이미지를 읽어와 리사이징하고 RGBA 형식으로 변환해 완전한 PhotoImage 개체로 리턴한다.\n if in_filePath == \"\":\n self.Print_Errors(\"Empty FilePath\")\n return\n\n if in_ImgSize == ():\n self.Print_Errors(\"Empty size\")\n return\n\n image = Image.open(in_filePath)\n image_resized = image.resize(in_ImgSize)\n image_converted = image_resized.convert(\"RGBA\")\n image_result = ImageTk.PhotoImage(image_converted)\n\n if image_result:\n return image_result\n else:\n self.Print_Errors(\"Image Loading Failure\")\n return\n\n def Get_BlendedImageFromImages(self, in_img1, in_img2, in_alpha = 0.0) :\n # 입력받은 이미지를 블렌딩한다.\n image_blended = Image.blend(in_img1, in_img2, in_alpha)\n image_result = ImageTk.PhotoImage(image_blended)\n\n if image_result:\n return image_result\n else:\n self.Print_Errors(\"Image Blending Failure\")\n return\n\n def Get_ImageFromFile(self, in_filePath = \"\", in_ImgSize = 
tuple()):\n # 파일 경로에 저장되어 있는 이미지를 읽어와 리사이징하고 RGBA 형식으로 변환해 리턴한다.\n if in_filePath == \"\":\n self.Print_Errors(\"Empty FilePath\")\n return\n\n if in_ImgSize == ():\n self.Print_Errors(\"Empty size\")\n return\n\n image = Image.open(in_filePath)\n image_resized = image.resize(in_ImgSize)\n image_converted = image_resized.convert(\"RGBA\")\n if image_converted:\n return image_converted\n else:\n self.Print_Errors(\"Image Loading Failure\")\n return\n\n def Load_data(self):\n global _BUTTON_SEND_SIZE, _BUTTON_SEARCH_SIZE, _LOL_SCENE_SIZE, _LOL_INTRO_LABEL_SIZE\n # 이미지 데이터들을 로딩한다.\n\n # image loading & resizing\n\n # 버튼 이미지\n self.img_button_search = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/search.png\", _BUTTON_SEARCH_SIZE)\n self.img_button_reset = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/reset.png\", _BUTTON_SEARCH_SIZE)\n self.img_button_send = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/send.png\", _BUTTON_SEND_SIZE)\n\n self.img_button_search_over_red = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/search_over_red.png\", _BUTTON_SEARCH_SIZE)\n self.img_button_reset_over_red = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/reset_over_red.png\", _BUTTON_SEARCH_SIZE)\n self.img_button_send_over_red = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/send_over_red.png\", _BUTTON_SEND_SIZE)\n\n self.img_button_search_over_teal = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/search_over_teal.png\", _BUTTON_SEARCH_SIZE)\n self.img_button_reset_over_teal = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/reset_over_teal.png\", _BUTTON_SEARCH_SIZE)\n self.img_button_send_over_teal = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/send_over_teal.png\", _BUTTON_SEND_SIZE)\n\n self.img_label_search = self.Get_ImageFromFile_COMPLETE(\"./sceneimage/lol_search.png\", _LOL_INTRO_LABEL_SIZE)\n self.img_label_rotation = self.Get_ImageFromFile_COMPLETE(\"./sceneimage/lol_rotation.png\", _LOL_INTRO_LABEL_SIZE)\n self.img_label_challenger = self.Get_ImageFromFile_COMPLETE(\"./sceneimage/lol_challenger.png\", _LOL_INTRO_LABEL_SIZE)\n\n self.img_label_search_over = self.Get_ImageFromFile_COMPLETE(\"./sceneimage/lol_search_over.png\", _LOL_INTRO_LABEL_SIZE)\n self.img_label_rotation_over = self.Get_ImageFromFile_COMPLETE(\"./sceneimage/lol_rotation_over.png\", _LOL_INTRO_LABEL_SIZE)\n self.img_label_challenger_over = self.Get_ImageFromFile_COMPLETE(\"./sceneimage/lol_challenger_over.png\", _LOL_INTRO_LABEL_SIZE)\n\n # 탭 버튼 이미지\n self.img_tab_dnf = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/tab_dnf.png\", (155, 20))\n self.img_tab_market = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/tab_market.png\", (85, 20))\n self.img_tab_lol = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/tab_lol.png\", (85, 20))\n\n # lol 씬 전환을 위한 이미지\n self.img_background_raw = self.Get_ImageFromFile_COMPLETE(\"./lol_images/background/background.png\", _LOL_SCENE_SIZE)\n self.img_background = self.Get_ImageFromFile(\"./lol_images/background/background.png\", _LOL_SCENE_SIZE)\n self.img_blackbackground_raw = self.Get_ImageFromFile_COMPLETE(\"./lol_images/background/blackbackground.png\", _LOL_SCENE_SIZE)\n self.img_blackbackground = self.Get_ImageFromFile(\"./lol_images/background/blackbackground.png\", _LOL_SCENE_SIZE)\n self.img_whitebackground_raw = self.Get_ImageFromFile_COMPLETE(\"./lol_images/background/whitebackground.png\", _LOL_SCENE_SIZE)\n self.img_whitebackground = self.Get_ImageFromFile(\"./lol_images/background/whitebackground.png\", _LOL_SCENE_SIZE)\n\n # lol 기능 씬을 위한 이미지\n 
self.img_background_transparent = self.Get_ImageFromFile(\"./lol_images/background/background_transparent.png\", _LOL_SCENE_SIZE)\n self.img_background_transparent_raw = self.Get_ImageFromFile_COMPLETE(\"./lol_images/background/background_transparent.png\", _LOL_SCENE_SIZE)\n self.img_sequence = [ImageTk.PhotoImage(img.resize(_LOL_SCENE_SIZE)) for img in ImageSequence.Iterator(Image.open(\"./lol_images/background/background_lol.gif\"))]\n self.img_label_back = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/back.png\", (50, 50))\n self.img_label_back_over = self.Get_ImageFromFile_COMPLETE(\"./buttonimages/back_over.png\", (50, 50))\n print(\"\\x1b[1;32mButton Image Loading COMPLETE\\x1b[0;m\")\n\n","sub_path":"TermProject/Animator.py","file_name":"Animator.py","file_ext":"py","file_size_in_byte":13023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"340990107","text":"from . import io, api\n\nimport time\nfrom pprint import pprint\n\n\nadmin_tags = [\"Developer\", \"Ambassador\", \"Staff\"]\nvalid_tags = [\"Community\", \"Developer\", \"Ambassador\", \"Staff\"]\n\n\ntags = {}\nfor tag in valid_tags:\n tags[tag.lower()] = tag\n\ndef delete_message(sender, timestamp):\n \"\"\"Syntax: /delete \"\"\"\n\n if sender[\"tag\"] not in admin_tags:\n return {\n \"ok\": False,\n \"message\": \"You are not allowed to use this command.\"\n }\n\n print(\"Attempting to delete message at %s\" % timestamp)\n io.emit(\"delete message\", timestamp)\n\n\ndef ban_user(sender, username, *reason):\n \"\"\"Syntax: /ban [@] [reason]\"\"\"\n reason = \" \".join(reason)\n if sender[\"tag\"] not in admin_tags:\n return {\n \"ok\": False,\n \"message\": \"You are not allowed to use this command.\"\n }\n\n print(\"Banning user '%s'\" % username)\n\n\ndef get_rooms(sender):\n \"\"\"Syntax: /rooms\"\"\"\n message = \"\"\n for room in api.rooms.keys():\n message += room + \": \"\n for user_id in api.rooms[room][\"users\"]:\n user = api.get_user(user_id)\n message += user[\"name\"] + \", \"\n message += \"\\n\"\n\n return {\n \"ok\": True,\n \"show_message\": True,\n \"message\": message\n }\n\ndef ping(sender, delay, *message):\n \"\"\"Syntax: /ping \"\"\"\n message = \" \".join(message)\n\n message = api.parse_message(message)[\"text\"]\n\n time.sleep(int(delay))\n\n return {\n \"ok\": True,\n \"show_message\": True,\n \"message\": message\n }\n\ndef set_rank(sender, username, rank):\n \"\"\"Syntax: /setrank [@] < %s >\"\"\" % \" | \".join(valid_tags)\n\n try:\n rank = tags[rank.lower()]\n except KeyError:\n print(\"Couldn't find rank...\")\n return {\n \"ok\": False,\n \"message\": \"'%s' is not a valid rank.\"\n }\n\n username = username.lower()\n for user in api.users.values():\n pprint(user)\n if user[\"name_lower\"] == username:\n user[\"tag\"] = rank\n print(\"set rank\")\n return {\n \"ok\": True,\n \"show_message\": True,\n \"message\": \"'%s' set to '%s'\" % (username, rank)\n }\n else:\n print(\"Couldn't find user\")\n return {\n \"ok\": False,\n \"message\": \"'%s' is not a valid user.\" % username\n }\n\ncommands = {\n \"delete\": delete_message,\n \"rooms\": get_rooms,\n \"ping\": ping,\n \"setrank\": set_rank,\n}\n","sub_path":"chittr/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"11106350","text":"def equivalente (horas,minutos,segundos):\n minutosasegundos=minutos*60\n horasasegundos=horas*60*60\n\n totalsegundos = 
segundos + minutosasegundos + horasasegundos\n    return totalsegundos\n\ndef main():\n    # write your code below this line\n    resultado = equivalente(2,20,8)\n\n    print (resultado)\n\nif __name__=='__main__':\n    main()\n","sub_path":"assignments/ACT_10/src/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"458243105","text":"from .views import *\n\ndef make_admin (user_id):\n    user = User.query.filter(User.id == user_id).first()\n    user.admin = True\n\n@app.route('/delete/', methods=['POST'])\n@login_required\ndef delete_post ():\n    post = Post.query.filter(Post.id == request.form['id']).first()\n    user = g.user\n    if post.user_id != user.id and not user.admin:\n        return jsonify ({'error': 'Only admins can delete posts of other users'})\n    \n    post.type = Post_Types.deleted\n    for parent in post.parents:\n        print (parent.id)\n        parent.children.remove(post)\n        parent_resource = load_resource('post', parent.id)\n        parent_resource['child_ids'].remove(post.id)\n        overwrite_resource('post', parent.id, parent_resource)\n        db.session.add(parent)\n    db.session.add(post)\n    db.session.commit()\n    \n    resource = load_resource('post', post.id)\n    resource['body'] = 'Deleted by admin'\n    resource['children'] = []\n    overwrite_resource('post', post.id, resource)\n    \n    return jsonify ({'error': False})\n","sub_path":"vwarp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"86342177","text":"import pickle\nimport tensorflow as tf\nimport numpy as np\n\nsess = tf.InteractiveSession()\ndef unpickle(file):\n    with open(file, 'rb') as fo:\n        dict = pickle.load(fo,encoding='bytes')\n    return dict\n\ntrain_data1 = unpickle(\"data_batch_1\")\ntrain_data2 = unpickle(\"data_batch_2\")\ntrain_data3 = unpickle(\"data_batch_3\")\ntrain_data4 = unpickle(\"data_batch_4\")\ntrain_data5 = unpickle(\"data_batch_5\")\ntest_data = unpickle(\"test_batch\")\n\ndef one_hot_encode(y,n_classes=10):\n\treturn np.eye(n_classes)[y]\n\ndef normalize_columns(arr):\n\trows,cols = np.shape(arr)\n\tfor col in range(cols):\n\t\tarr[:,col] = (arr[:,col] - np.mean(arr[:,col]))/(abs(arr[:,col]).max() - abs(arr[:,col]).min())\n\n#print(train_data1[b'data'])\ntrain_data1_xs = train_data1[b'data'];\ntrain_data1_xs = np.array(train_data1_xs,dtype=np.float64)\nnormalize_columns(train_data1_xs);\n#print(train_data1_xs)\n#print(np.mean(train_data1_xs[:,1]))\ntrain_data1_ys = train_data1[b'labels'];\ntrain_data1_ys = one_hot_encode(train_data1_ys);\n#print(train_data1_ys)\n\ntrain_data2_xs = train_data2[b'data'];\ntrain_data2_xs = np.array(train_data2_xs,dtype=np.float64)\nnormalize_columns(train_data2_xs);\ntrain_data2_ys = train_data2[b'labels'];\ntrain_data2_ys = one_hot_encode(train_data2_ys);\n\ntrain_data3_xs = train_data3[b'data'];\ntrain_data3_xs = np.array(train_data3_xs,dtype=np.float64)\nnormalize_columns(train_data3_xs);\ntrain_data3_ys = train_data3[b'labels'];\ntrain_data3_ys = one_hot_encode(train_data3_ys);\n\ntrain_data4_xs = train_data4[b'data'];\ntrain_data4_xs = np.array(train_data4_xs,dtype=np.float64)\nnormalize_columns(train_data4_xs);\ntrain_data4_ys = train_data4[b'labels'];\ntrain_data4_ys = one_hot_encode(train_data4_ys);\n\ntrain_data5_xs = train_data5[b'data'];\ntrain_data5_xs = np.array(train_data5_xs,dtype=np.float64)\nnormalize_columns(train_data5_xs);\ntrain_data5_ys = 
train_data5[b'labels'];\ntrain_data5_ys = one_hot_encode(train_data5_ys);\n\ntest_data_xs = test_data[b'data'];\ntest_data_xs = np.array(test_data_xs,dtype=np.float64)\nnormalize_columns(test_data_xs);\ntest_data_ys = test_data[b'labels'];\ntest_data_ys = one_hot_encode(test_data_ys);\n\nx = tf.placeholder(tf.float32, shape=[None, 3072,])\ny_labels = tf.placeholder(tf.float32, shape=[None, 10])\n\nimg_data = tf.reshape(x, [-1,3,32,32])\nx_image = tf.transpose(img_data, perm=[0,2,3,1])\nglobal_step = tf.Variable(0,trainable=False)\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\nkernel1 = weight_variable([3, 3, 3, 32])\nconv1 = tf.nn.conv2d(x_image,kernel1,[1,1,1,1],padding='SAME')\nbias1 = bias_variable([32])\npre_activation1 = tf.nn.bias_add(conv1,bias1)\nconv1_pre_final = tf.nn.relu(pre_activation1)\nbatch_mean1, batch_var1 = tf.nn.moments(conv1_pre_final,[0,1,2])\nbeta = tf.Variable(tf.constant(0.0,shape=[32]))\ngamma = tf.Variable(tf.constant(1.0,shape=[32]))\nconv1_final = tf.nn.batch_normalization(conv1_pre_final,batch_mean1,batch_var1,beta,gamma,1e-3)\n#pool1 = tf.nn.max_pool(conv1_final,ksize = [1,3,3,1],strides = [1,2,2,1],padding = 'SAME')\n#norm1 = tf.nn.lrn(pool1,4,bias=1.0,alpha=0.001/9.0,beta=0.75)\n\nkernel2 = weight_variable([3, 3, 32, 32])\nconv2 = tf.nn.conv2d(conv1_final,kernel2,[1,1,1,1],padding='SAME')\nbias2 = bias_variable([32])\npre_activation2 = tf.nn.bias_add(conv2,bias2)\nconv2_pre_final = tf.nn.relu(pre_activation2)\nbatch_mean2, batch_var2 = tf.nn.moments(conv2_pre_final,[0,1,2])\n#beta = tf.Variable(tf.constant(0.0,shape=[32]))\n#gamma = tf.Variable(tf.constant(1.0,shape=[32]))\nconv2_final = tf.nn.batch_normalization(conv2_pre_final,batch_mean2,batch_var2,beta,gamma,1e-3)\npool1 = tf.nn.max_pool(conv2_final,ksize = [1,3,3,1],strides = [1,2,2,1],padding = 'SAME')\npool1_drop = tf.nn.dropout(pool1,0.2)\n\nkernel3 = weight_variable([3, 3, 32, 64])\nconv3 = tf.nn.conv2d(pool1_drop,kernel3,[1,1,1,1],padding='SAME')\nbias3 = bias_variable([64])\npre_activation3 = tf.nn.bias_add(conv3,bias3)\nconv3_pre_final = tf.nn.relu(pre_activation3)\nbatch_mean3, batch_var3 = tf.nn.moments(conv3_pre_final,[0,1,2])\nbeta = tf.Variable(tf.constant(0.0,shape=[64]))\ngamma = tf.Variable(tf.constant(1.0,shape=[64]))\nconv3_final = tf.nn.batch_normalization(conv3_pre_final,batch_mean3,batch_var3,beta,gamma,1e-3)\n#pool1 = tf.nn.max_pool(conv1_final,ksize = [1,3,3,1],strides = [1,2,2,1],padding = 'SAME')\n#norm1 = tf.nn.lrn(pool1,4,bias=1.0,alpha=0.001/9.0,beta=0.75)\n\nkernel4 = weight_variable([3, 3, 64, 64])\nconv4 = tf.nn.conv2d(conv3_final,kernel4,[1,1,1,1],padding='SAME')\nbias4 = bias_variable([64])\npre_activation4 = tf.nn.bias_add(conv4,bias4)\nconv4_pre_final = tf.nn.relu(pre_activation4)\nbatch_mean4, batch_var4 = tf.nn.moments(conv4_pre_final,[0,1,2])\n#beta = tf.Variable(tf.constant(0.0,shape=[64]))\n#gamma = tf.Variable(tf.constant(1.0,shape=[64]))\nconv4_final = tf.nn.batch_normalization(conv4_pre_final,batch_mean4,batch_var4,beta,gamma,1e-3)\npool4 = tf.nn.max_pool(conv4_final,ksize = [1,3,3,1],strides = [1,2,2,1],padding = 'SAME')\npool4_drop = tf.nn.dropout(pool4,0.2)\n\nkernel5 = weight_variable([5, 5, 64, 128])\nconv5 = tf.nn.conv2d(pool4_drop,kernel5,[1,1,1,1],padding='SAME')\nbias5 = bias_variable([128])\npre_activation5 = tf.nn.bias_add(conv5,bias5)\nconv5_pre_final = 
tf.nn.relu(pre_activation5)\nbatch_mean5, batch_var5 = tf.nn.moments(conv5_pre_final,[0,1,2])\nbeta = tf.Variable(tf.constant(0.0,shape=[128]))\ngamma = tf.Variable(tf.constant(1.0,shape=[128]))\nconv5_final = tf.nn.batch_normalization(conv5_pre_final,batch_mean5,batch_var5,beta,gamma,1e-3)\npool5 = tf.nn.max_pool(conv5_final,ksize = [1,2,2,1],strides = [1,2,2,1],padding = 'SAME')\npool5_drop = tf.nn.dropout(pool5,0.2)\n\nW_fc1 = weight_variable([4 * 4 * 128, 256])\nb_fc1 = bias_variable([256])\n\nh_pool2_flat = tf.reshape(pool5_drop, [-1, 4*4*128])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\nh_fc1_drop = tf.nn.dropout(h_fc1, 0.5)\n\nW_fc2 = weight_variable([256, 10])\nb_fc2 = bias_variable([10])\n\ny_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_labels, logits=y_conv))\ntrain_step = tf.train.MomentumOptimizer(0.01, momentum=0.9).minimize(cross_entropy,global_step)\ncorrect_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_labels,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsess.run(tf.global_variables_initializer())\n\nfor j in range(1000000):\n\tk = j%500\n\tif k<100:\n\t\ti = k\n\t\tbatch_xs = train_data1_xs[100*i:100*(i+1)]\n\t\tbatch_ys = train_data1_ys[100*i:100*(i+1)]\n\telif 200>k>=100:\n\t\ti = k-100\n\t\tbatch_xs = train_data2_xs[100*i:100*(i+1)]\n\t\tbatch_ys = train_data2_ys[100*i:100*(i+1)]\n\telif 300>k>=200:\n\t\ti = k-200\n\t\tbatch_xs = train_data3_xs[100*i:100*(i+1)]\n\t\tbatch_ys = train_data3_ys[100*i:100*(i+1)]\n\telif 400>k>=300:\n\t\ti = k-300\n\t\tbatch_xs = train_data4_xs[100*i:100*(i+1)]\n\t\tbatch_ys = train_data4_ys[100*i:100*(i+1)]\n\telse:\n\t\ti = k-400\n\t\tbatch_xs = train_data5_xs[100*i:100*(i+1)]\n\t\tbatch_ys = train_data5_ys[100*i:100*(i+1)]\n\n\tif j%25 == 0:\n\t\ttrain_accuracy = accuracy.eval(feed_dict={x:batch_xs, y_labels:batch_ys})\n\t\tprint(\"step %d, training accuracy %g\"%(j, train_accuracy))\n\n\tif j%100 == 0:\n\t\ttest_acc = 0.0\n\t\tfor i in range(100):\n\t\t\ttest_xs = test_data_xs[100*i:100*(i+1)]\n\t\t\ttest_ys = test_data_ys[100*i:100*(i+1)]\n\t\t\tacc = accuracy.eval(feed_dict={x:test_xs,y_labels:test_ys})\n\t\t\ttest_acc += acc\n\n\t\tprint(\"accuracy on test_data : %g\"%(test_acc/100.0))\n\n\ttrain_step.run(feed_dict={x:batch_xs, y_labels:batch_ys})\n\n# Final evaluation: reuse the normalized, one-hot-encoded test arrays prepared above.\ntest_acc = 0.0\n\nfor i in range(100):\n\ttest_xs = test_data_xs[100*i:100*(i+1)]\n\ttest_ys = test_data_ys[100*i:100*(i+1)]\n\n\tacc = accuracy.eval(feed_dict={x:test_xs,y_labels:test_ys})\n\ttest_acc += acc\n\tprint(acc)\n\tprint(test_acc)\n\nprint(\"final test accuracy %g\"%(test_acc/100.0))\n","sub_path":"cifar10 problem/cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":7964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"189162201","text":"\"\"\"\nImplementation of the inverse KL function. (For definition of this function, \nsee surface notes.)\n\"\"\"\n\n\nimport numpy as np\nimport scipy as sp\nimport scipy.stats as st\nimport matplotlib.pyplot as plt\n\n\n\ndef KL(a, b):\n    \"\"\"\n    Calculate the KL divergence between two Bernoulli distributions parameterized\n    by a and b respectively. \n    \n    Input:\n    a: a real value in (0,1). \n    b: a real value in (0,1). \n    \n    Output:\n    d: a real non-negative value. 
\n    \"\"\"\n    \n    d = a * np.log(a/b) + (1-a) * np.log((1-a)/(1-b))\n    return d\n\n\ndef g(theta, y, x):\n    \"\"\"\n    g(theta, y, x) := KL(theta, x) - y \n    \n    Input:\n        theta: a real value in (0,1). \n        x: a real value in (0,1). \n        y: a positive real value \n    \"\"\" \n    \n    return KL(theta, x) - y \n\n\ndef g_dash(theta, x):\n    \"\"\"\n    g_dash(theta, x) := g'(theta, y, x), where the derivative is with respect\n    to x. g_dash is independent of y. \n    \n    Input:\n        theta: a real value in (0,1). \n        x: a real value in (0,1). \n    \"\"\"\n    \n    return -theta/x + (1-theta)/(1-x)\n\n\n\ndef KL_inverse(theta, y, epsilon, x10, x20):\n    \"\"\"\n    Find two values of x that satisfy KL(theta, x) = y. By Newton's method. \n    \n    Input:\n        theta: a real value in (0,1). \n        y: a positive real value.\n        epsilon: a positive small real value that determines when the iteration ends. \n        x10: initialized value of x1, should be in (0, theta)\n        x20: initialized value of x2, should be in (theta, 1)\n    \n    Output:\n        (x1,x2): a 2-tuple of real values, satisfying x1 in (0, theta) and x2 in (theta, 1). \n    \"\"\"\n\n    diff = 1.0\n    x1_cur = x10\n    while diff > epsilon:\n        #print('x1_cur =', x1_cur)\n        x1_nxt = x1_cur - g(theta, y, x1_cur)/g_dash(theta, x1_cur)\n        if x1_nxt < 0:\n            x1_nxt = theta/100.0\n        elif x1_nxt > theta:\n            x1_nxt = theta - theta/100.0\n        diff = abs(x1_nxt - x1_cur)\n        x1_cur = x1_nxt\n    \n    diff = 1.0\n    x2_cur = x20\n    while diff > epsilon:\n        #print('x2_cur =', x2_cur)\n        x2_nxt = x2_cur - g(theta, y, x2_cur)/g_dash(theta, x2_cur)\n        if x2_nxt > 1:\n            x2_nxt = 1 - (1-theta)/100.0\n        elif x2_nxt < theta:\n            x2_nxt = theta + (1-theta)/100.0 \n        diff = abs(x2_nxt - x2_cur)\n        x2_cur = x2_nxt \n    \n    return (x1_cur, x2_cur)\n\n\n\n\nif __name__ == \"__main__\":\n    thetatrue1 = 0.7\n    thetatrue2 = 0.5\n    d11 = 0.7\n    d12 = 0.05\n    d21 = 0.1\n    d22 = 0.8\n    d31 = 0.2\n    d32 = 0.2\n    \n    epsilon = 0.00000001\n    \n    # particle 1\n    (theta11_1, theta11_2) = KL_inverse(thetatrue1, d11, epsilon, thetatrue1/2.0, thetatrue1 + (1-thetatrue1)/2.0)\n    (theta12_1, theta12_2) = KL_inverse(thetatrue2, d12, epsilon, thetatrue2/2.0, thetatrue2 + (1-thetatrue2)/2.0)\n    \n    # particle 2\n    (theta21_1, theta21_2) = KL_inverse(thetatrue1, d21, epsilon, thetatrue1/2.0, thetatrue1 + (1-thetatrue1)/2.0)\n    (theta22_1, theta22_2) = KL_inverse(thetatrue2, d22, epsilon, thetatrue2/2.0, thetatrue2 + (1-thetatrue2)/2.0) \n    \n    # particle 3\n    (theta31_1, theta31_2) = KL_inverse(thetatrue1, d31, epsilon, thetatrue1/2.0, thetatrue1 + (1-thetatrue1)/2.0)\n    (theta32_1, theta32_2) = KL_inverse(thetatrue2, d32, epsilon, thetatrue2/2.0, thetatrue2 + (1-thetatrue2)/2.0) \n    \n    plt.figure(1)\n    plt.scatter([thetatrue1], [thetatrue2], color='purple')\n    \n    ## particle 1\n    plt.scatter([theta11_1], [theta12_1], color='red')\n    #plt.scatter([theta11_1], [theta12_2], color='red')\n    #plt.scatter([theta11_2], [theta12_1], color='red')\n    #plt.scatter([theta11_2], [theta12_2], color='red')\n    print('theta1 =', '(', theta11_1, ',', theta12_1, ')')\n    \n    ## particle 2\n    #plt.scatter([theta21_1], [theta22_1], color='green')\n    plt.scatter([theta21_1], [theta22_2], color='green')\n    #plt.scatter([theta21_2], [theta22_1], color='green')\n    #plt.scatter([theta21_2], [theta22_2], color='green')\n    print('theta2 =', '(', theta21_1, ',', theta22_2, ')')\n    \n    ## particle 3\n    plt.scatter([theta31_1], [theta32_1], color='blue')\n    #plt.scatter([theta31_1], [theta32_2], color='blue')\n    #plt.scatter([theta31_2], [theta32_1], color='blue')\n    #plt.scatter([theta31_2], [theta32_2], color='blue')\n    
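# report the root pair chosen for particle 3 (the lower root on both arms)\n    print('theta3 =', '(', theta31_1, ',', 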
theta32_1, ')')\n \n plt.plot([0,1], [0,1], color='black')\n plt.xlim([0,1])\n plt.ylim([0,1])\n plt.grid()\n plt.xlabel('arm 1')\n plt.ylabel('arm 2')\n ","sub_path":"3b_KL_divergence/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"51502581","text":"class Stack:\n \"\"\"Stack data structure\"\"\"\n def __init__(self):\n self.stack = []\n\n def add(self, vertex):\n self.stack.insert(0, vertex)\n\n def pop(self):\n return self.stack.pop(0)\n\n def __call__(self, *args, **kwargs):\n return self.stack\n\n\ndef dfs(graph, source):\n visited = []\n stack = Stack()\n visited.append(source)\n stack.add(source)\n\n while stack():\n v = stack.pop()\n\n for neighbour in graph[v]:\n if neighbour not in visited:\n visited.append(neighbour)\n stack.add(v)\n stack.add(neighbour)\n break\n\n return visited\n\n\ngraph = {1: [4, 2],\n 2: [3, 5, 7, 8],\n 3: [2, 4, 9, 10],\n 4: [1, 3],\n 5: [2, 6, 7, 8],\n 6: [5],\n 7: [2, 5, 8],\n 8: [2, 5, 7],\n 9: [3],\n 10: [3]}\n\nt = dfs(graph, 1)\nprint(t)\n","sub_path":"Graph Traversal/DFS.py","file_name":"DFS.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"364190312","text":"#!/usr/bin/env python3\n\n\nfrom ev3dev.ev3 import (\n Motor, MediumMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C,\n TouchSensor, ColorSensor, INPUT_1, INPUT_3, INPUT_4,\n Sound, RemoteControl, InfraredSensor\n)\n\nfrom ev3dev.helper import RemoteControlledTank\n\nfrom threading import Thread\n\n\nclass MarsRov3r(RemoteControlledTank):\n def __init__(\n self,\n left_motor_port: str = OUTPUT_B,\n right_motor_port: str = OUTPUT_C,\n grip_motor_port: str = OUTPUT_A,\n touch_sensor_port: str = INPUT_1, color_sensor_port: str = INPUT_3,\n ir_sensor_port: str = INPUT_4, ir_beacon_channel: int = 1):\n super().__init__(\n left_motor=left_motor_port, right_motor=right_motor_port,\n polarity=Motor.POLARITY_NORMAL)\n\n self.grip_motor = MediumMotor(address=grip_motor_port)\n\n self.touch_sensor = TouchSensor(address=touch_sensor_port)\n self.color_sensor = ColorSensor(address=color_sensor_port)\n\n self.ir_sensor = InfraredSensor(address=ir_sensor_port)\n self.ir_beacon_channel = ir_beacon_channel\n self.beacon = RemoteControl(sensor=self.ir_sensor,\n channel=ir_beacon_channel)\n\n self.speaker = Sound()\n\n is_gripping = False\n\n def grip_or_release_claw_by_ir_beacon(self):\n while True:\n if self.beacon.beacon:\n if self.is_gripping:\n self.grip_motor.run_timed(\n speed_sp=1000,\n time_sp=2000,\n stop_action=Motor.STOP_ACTION_BRAKE)\n self.grip_motor.wait_while(Motor.STATE_RUNNING)\n\n self.speaker.play(\n wav_file='/home/robot/sound/Air release.wav').wait()\n\n self.is_gripping = False\n\n else:\n self.grip_motor.run_timed(\n speed_sp=-1000,\n time_sp=2000,\n stop_action=Motor.STOP_ACTION_BRAKE)\n self.grip_motor.wait_while(Motor.STATE_RUNNING)\n\n self.speaker.play(\n wav_file='/home/robot/sound/Airbrake.wav').wait()\n\n self.is_gripping = True\n\n while self.beacon.beacon:\n pass\n\n def main(self):\n self.grip_motor.run_timed(\n speed_sp=500,\n time_sp=1000,\n stop_action=Motor.STOP_ACTION_BRAKE)\n self.grip_motor.wait_while(Motor.STATE_RUNNING)\n\n Thread(target=self.grip_or_release_claw_by_ir_beacon,\n daemon=True).start()\n\n super().main() # RemoteControlledTank.main()\n\n\nif __name__ == '__main__':\n MARS_ROV3R = MarsRov3r()\n\n 
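# main() runs the gripper briefly, starts the IR-beacon claw thread, then hands control to RemoteControlledTank.main()\n    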
MARS_ROV3R.main()\n","sub_path":"Computing-Platforms/EV3/LuongPham-Bots/Mars-Rov3r/Mars-Rov3r-RCTank.EV3Dev1.Threading.py","file_name":"Mars-Rov3r-RCTank.EV3Dev1.Threading.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"22937379","text":"#! python3\n\n# Solve a two-step algebra equation.\n# Two-step equations are of the form ax + b = c\n# You will ask the user to enter in all 3 variables: a, b and c\n# You will need to display the solution for the equation\n\n# inputs\n# a, b, c\n#\n# outputs\n# solution for x\n#\n# test case: 5, 1, 11 should give x = 2\nimport math\na = input(\"Enter first variable\")\nb = input(\"Enter second variable\")\nc = input(\"Enter third variable\")\nA = int(a)\nB = int(b)\nC = int(c)\nsub = C - B\nSol = sub / A \nsol = str(Sol)\nprint(\"x = \" + sol)\n","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"574517291","text":"import pika\n\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n\n# declare a temporary queue with a random name; exclusive=True deletes it when this connection closes\n# result.method.queue holds the generated queue name\nresult = channel.queue_declare(queue='', exclusive=True)\nqueue_name = result.method.queue\n\nchannel.queue_bind(exchange='logs', routing_key='', queue=queue_name)\n\nprint('[x] waiting for logs. To exit press CTRL+C')\n\n\ndef callback(ch, method, properties, body):\n    print('[receive] %s' % body)\n\n\nchannel.basic_consume(\n    queue=queue_name, on_message_callback=callback, auto_ack=True\n)\n\nchannel.start_consuming()","sub_path":"publish/receive_logs.py","file_name":"receive_logs.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"30969882","text":"from typing import Tuple\n\nimport os\nimport os.path as osp\nimport pickle\n\n\nimport numpy as np\nimport cv2\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cluster import KMeans\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\n\nfrom skimage import data\n\nSEED = 42\nNB_WORDS = 20\nDESIRED_WIDTH = 1024\n\n\n# data loading\ndef load_dataset(dataset_dir_path: str) -> Tuple[np.array, np.array]:\n    X, y = [], []\n    for i, class_dir in enumerate(os.listdir(dataset_dir_path)):\n        class_dir_path = osp.join(dataset_dir_path, class_dir)\n        for file in os.listdir(class_dir_path):\n            img_file = cv2.imread(osp.join(class_dir_path, file), cv2.IMREAD_GRAYSCALE)\n            if img_file is None:\n                print(osp.join(class_dir_path, file))\n            # cv2.imshow('original', img_file)\n            height, width = img_file.shape\n            scale_factor = DESIRED_WIDTH / width\n            img_rescaled = cv2.resize(img_file, (DESIRED_WIDTH, int(height*scale_factor)))\n            # cv2.imshow('rescaled', img_rescaled)\n            # cv2.waitKey(0)\n\n            X.append(img_rescaled)\n            y.append(i)\n\n    X = np.array(X)\n    y = np.array(y)\n    return X, y\n\n\ndef prepare_vocabulary(images, feature_detector_descriptor, nb_words):\n    features = []\n    for image in images:\n        print(image.shape)\n        keypoints, image_descriptor = feature_detector_descriptor.detectAndCompute(image, None)\n        print(len(keypoints))\n        features.extend(image_descriptor)\n    features = np.float32(features)\n    kmeans = KMeans(n_clusters=nb_words, random_state=42, n_jobs=-1).fit(features)\n    return 
kmeans\n\n\ndef descriptor2histogram(descriptor, vocab_model, normalize=True):\n    features_words = vocab_model.predict(descriptor)\n    histogram = np.zeros(vocab_model.n_clusters, dtype=np.float32)\n    unique, counts = np.unique(features_words, return_counts=True)\n    histogram[unique] += counts\n    if normalize:\n        histogram /= histogram.sum()\n    return histogram\n\n\ndef apply_feature_transform(X, feature_detector_descriptor, vocab_model):\n    X_transformed = []\n    for image in X:\n        keypoints, image_descriptor = feature_detector_descriptor.detectAndCompute(image, None)\n        bow_features_histogram = descriptor2histogram(image_descriptor, vocab_model)\n        X_transformed.append(bow_features_histogram)\n    X_transformed = np.array(X_transformed)\n    return X_transformed\n\n\ndef print_score(clf, X_train, y_train, X_test, y_test):\n    # val: {clf.score(X_val, y_val)},\n    print(\n        f'train: {clf.score(X_train, y_train)}, test: {clf.score(X_test, y_test)}'\n    )\n\n\ndef lab():\n    feature_detector_descriptor = cv2.AKAZE_create()\n\n    # image = data.astronaut()[:, :, ::-1]\n    #\n    # keypoints, image_descriptor = feature_detector_descriptor.detectAndCompute(image, None)\n    # # The second argument can be a binary mask that restricts the region from which\n    # # keypoints/descriptors are extracted - since we care about the whole image here,\n    # # we passed None.\n    #\n    # # The result is a list of keypoints and the corresponding vectors that together form the descriptor.\n    # print(f'{len(keypoints)}, {len(image_descriptor)}')\n    # print(f'{image_descriptor[0]}')\n    #\n    # X, y = load_dataset(DATA_DIR_PATH)\n\n    X, y = load_dataset('./../_data/zpo/s01e03/dataset_1')\n\n    X_train, X_test, y_train, y_test = train_test_split(\n        X, y, test_size=0.2, random_state=SEED, stratify=y\n    )\n    print(f'len(X_train): {len(X_train)}, len(X_test): {len(X_test)}')\n\n    vocab_model = prepare_vocabulary(X_train, feature_detector_descriptor, NB_WORDS)\n\n    X_train = apply_feature_transform(X_train, feature_detector_descriptor, vocab_model)\n    X_test = apply_feature_transform(X_test, feature_detector_descriptor, vocab_model)\n\n    svc = SVC(random_state=SEED)\n    print(svc.fit(X_train, y_train))\n    print_score(svc, X_train, y_train, X_test, y_test)\n\n    mlp = MLPClassifier(random_state=SEED)\n    print(mlp.fit(X_train, y_train))\n    print_score(mlp, X_train, y_train, X_test, y_test)\n\n    rf = RandomForestClassifier(random_state=SEED)\n    print(rf.fit(X_train, y_train))\n    print_score(rf, X_train, y_train, X_test, y_test)\n\n\ndef project():\n    X, y = load_dataset('./../_data/zpo/s01e03/dataset_2_fixed/train')\n\n    X_train, X_test, y_train, y_test = train_test_split(\n        X, y, test_size=0.2, random_state=SEED, stratify=y\n    )\n\n    # X_train, X_val, y_train, y_val = train_test_split(\n    #     X_train, y_train, test_size=0.2, random_state=SEED, stratify=y_train\n    # ) # In the second problem the real test set comes later; X_test here is effectively a validation set\n\n    feature_detector_descriptor = cv2.AKAZE_create()\n\n    # vocab_model = prepare_vocabulary(X_train, feature_detector_descriptor, NB_WORDS)\n    #\n    # X_train = apply_feature_transform(X_train, feature_detector_descriptor, vocab_model)\n    # # X_val = apply_feature_transform(X_val, feature_detector_descriptor, vocab_model)\n    # X_test = apply_feature_transform(X_test, feature_detector_descriptor, vocab_model)\n    #\n    # pickle.dump(vocab_model, open(f'./../_data/zpo/s01e03/vocab_model_{NB_WORDS}.p', 'wb'))\n    # pickle.dump(X_train, open(f'./../_data/zpo/s01e03/X_train_{NB_WORDS}.p', 'wb'))\n    # # pickle.dump(X_val, 
open(f'./../_data/zpo/s01e03/X_val_{NB_WORDS}.p', 'wb'))\n    # pickle.dump(X_test, open(f'./../_data/zpo/s01e03/X_test_{NB_WORDS}.p', 'wb'))\n\n    vocab_model = pickle.load(open(f'./../_data/zpo/s01e03/vocab_model_{NB_WORDS}.p', 'rb'))\n    X_train = pickle.load(open(f'./../_data/zpo/s01e03/X_train_{NB_WORDS}.p', 'rb'))\n    # X_val = pickle.load(open(f'./../_data/zpo/s01e03/X_val_{NB_WORDS}.p', 'rb'))\n    X_test = pickle.load(open(f'./../_data/zpo/s01e03/X_test_{NB_WORDS}.p', 'rb'))\n\n    svc = SVC(random_state=SEED)\n    print(svc.fit(X_train, y_train))\n    print_score(svc, X_train, y_train, X_test, y_test)\n\n    mlp = MLPClassifier(random_state=SEED)\n    print(mlp.fit(X_train, y_train))\n    print_score(mlp, X_train, y_train, X_test, y_test)\n\n    rf = RandomForestClassifier(random_state=SEED)\n    print(rf.fit(X_train, y_train))\n    print_score(rf, X_train, y_train, X_test, y_test)\n\n\nif __name__ == '__main__':\n    lab()\n    # project()\n","sub_path":"zpo/s01e03.py","file_name":"s01e03.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"26797365","text":"''''\nCapture multiple Faces from multiple users to be stored on a DataBase (dataset directory)\n\t==> Faces will be stored on a directory: dataset/ (if it does not exist, please create one)\n\t==> Each face will have a unique numeric integer ID as 1, 2, 3, etc \n\nBased on original code by Anirban Kar: https://github.com/thecodacus/Face-Recognition \n\nDeveloped by Marcelo Rovai - MJRoBot.org @ 21Feb18 \n\n'''\n\nimport cv2\nimport os\nimport math\nfrom sklearn import neighbors\nimport os\nimport os.path\nimport pickle\nfrom PIL import Image, ImageDraw\nimport face_recognition\nfrom face_recognition.face_recognition_cli import image_files_in_folder\nimport sys\n\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n\ndef train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):\n    \"\"\"\n    Trains a k-nearest neighbors classifier for face recognition.\n    \n    :param train_dir: directory that contains a sub-directory for each known person, with its name.\n    \n     (View in source code to see train_dir example tree structure)\n    \n     Structure:\n        <train_dir>/\n        ├── <person1>/\n        │   ├── <somename1>.jpeg\n        │   ├── <somename2>.jpeg\n        │   ├── ...\n        ├── <person2>/\n        │   ├── <somename1>.jpeg\n        │   └── <somename2>.jpeg\n        └── ...\n    \n    :param model_save_path: (optional) path to save model on disk\n    :param n_neighbors: (optional) number of neighbors to weigh in classification. 
Chosen automatically if not specified\n    :param knn_algo: (optional) underlying data structure to support knn. Default is ball_tree\n    :param verbose: verbosity of training\n    :return: returns knn classifier that was trained on the given data.\n    \"\"\"\n    X = []\n    y = []\n    \n    # Loop through each person in the training set\n    for class_dir in os.listdir(train_dir):\n        if not os.path.isdir(os.path.join(train_dir, class_dir)):\n            continue\n        \n        # Loop through each training image for the current person\n        for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):\n            image = face_recognition.load_image_file(img_path)\n            face_bounding_boxes = face_recognition.face_locations(image)\n            \n            if len(face_bounding_boxes) != 1:\n                # If there are no people (or too many people) in a training image, skip the image.\n                if verbose:\n                    print(\"Image {} not suitable for training: {}\".format(img_path, \"Didn't find a face\" if len(face_bounding_boxes) < 1 else \"Found more than one face\"))\n            else:\n                # Add face encoding for current image to the training set\n                X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0])\n                y.append(class_dir)\n\n    # Determine how many neighbors to use for weighting in the KNN classifier\n    if n_neighbors is None:\n        n_neighbors = int(round(math.sqrt(len(X))))\n        if verbose:\n            print(\"Chose n_neighbors automatically:\", n_neighbors)\n\n    # Create and train the KNN classifier\n    knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')\n    knn_clf.fit(X, y)\n\n    # Save the trained KNN classifier\n    if model_save_path is not None:\n        with open(model_save_path, 'wb') as f:\n            pickle.dump(knn_clf, f)\n    \n    return knn_clf\n\nif __name__ == \"__main__\":\n\n    \n    cam = cv2.VideoCapture(0)\n    cam.set(3, 640) # set video width\n    cam.set(4, 480) # set video height\n\n    face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\n    # For each person, enter one numeric face id\n    name = input('\\n enter user id and press <return> ==> ')\n\n    
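# the loop below saves cropped face images for this user until 50 samples are taken or ESC is pressed\n    print(\"\\n [INFO] Initializing face capture. 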
Look at the camera and wait ...\")\n    # Initialize individual sampling face count\n    count = 0\n    directory = \"knn_examples/train/\"+str(name)\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n    while(True):\n\n        ret, img = cam.read()\n        # img = cv2.flip(img, -1) # flip video image vertically\n        # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n        faces = face_detector.detectMultiScale(img, 1.3, 5)\n\n        for (x,y,w,h) in faces:\n\n            # cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)\n            count += 1\n            \n            # Save the captured image into the datasets folder\n            cv2.imwrite(\"knn_examples/train/\" + name +\"/\" + str(name) + str(count) + \".jpg\", img[y-30:y+h+50,x-30:x+w+50])\n\n        cv2.imshow('image', img)\n\n        k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting video\n        if k == 27:\n            break\n        elif count >= 50: # Take 50 face samples and stop video\n            break\n\n    # Do a bit of cleanup\n    print(\"\\n [INFO] Exiting Program and cleanup stuff\")\n    print(\"Training KNN classifier...\")\n    classifier = train(\"knn_examples/train\", model_save_path=\"trained_knn_model.clf\", n_neighbors=2)\n    print(\"Training complete!\")\n\n    cam.release()\n    cv2.destroyAllWindows()\n\n\n","sub_path":"01_face_dataset.py","file_name":"01_face_dataset.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"261694044","text":"# -*-coding:Utf-8 -*\n\n\"\"\"This file contains the main code of the game.\n\nRun it with Python to launch the game.\n\n\"\"\"\n\nimport os\n\nfrom card import Card\nimport labyrinth\n\n# This is just to put python in the game directory\nprint(os.getcwd())\n# os.chdir(\"../../Python/python_remake/ROBOC\")\n# print(os.getcwd())\n# print(os.listdir())\n\n# Loading existing cards\ncards = []\nfor file_name in os.listdir(\"cards\"):\n    if file_name.endswith(\".txt\"):\n        path = os.path.join(\"cards\", file_name)\n        card_name = file_name[:-4].lower()  # strip the \".txt\" extension (4 characters)\n        with open(path, \"r\") as file:\n            content = file.read()\n        try:\n            card = Card(card_name, content)\n        except ValueError:\n            print(\"I cannot read the card file\")\n        else:\n            cards.append(card)\n\n#Showing existing cards\nprint(\"Existing labyrinths :\")\nfor i, card in enumerate(cards):\n    print(\"  {} - {}\".format(i + 1, card.name))\n\nlabyrinth = None\n# TO DO: If there is a saved game, we show it\n\n\n# ...\n","sub_path":"ROBOC/roboc.py","file_name":"roboc.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"139649313","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#inputs=['1.cof',\n#        '5.cof',\n#        '10.cof',\n#        '20.cof']\nrng=['1xf2','1xc2','4xc2','4xf2','5D2']\n#rng=[1,2,7,8,9,10,20]\n#rng=range(1,6)\ninputs=[]\nfor i in rng:\n    inputs.append(str(i)+'.cof')\n\nfor inp in inputs:\n    #try:\n    inFile = open(inp,'r')\n    #except IOError:continue\n    ords=[]\n    cofs=[]\n    for line in inFile:\n        terms=line.split(' | ')\n        newCof=abs(float(terms[1]))\n        if newCof > 1e-12:\n            ords.append(int(terms[0].strip('(').strip(')').strip(',')))\n            cofs.append(newCof)\n    cofs=np.log10(np.array(cofs))\n    plt.plot(ords,cofs,'-',label=inp.split('.')[0])\n\nplt.legend()\nplt.xlabel('Expansion Moment')\nplt.ylabel('log |Coeff|')\n#plt.title('Expansion Moment 
Decay')\nplt.show()\n\n","sub_path":"trunk/parallelUQ/old_20140405/storage/cof_mesh2/plotCoeffs.py","file_name":"plotCoeffs.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"249867029","text":"import requests\nfrom requests.packages.urllib3.util.retry import Retry\nimport datetime\nimport pandas as pd\nimport concurrent.futures\nimport logging\nfrom math import isnan\nimport gc\nimport os\nimport glob\nimport json\nimport pickle as pk\nfrom copy import deepcopy\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom jinja2 import Environment\nimport netCDF4 as nc\nimport numpy as np\nfrom sys import getsizeof\n\nQC_PARAMETER_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12578/qcparameters/'\nANNOTATIONS_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12580/anno/find?'\nDEPLOYEMENT_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12587/events/deployment/inv/'\nDATA_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/'\nDATA_TEAM_PORTAL_URL = 'http://ooi.visualocean.net/data-streams/export/'\n\n# out_dir = '/home/knuth/ooi_stats/alerts/output/'\nout_dir = '/Users/knuth/Documents/ooi/repos/github/ooi_stats/alerts/output/'\n\nvirtual_times = ['time','met_timeflx','botsflu_time15s','botsflu_time24h']\nCE_cabled = [\"CE02SHBP\", \"CE04OSBP\", \"CE04OSPS\"]\n\nntp_epoch = datetime.datetime(1900, 1, 1)\nunix_epoch = datetime.datetime(1970, 1, 1)\nntp_delta = (unix_epoch - ntp_epoch).total_seconds()\n\npool = concurrent.futures.ThreadPoolExecutor(max_workers=20)\nsession = requests.session()\nretry = Retry(total=10, backoff_factor=0.3,)\nadapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100, max_retries=retry, pool_block=True)\nsession.mount('https://', adapter)\n\n\n\ndef request_data(url,username,token):\n auth = (username, token)\n return session.get(url,auth=auth)\n\ndef to_integer(dt_time):\n return 10000*dt_time.year + 100*dt_time.month + dt_time.day\n\ndef diff_days(d1,d2):\n return (d2 - d1).days\n\ndef create_dir(new_dir):\n # Check if dir exists.. if it doesn't... 
create it.\n    if not os.path.isdir(new_dir):\n        try:\n            os.makedirs(new_dir)\n        except OSError:\n            if os.path.exists(new_dir):\n                pass\n            else:\n                raise\n\ndef get_most_recent_eng(array):\n    try:\n        stream_dir = out_dir + array+'/'+'engineering_stream'+'/*'\n        stream_list_of_files = glob.glob(stream_dir)\n\n        stream_latest_file = max(stream_list_of_files, key=os.path.getctime)\n\n        with open(stream_latest_file, 'rb') as f:\n            stream_most_recent = pk.load(f)\n\n        return stream_most_recent\n\n    except:\n        stream_most_recent = pd.DataFrame(columns =['refdes','method','stream'])\n\n        return stream_most_recent\n\n\n\n\n\n\n\n\n\ndef request_annotations(array, username, token):\n    beginDT = int(nc.date2num(datetime.datetime.strptime(\"2012-01-01T01:00:01Z\",'%Y-%m-%dT%H:%M:%SZ'),'seconds since 1970-01-01')*1000)\n    endDT = int(nc.date2num(datetime.datetime.utcnow(),'seconds since 1970-01-01')*1000) \n\n    refdes_in = DATA_TEAM_PORTAL_URL + array\n    refdes_list = pd.read_csv(refdes_in)\n    refdes_list = refdes_list[refdes_list['method'].str.contains(\"recovered\")==False]\n    refdes_list = refdes_list[refdes_list['stream_type'].str.contains(\"Science\")==False]\n    refdes_list = refdes_list[['reference_designator','method', 'stream_name']]\n    refdes_list.columns = ['refdes','method', 'stream']\n    refdes_list = refdes_list['refdes']\n\n    # added regex search to exclude or grab cabled assets to produce complete Endurance and Cabled Array outputs\n    if array == 'CE':\n        refdes_list = refdes_list[refdes_list.str.contains('|'.join(CE_cabled))==False]\n\n    if array == 'RS':\n        refdes_in_CE = DATA_TEAM_PORTAL_URL + 'CE'\n        refdes_list_CE = pd.read_csv(refdes_in_CE)\n        refdes_list_CE = refdes_list_CE[refdes_list_CE['method'].str.contains(\"recovered\")==False]\n        refdes_list_CE = refdes_list_CE[refdes_list_CE['stream_type'].str.contains(\"Science\")==False]\n        refdes_list_CE = refdes_list_CE[['reference_designator','method', 'stream_name']]\n        refdes_list_CE.columns = ['refdes','method', 'stream']\n        refdes_list_CE = refdes_list_CE['refdes']\n        refdes_list_CE = refdes_list_CE[refdes_list_CE.str.contains('|'.join(CE_cabled))]\n        refdes_list = pd.concat([refdes_list_CE,refdes_list])\n\n    refdes_list = refdes_list.drop_duplicates()\n\n    print(\" building annotation info requests...\")\n    anno_requests = []\n    for i in refdes_list:\n        request_url = ANNOTATIONS_URL+'beginDT='+str(beginDT)+'&endDT='+str(endDT)+'&refdes='+i\n        anno_requests.append(request_url)\n    \n    print(\" sending annotation info requests...\")\n    ref_des_list = []\n    future_to_url = {pool.submit(request_data, url, username, token): url for url in anno_requests}\n    for future in concurrent.futures.as_completed(future_to_url):\n        url_rf = future_to_url[future]\n        try:\n            anno_info = future.result()\n            anno_info = anno_info.json()\n            \n            for i in range(len(anno_info)):\n                if anno_info[i]['endDT'] is None and anno_info[i]['qcFlag'] == 'not_operational':\n                    refdes = url_rf[111:]\n                    ref_des_list.append(refdes)\n        except:\n            pass\n\n    \n    data_dict={\n    'refdes':ref_des_list}\n\n    not_operational = pd.DataFrame(data_dict, columns = ['refdes'])\n\n    return not_operational\n\n\n\n\n\n\ndef alert_request_eng_deployments(array, username, token):\n    refdes_in = DATA_TEAM_PORTAL_URL + array\n    refdes_list = pd.read_csv(refdes_in)\n    refdes_list = refdes_list[refdes_list['method'].str.contains(\"recovered\")==False]\n    refdes_list = refdes_list[refdes_list['stream_type'].str.contains(\"Science\")==False]\n    refdes_list = refdes_list[['reference_designator','method', 'stream_name']]\n    
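# rename to the short column names used throughout this module\n    refdes_list.columns = 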
['refdes','method', 'stream']\n    refdes_list = refdes_list['refdes']\n\n    # added regex search to exclude or grab cabled assets to produce complete Endurance and Cabled Array outputs\n    if array == 'CE':\n        refdes_list = refdes_list[refdes_list.str.contains('|'.join(CE_cabled))==False]\n\n    if array == 'RS':\n        refdes_in_CE = DATA_TEAM_PORTAL_URL + 'CE'\n        refdes_list_CE = pd.read_csv(refdes_in_CE)\n        refdes_list_CE = refdes_list_CE[refdes_list_CE['method'].str.contains(\"recovered\")==False]\n        refdes_list_CE = refdes_list_CE[refdes_list_CE['stream_type'].str.contains(\"Science\")==False]\n        refdes_list_CE = refdes_list_CE[['reference_designator','method', 'stream_name']]\n        refdes_list_CE.columns = ['refdes','method', 'stream']\n        refdes_list_CE = refdes_list_CE['refdes']\n        refdes_list_CE = refdes_list_CE[refdes_list_CE.str.contains('|'.join(CE_cabled))]\n        refdes_list = pd.concat([refdes_list_CE,refdes_list])\n\n    refdes_list = refdes_list.drop_duplicates()\n\n    print(\"working on\", array)\n    print(\" building deployment info requests...\")\n    asset_requests = []\n    for i in refdes_list:\n        sub_site = i[:8]\n        platform = i[9:14]\n        instrument = i[15:27]\n        asset_url_inputs = '/'.join((sub_site, platform, instrument))\n        request_url = DEPLOYEMENT_URL+asset_url_inputs+'/-1'\n        asset_requests.append(request_url)\n\n    print(\" sending deployment info requests...\")\n    ref_des_list = []\n    start_time_list = []\n    deployment_list = []\n\n    start_time = datetime.datetime.utcnow() - datetime.timedelta(seconds=86400)\n\n    future_to_url = {pool.submit(request_data, url, username, token): url for url in asset_requests}\n    for future in concurrent.futures.as_completed(future_to_url):\n        try:\n            asset_info = future.result()\n            asset_info = asset_info.json()\n\n            for i in range(len(asset_info)):\n                if asset_info[i]['eventStopTime'] is None:\n                    refdes = asset_info[i]['referenceDesignator']\n                    ref_des_list.append(refdes)\n            \n                    deployment = asset_info[i]['deploymentNumber']\n                    deployment_list.append(deployment)\n                    start_time_list.append(start_time)\n        except:\n            pass\n    \n    data_dict={\n    'refdes':ref_des_list,\n    'deployment':deployment_list,\n    'start_time':start_time_list}\n\n    alert_deployment_data = pd.DataFrame(data_dict, columns = ['refdes', 'deployment','start_time'])\n\n    return alert_deployment_data\n\n\n\n\n\n\n\ndef alert_build_eng_requests(array, alert_deployment_data):\n    print(\" building data request urls...\")\n\n    refdes_streams = DATA_TEAM_PORTAL_URL + array\n    refdes_streams_df = pd.read_csv(refdes_streams)\n    refdes_streams_df = refdes_streams_df[refdes_streams_df['method'].str.contains(\"recovered\")==False]\n    refdes_streams_df = refdes_streams_df[refdes_streams_df['stream_type'].str.contains(\"Science\")==False]\n    refdes_streams_df = refdes_streams_df[['reference_designator','method', 'stream_name']]\n    refdes_streams_df.columns = ['refdes','method', 'stream']\n    refdes_streams_df = refdes_streams_df[refdes_streams_df['method'].str.contains(\"recovered\")==False]\n\n    # regex search to exclude or grab cabled assets to produce complete Endurance and Cabled Array outputs\n    if array == 'CE':\n        refdes_streams_df = refdes_streams_df[refdes_streams_df['refdes'].str.contains('|'.join(CE_cabled))==False]\n\n    if array == 'RS':\n        refdes_streams_CE = DATA_TEAM_PORTAL_URL + 'CE'\n        refdes_streams_df_CE = pd.read_csv(refdes_streams_CE)\n        refdes_streams_df_CE = refdes_streams_df_CE[refdes_streams_df_CE['method'].str.contains(\"recovered\")==False]\n        
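# also drop science streams for the cabled subset, keeping engineering telemetry only\n        refdes_streams_df_CE = 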
refdes_streams_df_CE[refdes_streams_df_CE['stream_type'].str.contains(\"Science\")==False]\n refdes_streams_df_CE = refdes_streams_df_CE[['reference_designator','method', 'stream_name']]\n refdes_streams_df_CE.columns = ['refdes','method', 'stream']\n refdes_streams_df_CE = refdes_streams_df_CE[refdes_streams_df_CE['method'].str.contains(\"recovered\")==False]\n refdes_streams_df_CE = refdes_streams_df_CE[refdes_streams_df_CE['refdes'].str.contains('|'.join(CE_cabled))]\n refdes_streams_df = pd.concat([refdes_streams_df_CE,refdes_streams_df])\n\n refdes_streams_df = refdes_streams_df.drop_duplicates()\n\n request_inputs = pd.merge(refdes_streams_df,alert_deployment_data, on='refdes')\n\n request_inputs['subsite'] = request_inputs.refdes.str[:8]\n request_inputs['platform'] = request_inputs.refdes.str[9:14]\n request_inputs['instrument'] = request_inputs.refdes.str[15:27]\n request_inputs['date'] = pd.to_datetime(request_inputs['start_time'])\n request_inputs['date'] = request_inputs.date.dt.strftime('%Y-%m-%dT%H:%M:%S.000Z')\n\n\n request_inputs['urls'] = DATA_URL+\\\n request_inputs.subsite+\\\n '/'+request_inputs.platform+\\\n '/'+request_inputs.instrument+\\\n '/'+request_inputs.method+\\\n '/'+request_inputs.stream+\\\n '?beginDT='+request_inputs.date+\\\n '&limit=100'\n\n request_urls = request_inputs['urls'].drop_duplicates()\n request_urls = request_urls.values.tolist()\n\n return request_urls , request_inputs\n\n\n\n\n\n\n\n\n\ndef send_eng_data_requests(array,request_urls,username,token):\n\n print(' sending data requests...')\n eng_streams_data = pd.DataFrame()\n\n future_to_url = {pool.submit(request_data, url, username, token): url for url in request_urls}\n for future in concurrent.futures.as_completed(future_to_url):\n # url = future_to_url[future]\n try:\n data = future.result() \n data = data.json()\n\n refdes_list = []\n method_list = []\n stream_list = []\n value_list = []\n \n # use this to speed up the loop\n # df = pd.DataFrame.from_records(map(json.loads, map(json.dumps,data)))\n\n refdes = data[-1]['pk']['subsite'] + '-' + data[-1]['pk']['node'] + '-' + data[-1]['pk']['sensor']\n method = data[-1]['pk']['method']\n stream = data[-1]['pk']['stream']\n\n for ts in virtual_times:\n try:\n value_list.append(data[-1][ts])\n refdes_list.append(refdes)\n method_list.append(method)\n stream_list.append(stream)\n except:\n continue\n \n\n # create data frame from lists collected above\n data_dict = {\n 'refdes':refdes_list,\n 'method':method_list,\n 'stream':stream_list,\n 'value':value_list\n }\n response_data = pd.DataFrame(data_dict, columns = ['refdes','method','stream','value'])\n\n\n eng_streams_data = eng_streams_data.append(response_data)\n \n except:\n # print('no data for ', url)\n pass\n gc.collect()\n\n return eng_streams_data\n\n\n\n\n\n\n\n\ndef alert_create_eng_outputs(array,eng_streams_data,request_inputs):\n\n # stream level rollup\n stream_inputs = request_inputs[['refdes','method','stream']].copy()\n stream_inputs = stream_inputs.drop_duplicates()\n stream_result = eng_streams_data[['refdes','method','stream']].copy()\n stream_result = stream_result.drop_duplicates()\n missing_streams = stream_result.merge(stream_inputs,indicator=True,how='outer')\n missing_streams = missing_streams[missing_streams['_merge'] == 'right_only']\n del missing_streams['_merge']\n missing_streams['value'] = 0\n stream_result['value'] = 1\n stream_final = pd.concat([stream_result,missing_streams])\n\n return stream_final\n\n\n\n\n\n\n\n\n\ndef 
alert_create_missing_output(array,stream_final):\n    print('   writing output...')\n\n    stream_final_out = stream_final[stream_final['value'] == 0]\n    del stream_final_out['value']\n\n    stream_dir = out_dir + array+'/'+'engineering_stream'+'/'\n    create_dir(stream_dir)\n\n\n    out = stream_dir + array + '_eng_stream_data_'+ datetime.datetime.utcnow().strftime('%Y-%m-%d') + '.pd'\n    with open(out, 'wb') as fh:\n        pk.dump(stream_final_out,fh)\n\n    return stream_final_out\n\n\n\n\n\n\n\ndef compare_operational(not_operational, stream_final_out, request_inputs):\n    difference = stream_final_out.merge(not_operational,indicator=True, how='outer')\n    annotated_and_not_operational = difference[difference['_merge'] == 'both']\n    no_data_not_annotated = difference[difference['_merge'] == 'left_only']\n    data_but_annotated = difference[difference['_merge'] == 'right_only']\n    del annotated_and_not_operational['_merge']\n    del no_data_not_annotated['_merge']\n    data_but_annotated = data_but_annotated[['refdes']]\n\n    request_inputs = request_inputs[['refdes']]\n    request_inputs = request_inputs.drop_duplicates()\n    data_but_annotated = request_inputs.merge(data_but_annotated,indicator=True, how='outer')\n    data_but_annotated = data_but_annotated[data_but_annotated['_merge'] == 'both']\n    del data_but_annotated['_merge']\n    \n    return no_data_not_annotated, annotated_and_not_operational, data_but_annotated\n\n\n\n\n\n\n\ndef stream_compare_output(array, stream_final_out, stream_most_recent, request_inputs): \n    try:\n        print('   comparing stream output to most recent...')\n        difference = stream_most_recent.merge(stream_final_out,indicator=True, how='outer')\n        stream_difference_new = difference[difference['_merge'] == 'right_only']\n        stream_difference_resumed = difference[difference['_merge'] == 'left_only']\n        del stream_difference_new['_merge']\n        del stream_difference_resumed['_merge']\n\n        # check that the resumed stream is still expected\n        request_inputs = request_inputs[['refdes','method','stream']]\n        request_inputs = request_inputs.drop_duplicates()\n        stream_difference_resumed = request_inputs.merge(stream_difference_resumed,indicator=True, how='outer')\n        stream_difference_resumed = stream_difference_resumed[stream_difference_resumed['_merge'] == 'both']\n        del stream_difference_resumed['_merge']\n        return stream_difference_new , stream_difference_resumed\n\n    except:\n        print('   nothing to compare to...')\n        stream_difference_resumed = pd.DataFrame()\n        stream_difference_new = pd.DataFrame()\n        return stream_difference_new , stream_difference_resumed\n\n\n\n\n\n\n\n\ndef sendEmail(msg,recipients):\n    print('   sending alert...')\n    recipients = recipients\n    server = smtplib.SMTP('smtp.gmail.com:587')\n    server.ehlo()\n    server.starttls()\n    server.login(\"ooidatateam@gmail.com\", \"\")\n    server.sendmail(\"ooidatateam@gmail.com\",recipients,msg.as_string())\n\ndef print_html_doc(html_template,text):\n    return Environment().from_string(html_template).render(body=text)\n\ndef alert_send(array,no_data_not_annotated,annotated_and_not_operational,data_but_annotated,stream_difference_new,stream_difference_resumed,RS_recipients,CE_recipients,GA_recipients,CP_recipients):\n    html_template = \"\"\"\n    <html>\n    {{ body }}\n    </html>\n    \"\"\"\n\n    if stream_difference_new.empty and data_but_annotated.empty and stream_difference_resumed.empty:\n        text = '<br><br>Ongoing Issues<br><br>'\n        pass\n    else:\n        text = '<br><br>New Alert<br><br>'\n        # text = text + '<br>*Alerts are only sent once when there is a new alert. If an instrument resumes producing data a new alert will be sent.<br><br>'\n\n    if stream_difference_new.empty: \n        pass\n    else:\n        text = text + 'Engineering streams that have not produced data in the past 24 hours:<br><br>'\n        f = stream_difference_new.to_html()\n        text = text + str(f) + '<br><br>'\n\n    if data_but_annotated.empty:\n        pass\n    else:\n        text = text + 'Instruments that are annotated as not_operational, but have resumed producing data:<br><br>'\n        f = data_but_annotated.to_html()\n        text = text + str(f) + '<br><br>'\n\n    if stream_difference_resumed.empty:\n        pass\n    else:\n        text = text + 'Engineering streams that resumed producing data in the past 24 hours:<br><br>'\n        f = stream_difference_resumed.to_html()\n        text = text + str(f) + '<br><br>'\n\n\n\n\n\n    if stream_difference_new.empty and data_but_annotated.empty and stream_difference_resumed.empty:\n        pass\n    else: \n        text = text + '<br><br>Summary of New and Ongoing Issues<br><br>'\n    # text = text + '<br>' + '<br><br>'\n\n    if no_data_not_annotated.empty:\n        pass\n    else:\n        text = text + 'Engineering streams not producing data for over 24 hours:<br><br>'\n        f = no_data_not_annotated.to_html()\n        text = text + str(f) + '<br><br>'\n\n    if annotated_and_not_operational.empty:\n        pass\n    else:\n        text = text + 'Engineering streams not producing data and annotated as not_operational:<br><br>'\n        f = annotated_and_not_operational.to_html()\n        text = text + str(f) + '<br><br>
'\n    \n\n\n\n    subject = 'Engineering Streams Alert for ' + array + ' on ' + datetime.datetime.utcnow().strftime('%Y-%m-%d')\n    text = print_html_doc(html_template,text)\n\n    msg = MIMEText(text,'html')\n    msg['Subject'] = subject\n\n    if array == 'RS':\n        recipients = RS_recipients\n        sendEmail(msg,recipients)\n    elif array == 'CE':\n        recipients = CE_recipients\n        sendEmail(msg,recipients)\n    elif array == 'GA':\n        recipients = GA_recipients\n        sendEmail(msg,recipients)\n    elif array == 'GI':\n        recipients = GA_recipients\n        sendEmail(msg,recipients)\n    elif array == 'GP':\n        recipients = GA_recipients\n        sendEmail(msg,recipients)\n    elif array == 'GS':\n        recipients = GA_recipients\n        sendEmail(msg,recipients)\n    elif array == 'CP':\n        recipients = CP_recipients\n        sendEmail(msg,recipients)\n","sub_path":"alerts/engineering_alerts_functions.py","file_name":"engineering_alerts_functions.py","file_ext":"py","file_size_in_byte":20047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"491643779","text":"from fastai.vision import *\nfrom tqdm.notebook import tqdm\nfrom pathlib import Path\n\ndata = (SegmentationItemList.from_folder(\"images\", presort=True)\n        .split_none()\n        .label_from_func(lambda x: str(x).replace(\"image\", \"mask\"), classes=np.array([\"background\",\"left_ventricle\",\"myocardium\"]))\n        .transform(None,size=256,padding_mode=\"zeros\",resize_method=ResizeMethod.PAD,tfm_y=True)\n        .databunch(bs=8)\n        .normalize(imagenet_stats))\n\nfor i in tqdm(range(len(data.train_ds))):\n    fname = str(data.train_ds.items[i])\n    scaled_img = data.train_ds[i][0]\n    scaled_mask = data.train_ds[i][1]\n    scaled_img.save(\"scaled_\"+fname)\n    scaled_mask.save(\"scaled_\"+fname.replace(\"image\",\"mask\"))\n\n# Repeat for additional unscaled masks e.g. from AutoQ or second expert","sub_path":"code/7T/rescale_images_masks.py","file_name":"rescale_images_masks.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"189186877","text":"'''\nlinearly interpolates data\n'''\n\nclass Interpolate(object):\n\n    def __init__(self, data):\n        self.data = data\n        self.xVals = [item[0] for item in data]\n        self.yVals = [item[1] for item in data]\n        self.xMax = max(self.xVals)\n        self.xMin = min(self.xVals)\n\n    def eval(self, x):\n        if type(x) is list:\n            x = x[0]\n\n        if x > self.xMax:\n            lastDiff = (self.yVals[-1]-self.yVals[-2]) / (self.xVals[-1]-self.xVals[-2])\n            y = lastDiff * (x - self.xMax) + self.yVals[-1]\n        elif x < self.xMin:\n            firstDiff = (self.yVals[1]-self.yVals[0]) / (self.xVals[1]-self.xVals[0])\n            y = firstDiff * (x - self.xMin) + self.yVals[0]\n        else:\n            index = self.findIndex(x)\n            if self.xVals[index] == x:\n                y = self.yVals[index]  # exact grid point: return the stored y value\n            else:\n                diff = (self.yVals[index+1]-self.yVals[index]) / (self.xVals[index+1]-self.xVals[index])\n                y = diff*(x-self.xVals[index]) + self.yVals[index]\n\n        return y\n\n    #returns last index i where xVals[i] < x\n    def findIndex(self, x):\n        i = 0\n        while self.xVals[i] < x:\n            i += 1\n        return i-1\n","sub_path":"Interpolate.py","file_name":"Interpolate.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"461280516","text":"##########################################################################\n#\n# Copyright (c) 2019, Hypothetical Inc. 
All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n#      * Redistributions of source code must retain the above\n#        copyright notice, this list of conditions and the following\n#        disclaimer.\n#\n#      * Redistributions in binary form must reproduce the above\n#        copyright notice, this list of conditions and the following\n#        disclaimer in the documentation and/or other materials provided with\n#        the distribution.\n#\n#      * Neither the name of Hypothetical Inc. nor the names of\n#        any other contributors to this software may be used to endorse or\n#        promote products derived from this software without specific prior\n#        written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n##########################################################################\n\nimport GafferDispatch\n\nclass GafferDeadlineTask(object):\n    \"\"\" Mimic the Deadline representation of a task:\n    - tasks are a sequential range of frames indicated by the start frame and end frame\n    - tasks can only be associated with one job and therefore one batch / Gaffer Task Node\n    \"\"\"\n    def __init__(self, gaffer_batch, task_number, start_frame=None, end_frame=None):\n        self._start_frame = None\n        self._end_frame = None\n\n        self.setGafferBatch(gaffer_batch)\n        self.setStartFrame(start_frame)\n        self.setEndFrame(end_frame)\n        self.setTaskNumber(task_number)\n\n        if self._start_frame is None and self.getGafferBatch() is not None and len(self.getGafferBatch().frames()) > 0:\n            self.setStartFrame(gaffer_batch.frames()[0])\n        if self._end_frame is None and self.getGafferBatch() is not None and len(self.getGafferBatch().frames()) > 0:\n            self.setEndFrame(gaffer_batch.frames()[len(gaffer_batch.frames()) - 1])\n\n    def setTaskNumber(self, task_number):\n        assert(type(task_number) == int)\n        self._task_number = task_number\n\n    def getTaskNumber(self):\n        return self._task_number\n\n    def setGafferBatch(self, gaffer_batch):\n        assert(gaffer_batch is None or type(gaffer_batch) == GafferDispatch.Dispatcher._TaskBatch)\n        self._gaffer_batch = gaffer_batch\n\n    def getGafferBatch(self):\n        return self._gaffer_batch\n\n    def setFrameRange(self, start_frame, end_frame):\n        if end_frame < start_frame:\n            raise ValueError(\"End frame must be greater than start frame.\")\n        if int(start_frame) != start_frame or int(end_frame) != end_frame:\n            raise ValueError(\"Start and end frames must be integers.\")\n        self._start_frame = int(start_frame)\n        self._end_frame = int(end_frame)\n\n    def setFrameRangeFromList(self, frame_list):\n        frames_sequential = True\n        if len(frame_list) > 0:\n            if int(frame_list[0]) != frame_list[0]:\n                raise ValueError(\"Frame numbers must be integers.\")\n            for i in range(1, len(frame_list)):\n                
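# every frame must be an integer and exactly one greater than its predecessor\n                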
if int(frame_list[i]) != frame_list[i]:\n                    raise ValueError(\"Frame numbers must be integers.\")\n                if frame_list[i] - frame_list[i-1] != 1:\n                    frames_sequential = False\n\n            if not frames_sequential:\n                raise ValueError(\"Frame list must be sequential.\")\n            self._start_frame = int(frame_list[0])\n            self._end_frame = int(frame_list[len(frame_list) - 1])\n        else:\n            self.setStartFrame(None)\n            self.setEndFrame(None)\n\n    def setStartFrame(self, start_frame):\n        if self._end_frame is not None and start_frame is not None and start_frame > self._end_frame:\n            raise ValueError(\"Start frame must be less than end frame.\")\n        if start_frame is not None:\n            if int(start_frame) != start_frame:\n                raise ValueError(\"Frame numbers must be integers.\")\n            self._start_frame = int(start_frame)\n        else:\n            self._start_frame = None\n\n    def getStartFrame(self):\n        return self._start_frame\n\n    def setEndFrame(self, end_frame):\n        if self._start_frame is not None and end_frame is not None and end_frame < self._start_frame:\n            raise ValueError(\"End frame must be greater than start frame.\")\n        if end_frame is not None:\n            if int(end_frame) != end_frame:\n                raise ValueError(\"Frame numbers must be integers.\")\n            self._end_frame = int(end_frame)\n        else:\n            self._end_frame = None\n\n    def getEndFrame(self):\n        return self._end_frame\n","sub_path":"python/GafferDeadline/GafferDeadlineTask.py","file_name":"GafferDeadlineTask.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"555980334","text":"from random import randint\r\n\r\n# what is the probability that two of the sampled numbers (days) are the same? (the birthday problem)\r\n\r\n\r\nfor k in range(20, 50): # group size\r\n    licz = 0\r\n    n_iter = 1000\r\n\r\n    for j in range(n_iter):\r\n        t = {}\r\n\r\n        for i in range(1, 366): t[i] = 0\r\n\r\n        for i in range(k):\r\n            dz = randint(1, 365)\r\n            t[dz] = t[dz] + 1\r\n        ok = False\r\n\r\n\r\n        for i in range(1, 366): #len(t)+1):\r\n            if t[i] > 1:\r\n                ok = True\r\n        if ok:\r\n            licz = licz + 1\r\n\r\n    #print(licz)\r\n    print('k: ', k, ' Probability:', licz/10, '%')\r\n\r\n","sub_path":"python/p_lab_5.py","file_name":"p_lab_5.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"520169984","text":"from copy import copy\n\nfrom catan import config, logger\nfrom catan.game.actions import get_action_by_id, get_legal_action_ids\nfrom catan.game.resource import resources\nfrom catan.game.constants import PLAYER_X, PLAYER_Y\nfrom catan.game.piece import City, Road, Settlement\nfrom catan.agents import Human\n\n\ndef action(func):\n    def action_wrapper(*args, **kwargs):\n        player = args[0]\n\n        log_func = logger.debug if player.game.depth == 0 else logger.trace\n        log_func(\n            data={\n                'num': player.num,\n                'player': player.name,\n                'depth': player.game.depth,\n                'function': func.__name__\n            },\n            tags='actions'\n        )\n\n        func(*args, **kwargs)\n\n        player.game.draw()\n\n        if not player.is_cpu:\n            player.game.turn_loop()\n\n    return action_wrapper\n\n\nclass Player:\n    def __init__(self, game, model=None, name=None):\n        self.game = game\n\n        self.model = model\n        if self.model:\n            self.model.player = self\n            self.is_cpu = model.is_cpu\n            self.name = name or model.name\n        else:\n            self.is_cpu = False\n            self.name = name\n\n        self._num = None\n        self.color = None\n\n        # wood, brick, grain, sheep, ore\n        self.resource_cards = [0] * 5\n        # self.resource_cards = [7, 7, 3, 3, 0]\n        self.resource_generation = [0] * 5\n        
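# per-resource production counters, in the same wood/brick/grain/sheep/ore order as resource_cards\n        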
self.development_cards = []\n self.cities = []\n self.roads = []\n self.settlements = []\n self.turn_num = 0\n\n @property\n def num(self):\n return self._num\n\n @num.setter\n def num(self, num):\n self._num = num\n\n if self._num == 0:\n self.color = 'purple'\n elif self._num == 1:\n self.color = 'blue'\n elif self._num == 2:\n self.color = 'maroon'\n else:\n self.color = 'cyan'\n\n @property\n def victory_points(self):\n return len(self.settlements) + 2*len(self.cities)\n\n @property\n def has_won(self):\n return self.victory_points >= config['game']['victory_points_to_win']\n\n @property\n def num_remaining_cities(self):\n return City.max_per_player - len(self.cities)\n\n @property\n def num_remaining_roads(self):\n return Road.max_per_player - len(self.roads)\n\n @property\n def num_remaining_settlements(self):\n return Settlement.max_per_player - len(self.settlements)\n\n @action\n def end_turn(self):\n if not self.game.can_end_turn():\n raise Exception(f'ERROR {self.name} cannot end turn')\n\n self.game.end_turn()\n\n @action\n def roll(self):\n if not self.game.can_roll():\n raise Exception(f'ERROR {self.name} cannot roll')\n\n self.game.roll()\n\n @action\n def trade(self, give_resource, receive_resource):\n if self.resource_cards[give_resource] < 4:\n raise Exception(f'ERROR {self.name} cannot trade - not enough {give_resource} to give')\n\n self.resource_cards[give_resource] -= 4\n self.resource_cards[receive_resource] += 1\n\n @action\n def build(self, piece_type, position):\n if position.piece:\n if not (isinstance(position.piece, Settlement) and piece_type == City):\n raise Exception(f'ERROR {self.name} cannot place piece - piece already exists there')\n\n if self.game.is_setup_phase():\n if piece_type == City:\n return\n if piece_type == Road and self.game.player.turn_num <= len(self.game.player.roads):\n return\n if piece_type == Settlement and self.game.player.turn_num <= len(self.game.player.settlements):\n return\n elif self.can_afford(piece_type):\n for i in range(5):\n self.resource_cards[i] -= piece_type.cost[i]\n else:\n return\n\n if piece_type == City:\n self.settlements.remove(position.piece)\n\n piece = piece_type(position, self)\n self.add_piece(piece)\n position.piece = piece\n\n def get_legal_action_ids(self):\n return get_legal_action_ids(self.game)\n\n def do_action(self):\n legal_action_ids = self.get_legal_action_ids()\n if self.game.can_roll():\n self.roll()\n elif len(legal_action_ids) == 1:\n func1, args, kwargs = get_action_by_id(self.game, legal_action_ids[0])\n func1(*args, **kwargs)\n else:\n self.model.do_action()\n\n def can_afford(self, piece):\n if piece.get_num_placed_by(self) >= piece.max_per_player:\n return False\n\n if self.game.is_setup_phase():\n if piece == Settlement and len(self.settlements) < self.turn_num:\n return True\n if piece == Road and len(self.roads) == len(self.settlements) - 1:\n return True\n return False\n\n cost = piece.cost\n for i in range(5):\n if self.resource_cards[i] < cost[i]:\n return False\n\n return True\n\n def add_piece(self, piece):\n if isinstance(piece, Road):\n self.roads.append(piece)\n return\n\n if isinstance(piece, City):\n self.cities.append(piece)\n elif isinstance(piece, Settlement):\n self.settlements.append(piece)\n\n self.resource_generation = [sum(x) for x in zip(self.resource_generation, piece.resource_generation)]\n\n def draw_name_banner(self, x, y):\n if 'name_banner' in self.game.graphics:\n self.game.c.delete(self.game.graphics['name_banner'][0])\n 
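# name_banner holds (rect, text); remove the stale text item as well\n            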
self.game.c.delete(self.game.graphics['name_banner'][1])\n\n rect = self.game.c.create_rectangle(x, y, x + 20, y + 60, fill=self.color)\n text = self.game.c.create_text(x + 30, y - 16, fill=\"white\", text=self.name, font=\"default 60 bold\", anchor=\"nw\")\n self.game.graphics['name_banner'] = (rect, text)\n\n def draw_resource_cards(self, x, y):\n y += 100\n _i = 0\n\n if 'res_cards' not in self.game.graphics:\n self.game.graphics['res_cards'] = [None] * len(self.resource_cards)\n\n for res in resources:\n if res.name == 'water' or res.name == 'desert':\n continue\n\n if self.game.graphics['res_cards'][_i]:\n self.game.c.delete(self.game.graphics['res_cards'][_i][0])\n self.game.c.delete(self.game.graphics['res_cards'][_i][1])\n\n rect = self.game.c.create_rectangle(x, y, x + 70, y + 100, outline=res.color, width=4)\n text = self.game.c.create_text(x + 35, y + 50, text=self.resource_cards[_i], font=\"default 50\", fill=res.color)\n\n self.game.graphics['res_cards'][_i] = (rect, text)\n x += 90\n _i += 1\n\n def draw_remaining_pieces(self, x, y):\n if 'remaining_pieces' in self.game.graphics:\n self.game.c.delete(self.game.graphics['remaining_pieces'][0])\n self.game.c.delete(self.game.graphics['remaining_pieces'][1])\n self.game.c.delete(self.game.graphics['remaining_pieces'][2])\n\n roads = self.game.c.create_text(\n x, y, fill=\"white\", text=f\"R - {self.num_remaining_roads}\", font=\"default 60 bold\", anchor=\"nw\")\n\n settlements = self.game.c.create_text(\n x, y + 80, fill=\"white\", text=f\"S - {self.num_remaining_settlements}\", font=\"default 60 bold\", anchor=\"nw\")\n\n cities = self.game.c.create_text(\n x, y + 160, fill=\"white\", text=f\"C - {self.num_remaining_cities}\", font=\"default 60 bold\", anchor=\"nw\")\n\n self.game.graphics['remaining_pieces'] = (roads, settlements, cities)\n\n def draw(self):\n self.draw_name_banner(PLAYER_X, PLAYER_Y)\n self.draw_resource_cards(PLAYER_X + 38, PLAYER_Y)\n self.draw_remaining_pieces(PLAYER_X + 30, PLAYER_Y + 220)\n\n def stringify_stats(self):\n roads = len(self.roads)\n settlements = len(self.settlements)\n cities = len(self.cities)\n\n stat_string = f\"\"\"\\\n| Name: {self.name}\n| Victory Points: {self.victory_points}\n| Roads: {roads}\n| Settlements: {settlements}\n| Cities: {cities}\n| Resource Cards: {self.resource_cards}\n| Development Cards: {[]}\n\"\"\"\n return stat_string\n\n def to_dict(self):\n return {\n 'name': self.name,\n 'num': self.num,\n 'model': type(self.model).__name__,\n 'victory_points': self.victory_points,\n 'num_roads': len(self.roads),\n 'num_settlements': len(self.settlements),\n 'num_cities': len(self.cities),\n 'resource_cards': self.resource_cards\n }\n\n def copy(self, game):\n p = Player(game)\n p.resource_cards = copy(self.resource_cards)\n p.resource_generation = copy(self.resource_generation)\n p.development_cards = copy(self.development_cards)\n p.cities = [city.copy(game, self) for city in self.cities]\n p.settlements = [settlement.copy(game, self) for settlement in self.settlements]\n p.roads = [road.copy(game, self) for road in self.roads]\n p.turn_num = self.turn_num\n p.is_cpu = self.is_cpu\n p.name = self.name\n p.num = self.num\n p.color = self.color\n\n return p\n","sub_path":"catan/game/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":9299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"345863113","text":"import RPi.GPIO as GPIO\nimport 
time\nLED=26\nKEY=20\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(LED,GPIO.OUT)\nGPIO.setup(KEY,GPIO.IN,GPIO.PUD_UP)\n\np=GPIO.PWM(LED,50)\np.start(0)\ncycle=1\n\nwhile (True):\n time.sleep(0.02)\n if cycle==1:\n for dc in range(1,101,5):\n if GPIO.input(KEY)==0:\n cycle=2\n break\n p.ChangeDutyCycle(dc)\n time.sleep(0.02)\n \n if cycle==2:\n for dc in range(100,-1,-5):\n if GPIO.input(KEY)==0:\n cycle=1\n break\n p.ChangeDutyCycle(dc)\n time.sleep(0.02)\n \n while True:\n if GPIO.input(KEY)==0:\n if cycle==1:\n cycle=2\n if cycle==2:\n cycle=1\n break\n \n ","sub_path":"reference/python_projects1/python_projects1/OutIn.py","file_name":"OutIn.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"223915221","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef nothing(x): # 滑动条的回调函数\n pass\n\n\nsrc = cv2.imread('test21_2.jpg') # 图片1\nimgray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\nret, thresh = cv2.threshold(imgray, 127, 255, 0)\nWindowName = 'Approx' # 窗口名\ncv2.namedWindow(WindowName, cv2.WINDOW_AUTOSIZE) # 建立空窗口\n\ncv2.createTrackbar('epsilon', WindowName, 0, 10, nothing) # 两张图片间转换\n\n\nwhile(1):\n img = src.copy()\n n = 10 - cv2.getTrackbarPos('epsilon', WindowName) # 获取a1滑动条值\n\n contours, hierarchy = cv2.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n cnt = contours[0]\n length = cv2.arcLength(cnt, True)\n\n epsilon = (n/100)*length\n approx = cv2.approxPolyDP(cnt, epsilon, True)\n M = cv2.moments(approx)\n area = cv2.contourArea(approx)\n length1 = cv2.arcLength(approx, True)\n\n cv2.drawContours(img, approx, -1, (0, 255, 0), 3)\n cv2.polylines(img, [approx], True, (0, 255, 0), 3)\n\n font = cv2.FONT_HERSHEY_SIMPLEX # 设置字体样式\n text1 = 'Area: '+str(int(area))+' Length: '+str(int(length1))\n text2 = 'epsilon = ' + str(n) + '%'\n cv2.putText(img, text1, (10, 30), font, 0.5,\n (0, 255, 0), 1, cv2.LINE_AA, 0)\n cv2.putText(img, text2, (10, 60), font, 0.5,\n (0, 255, 0), 1, cv2.LINE_AA, 0)\n\n cv2.imshow(WindowName, img)\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\ncv2.destroyAllWindows()\n","sub_path":"outline2.py","file_name":"outline2.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"173755287","text":"class Solution:\n def robLinear(self, nums: List[int]) -> int:\n if not nums: return 0\n elif len(nums) < 3: return max(nums)\n \n R = [0] * len(nums)\n R[0],R[1] = nums[0], max(nums[0], nums[1])\n for i in range(2, len(nums)):\n R[i] = max(nums[i] + R[i-2], R[i-1])\n return R[-1]\n \n def rob(self, nums: List[int]) -> int:\n \n if not nums: \n return 0\n elif len(nums) == 1:\n return nums[0]\n \n first = self.robLinear(nums[:-1])\n second = self.robLinear(nums[1:])\n return max(first, second)","sub_path":"week-3/dynamic-programming/HouseRobberII.py","file_name":"HouseRobberII.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"468831039","text":"'''\nCreated on Apr 16, 2012\n\n@author: JG\n'''\n\nfrom django.contrib.sessions.backends.base import SessionBase, CreateError\nfrom django.conf import settings\nfrom django.utils.encoding import force_unicode\n\nimport pymongo\n\nclass SessionStore(SessionBase):\n ''' MongoDB store for sessions '''\n \n mongo_session_id = None\n \n def __init__(self, session_key=None):\n '''\n Start up\n '''\n self.dbConn = 
pymongo.Connection(host = settings.SESSION_MONGO_HOST,\n port = settings.SESSION_MONGO_PORT,\n network_timeout = settings.SESSION_MONGO_SOCKET_TIMEOUT)\n \n self.mongo = self.dbConn[settings.SESSION_MONGO_DB][settings.SESSION_MONGO_COLLECTION]\n \n super(SessionStore, self).__init__( session_key )\n \n\n \n\n def load(self):\n '''\n '''\n session_data = self.mongo.find_one({\"ssk\":self._get_or_create_session_key()})\n \n if session_data is not None:\n self.mongo_session_id = session_data['_id']\n return self.decode(force_unicode(session_data['session_data']))\n else:\n self.create()\n return {}\n\n\n def create(self):\n '''\n Tries to create the document up to 10 times, raises an error if it cannot create it \n '''\n\n self._session_key = self._get_new_session_key()\n self.save()\n self._session_cache = {}\n \n \n def save(self, must_create = False ):\n\n session_data = self._get_session( no_load=True )\n \n if must_create:\n self._session_key = self._get_new_session_key()\n else:\n self._session_key = self._get_or_create_session_key()\n \n max_attempts = 10\n attempt = 0\n while True:\n try:\n encoded_data = self.encode(session_data)\n document = { \"ssk\":self._get_session_key(), \"session_data\":encoded_data}\n if self.mongo_session_id is not None:\n document['_id'] = self.mongo_session_id\n \n self.mongo.save(document, manipulate = True, safe = True )\n self.mongo_session_id = document['_id']\n \n self.modified = True\n return\n except pymongo.errors.OperationFailure:\n # extremely unlikely\n if attempt == max_attempts:\n raise CreateError\n else:\n attempt += 1\n continue\n\n\n def exists(self, session_key):\n if self.mongo.find_one({\"ssk\":session_key}) == None:\n return False\n else:\n return True\n\n\n def delete(self, session_key=None):\n to_delete = (session_key or self._session_key)\n self.mongo.remove({\"ssk\":to_delete})\n","sub_path":"db_mongo.py","file_name":"db_mongo.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"479477023","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n#author:Abel\nimport configparser\nimport os\nimport json\nimport datetime\nimport time\nfrom modules import mail_modules\n\n#所以用户的{邮件地址:用户信息文件}\nUSER_MAIL={}\n\n\n\ndef user_mail_func():\n \"\"\"\n 此函数用于获取所有用户的邮件地址与用户信息文件,并保存到USER_MAIL字典\n :return:\n \"\"\"\n loads_setting_func()\n tmp=os.path.join(SERVER_HOME,USER_DATA)\n #根据遍历USER_DATA目录来查看有多少个用户,然后根据遍历的卡号获取到用户信息文件\n card_number=os.listdir(tmp)\n for line in card_number :\n user_file=os.path.join(SERVER_HOME,USER_DATA,line,line)\n date=date=datetime.datetime.now() - datetime.timedelta(days=30)\n user_logs=os.path.join(SERVER_HOME,USER_LOGS,line,str(date.strftime(\"%Y-%m\"))+\".logs\")\n user_info=json.load(open(user_file,\"r\"))\n m_tmp={user_info[\"mail\"]:user_logs}\n USER_MAIL.update(m_tmp)\n\n\ndef loads_setting_func():\n \"\"\"\n 此函数用于加载配置文件\n :return: 无return值\n \"\"\"\n conf=configparser.ConfigParser()\n conf.read(\"../conf/server.conf\")\n global SERVER_HOME,USER_DATA,USER_LOGS\n SERVER_HOME=conf.get(\"admin.data\",\"ADMIN_HOME\")\n USER_DATA=conf.get(\"user\",\"USER_DATA\")\n USER_LOGS=conf.get(\"user\",\"USER_LOGS\")\n\n\n\ndef send_bill():\n \"\"\"\n 此函数用于将邮件地址与账单信息传值给mail_modules模块,将账单通过邮件发送给用户\n :return:\n \"\"\"\n user_mail_func()\n for line in USER_MAIL:\n mail=line\n if os.path.exists(USER_MAIL[line]):\n with open(USER_MAIL[line],\"r\",encoding=\"utf-8\") as file :\n mes=file.read()\n mail_modules.mail_func(line,mes)\n
file.close()\n else:\n mes=\"没有消费记录\"\n mail_modules.mail_func(line,mes)\n\n\n\n\ndef de_money():\n \"\"\"\n 此函数用于信用卡定期还款使用,直接扣除储蓄账户内余额\n :return:\n \"\"\"\n func=\"定期还款\"\n loads_setting_func()\n tmp=os.path.join(SERVER_HOME,USER_DATA)\n card_number=os.listdir(tmp)\n for line in card_number :\n user_file=os.path.join(SERVER_HOME,USER_DATA,line,line)\n user_info=json.load(open(user_file,\"r\"))\n quota=user_info[\"quota\"]\n now_quota=user_info[\"now_quota\"]\n arrears=quota-now_quota\n balance=user_info[\"deposit_balance\"]\n if balance >= arrears:\n balance -= arrears\n user_info[\"deposit_balance\"]=balance\n user_info[\"now_quota\"]=quota\n json.dump(user_info,open(user_file,\"w\"))\n else :\n user_info[\"deposit_balance\"]=0\n user_info[\"now_quota\"]=now_quota+balance\n json.dump(user_info,open(user_file,\"w\"))\n\n\ndef run_func():\n \"\"\"\n 此函数用于定期调用账单功能与还款功能,每月15号出账单,20号还款\n :return:\n \"\"\"\n flag=1\n while True:\n date=datetime.datetime.now().strftime(\"%d\")\n if date == \"15\" and flag :\n send_bill()\n flag = 0\n continue\n elif date == \"20\" and flag :\n de_money()\n flag =0\n continue\n else:\n time.sleep(86400)\n flag =1\n continue","sub_path":"day4/ATM/modules/cron_modules.py","file_name":"cron_modules.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"206619701","text":"#!/usr/bin/env python\n\nimport re\nfrom urllib import unquote\nfrom lxml import etree\n\nimport xdfile\n\ndef udecode(s):\n t = unquote(s)\n try:\n return unicode(t.decode(\"utf-8\"))\n except:\n return unicode(t)\n\ndef parse_uxml(content, filename):\n POSSIBLE_META_DATA = ['Title', 'Author', 'Editor', 'Copyright', 'Category']\n\n try:\n content = content.decode(\"utf-8\")\n except:\n try:\n content = content.decode(\"cp1252\")\n except:\n pass # last ditch effort, just try the original string\n\n content = content.replace(\"&\", \"&amp;\")\n content = content.replace('\"<\"', '\"&lt;\"')\n content = content.replace(\"''\", '&quot;')\n content = content.replace(\"\\x12\", \"'\") # ^R seems to be '\n content = content.replace(\"\\x05\", \"'\") # ^E seems to be junk\n\n content = re.sub(r'=\"\"(\\S)', r'=\"&quot;\\1', content) # one case has c=\"\"foo\"\". 
sheesh\n content = re.sub(r'(\\.)\"\"', r'\\1"\"', content)\n\n try:\n root = etree.fromstring(content)\n except:\n xml = re.search(r\"<(\\w+).*?\", content, flags=re.DOTALL).group()\n root = etree.fromstring(xml)\n\n # init crossword\n rows = int(root.xpath('//crossword/Height')[0].attrib['v'])\n cols = int(root.xpath('//crossword/Width')[0].attrib['v'])\n xd = xdfile.xdfile()\n\n # add meta data\n for item in POSSIBLE_META_DATA:\n try:\n text = root.xpath('//crossword/' + item)[0].attrib['v']\n if text:\n xd.headers.append((item, unquote(text)))\n except:\n pass\n\n # add puzzle\n all_answers = root.xpath('//crossword/AllAnswer')[0].attrib['v']\n all_answers = all_answers.replace('-', xdfile.BLOCK_CHAR)\n index = 0\n while index < len(all_answers):\n row = all_answers[index:index+cols]\n xd.grid.append(u\"\".join(row))\n index += cols\n\n # add clues\n for clue_type in ('across', 'down'):\n for clue in root.xpath('//crossword/'+clue_type)[0].getchildren():\n number = int(clue.attrib['cn'])\n text = udecode(clue.attrib['c'].strip())\n solution = clue.attrib['a'].strip()\n xd.clues.append(((clue_type[0].upper(), number), text, solution))\n\n return xd\n\nif __name__ == \"__main__\":\n xdfile.main_parse(parse_uxml)\n\n","sub_path":"src/uxml2xd.py","file_name":"uxml2xd.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"570458422","text":"d=[{1:10}, {2:20}, {1:30}]\n# {1:[10,30], 2:[20]}\n\nnew_d={}\n\n#for item in d:\n# for key in item:\n# if key not in new_d:\n# new_d[key] = [item[key]]\n# else:\n# new_d[key].append(item[key])\n#\n\n\nfor item in d:\n for key in item:\n new_d.setdefault(key, []).append(item[key])\n\n\nprint(new_d)\n\n\n\nd2=[[1,10], [2,20], [1,30]]\n# {1:[10,30], 2:[20]}\n\nnew_d2={}\n\n\nfor item in d2:\n new_d2.setdefault(item[0],[]).append(item[1])\n\n\n\nprint(new_d2)\n\n\n\n\n\n","sub_path":"PYTHON_CSC108/git/Exercises/141109_132747/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"88987849","text":"import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM\nimport tensorflow as tf\n\ntf.set_random_seed(777) # for reproducibility\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ndef create_layer(pre_layer=None, input_dim=None, output_dim=None, weight_name=\"weight\", bias_name=\"bias\"):\n W = tf.Variable(tf.random_normal([input_dim, output_dim]), name=weight_name)\n b = tf.Variable(tf.random_normal([output_dim]), name=bias_name)\n layer = tf.sigmoid(tf.matmul(pre_layer, W) + b)\n\n return layer, output_dim\n\nmnist = input_data.read_data_sets(\"MNIST_data/\")\n\nprint(mnist.train.images)\nprint(mnist.test.labels)\nprint(mnist.train.images.shape)\nprint(mnist.test.labels.shape)\nprint(type(mnist.train.images))\n\n\nx_train = mnist.train.images.reshape((55000, 28*28, 1))\nx_test = mnist.test.images.reshape((10000, 28*28, 1))\ny_train = mnist.train.labels\ny_test = mnist.test.labels\n#################################################\n#### 코딩하시오. X, Y, W, b, hypothesis, cost, train\n#######################################################\nX = tf.placeholder(tf.float32, shape=[None, 28*28])\nY = tf.placeholder(tf.float32, shape=[None, 1])\n\n\n# 2. 
모델 구성\nmodel = Sequential()\n\nmodel.add(LSTM(64, input_shape=(28*28,1)))\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dense(132, activation='relu'))\nmodel.add(Dense(138, activation='relu'))\nmodel.add(Dense(1))\n\nmodel.summary()\n\n# 3.훈련\nmodel.compile(loss='mse', optimizer='adam', metrics=['mse'])\n\nfrom keras.callbacks import EarlyStopping\nearly_stopping = EarlyStopping(monitor='loss', patience=30, mode='auto')\nmodel.fit(x_train, y_train, epochs=50, batch_size=2048, verbose=1, callbacks=[early_stopping])\n\nloss, acc = model.evaluate(x_test, y_test)\n\ny_predict = model.predict(x_test)\n\nprint('loss :', loss)\nprint('y_predict(x_test) :', y_predict)\n","sub_path":"tf/Day190826/tf21_rnn_mnist_keras.py","file_name":"tf21_rnn_mnist_keras.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"632268773","text":"import django_filters\nfrom score_system.models import Score,ScoreLog\n\nclass ScoreListFilter(django_filters.FilterSet):\n studentid = django_filters.CharFilter(field_name='userprofile__user__student_id',lookup_expr='contains')\n professional_class = django_filters.CharFilter(field_name='userprofile__professional_class',lookup_expr='contains')\n current_score = django_filters.RangeFilter()\n total_score = django_filters.RangeFilter()\n term_score = django_filters.RangeFilter()\n\n class Meta:\n model = Score\n fields = ['userprofile__user__student_id','userprofile__professional_class','term_score','current_score','total_score']\n\nclass ScoreLogListFilter(django_filters.FilterSet):\n time = django_filters.DateTimeFromToRangeFilter()\n member_studentid = django_filters.CharFilter(field_name='member__student_id',lookup_expr='contains')\n goal_studentid = django_filters.CharFilter(field_name='score__userprofile__user__student_id',lookup_expr='contains')\n\n class Meta:\n model = ScoreLog\n fields = ['time','member__student_id','score__userprofile__user__student_id']","sub_path":"score_system/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"133857541","text":"#!/usr/bin/env python\nimport sys\n\nfrom crawlers.ebay.product import EbayProductCrawler\nfrom crawlers.ebay.search import EbaySearchCrawler\n\nfrom settings import (\n EBAY_SEARCH_CRAWLER_SETTINGS,\n EBAY_PRODUCTS_CRAWLER_SETTINGS,\n EBAY_APPLICATION_SETTINGS\n)\n\nfrom main import (\n logger,\n message_bus,\n database_client,\n read_arguments\n)\n\nif __name__ == '__main__':\n # Считывает аргументы из командной строки\n crawler_args = read_arguments()\n\n if crawler_args.module == 'search':\n # Если нужно и��ициализировать обработчик search очереди\n crawler = EbaySearchCrawler(\n logger=logger,\n message_bus=message_bus,\n ebay_app_settings=EBAY_APPLICATION_SETTINGS,\n crawler_settings=EBAY_SEARCH_CRAWLER_SETTINGS\n )\n elif crawler_args.module == 'products':\n # Если нужно инициализировать обработчик products очереди\n crawler = EbayProductCrawler(\n logger=logger,\n message_bus=message_bus,\n database_client=database_client,\n ebay_app_settings=EBAY_APPLICATION_SETTINGS,\n crawler_settings=EBAY_PRODUCTS_CRAWLER_SETTINGS\n )\n else:\n logger.error(\n 'Модуль \"{name}\" не найден.'.format(\n name=crawler_args.module\n )\n )\n sys.exit(-1)\n\n logger.info(\n 'eBay crawler запущен с параметрами: module: {module}; limit: {limit}; wait: {wait}'.format(\n module=crawler_args.module,\n 
limit=crawler_args.limit,\n wait=crawler_args.wait\n )\n )\n\n # Запускает процесс обработки данных\n crawler.run(\n wait=crawler_args.wait,\n limit=crawler_args.limit\n )\n","sub_path":"application/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"245551064","text":"\"\"\"\nFramework agnostic PEM file parsing functions.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport hashlib\nimport re\n\nfrom abc import ABCMeta\n\n\nclass AbstractPEMObject(metaclass=ABCMeta):\n \"\"\"\n Base class for parsed objects.\n \"\"\"\n\n _pem_bytes: bytes\n _sha1_hexdigest: str | None\n\n def __init__(self, pem_bytes: bytes | str):\n if isinstance(pem_bytes, str):\n self._pem_bytes = pem_bytes.encode(\"ascii\")\n else:\n self._pem_bytes = pem_bytes\n self._sha1_hexdigest = None\n\n def __str__(self) -> str:\n \"\"\"\n Return the PEM-encoded content as a native :obj:`str`.\n \"\"\"\n return self._pem_bytes.decode(\"ascii\")\n\n def __repr__(self) -> str:\n return \"<{}(PEM string with SHA-1 digest {!r})>\".format(\n self.__class__.__name__, self.sha1_hexdigest\n )\n\n @property\n def sha1_hexdigest(self) -> str:\n \"\"\"\n A SHA-1 digest of the whole object for easy differentiation.\n\n .. versionadded:: 18.1.0\n .. versionchanged:: 20.1.0\n\n Carriage returns are removed before hashing to give the same hashes\n on Windows and UNIX-like operating systems.\n \"\"\"\n if self._sha1_hexdigest is None:\n self._sha1_hexdigest = hashlib.sha1(\n self._pem_bytes.replace(b\"\\r\", b\"\")\n ).hexdigest()\n\n return self._sha1_hexdigest\n\n def as_bytes(self) -> bytes:\n \"\"\"\n Return the PEM-encoded content as :obj:`bytes`.\n\n .. versionadded:: 16.1.0\n \"\"\"\n return self._pem_bytes\n\n def as_text(self) -> str:\n \"\"\"\n Return the PEM-encoded content as Unicode text.\n\n .. versionadded:: 18.1.0\n \"\"\"\n return self._pem_bytes.decode(\"utf-8\")\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n\n return (\n type(self) == type(other) and self._pem_bytes == other._pem_bytes\n )\n\n def __ne__(self, other: object) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n\n return type(self) != type(other) or self._pem_bytes != other._pem_bytes\n\n def __hash__(self) -> int:\n return hash(self._pem_bytes)\n\n\nclass Certificate(AbstractPEMObject):\n \"\"\"\n A certificate.\n \"\"\"\n\n\nclass OpenSSLTrustedCertificate(Certificate):\n \"\"\"\n An OpenSSL \"trusted certificate\".\n\n .. versionadded:: 21.2.0\n \"\"\"\n\n\nclass CertificateRequest(AbstractPEMObject):\n \"\"\"\n A certificate signing request.\n\n .. versionadded:: 17.1.0\n \"\"\"\n\n\nclass CertificateRevocationList(AbstractPEMObject):\n \"\"\"\n A certificate revocation list.\n\n .. versionadded:: 18.2.0\n \"\"\"\n\n\nclass Key(AbstractPEMObject):\n \"\"\"\n A key of unknown type.\n \"\"\"\n\n\nclass PrivateKey(Key):\n \"\"\"\n A private key of unknown type.\n\n .. versionadded:: 19.1.0\n \"\"\"\n\n\nclass PublicKey(Key):\n \"\"\"\n A public key of unknown type.\n\n .. versionadded:: 19.1.0\n \"\"\"\n\n\nclass RSAPrivateKey(PrivateKey):\n \"\"\"\n A private RSA key.\n \"\"\"\n\n\nclass RSAPublicKey(PublicKey):\n \"\"\"\n A public RSA key.\n\n .. versionadded:: 19.1.0\n \"\"\"\n\n\nclass ECPrivateKey(PrivateKey):\n \"\"\"\n A private EC key.\n\n .. 
versionadded:: 19.2.0\n \"\"\"\n\n\nclass DSAPrivateKey(PrivateKey):\n \"\"\"\n A private DSA key.\n\n Also private DSA key in OpenSSH legacy PEM format.\n\n .. versionadded:: 21.1.0\n \"\"\"\n\n\nclass DHParameters(AbstractPEMObject):\n \"\"\"\n Diffie-Hellman parameters for DHE.\n \"\"\"\n\n\nclass OpenSSHPrivateKey(PrivateKey):\n \"\"\"\n OpenSSH private key format\n\n .. versionadded:: 19.3.0\n \"\"\"\n\n\nclass SSHPublicKey(PublicKey):\n \"\"\"\n A public key in SSH\n `RFC 4716 <https://tools.ietf.org/html/rfc4716>`_ format.\n\n The Secure Shell (SSH) Public Key File Format.\n\n .. versionadded:: 21.1.0\n \"\"\"\n\n\nclass SSHCOMPrivateKey(PrivateKey):\n \"\"\"\n A private key in SSH.COM / Tectia format.\n\n .. versionadded:: 21.1.0\n \"\"\"\n\n\n_PEM_TO_CLASS: dict[bytes, type[AbstractPEMObject]] = {\n b\"CERTIFICATE\": Certificate,\n b\"TRUSTED CERTIFICATE\": OpenSSLTrustedCertificate,\n b\"PRIVATE KEY\": PrivateKey,\n b\"PUBLIC KEY\": PublicKey,\n b\"ENCRYPTED PRIVATE KEY\": PrivateKey,\n b\"OPENSSH PRIVATE KEY\": OpenSSHPrivateKey,\n b\"DSA PRIVATE KEY\": DSAPrivateKey,\n b\"RSA PRIVATE KEY\": RSAPrivateKey,\n b\"RSA PUBLIC KEY\": RSAPublicKey,\n b\"EC PRIVATE KEY\": ECPrivateKey,\n b\"DH PARAMETERS\": DHParameters,\n b\"NEW CERTIFICATE REQUEST\": CertificateRequest,\n b\"CERTIFICATE REQUEST\": CertificateRequest,\n b\"SSH2 PUBLIC KEY\": SSHPublicKey,\n b\"SSH2 ENCRYPTED PRIVATE KEY\": SSHCOMPrivateKey,\n b\"X509 CRL\": CertificateRevocationList,\n}\n\n# See https://tools.ietf.org/html/rfc1421\n# and https://tools.ietf.org/html/rfc4716 for space instead of fifth dash.\n_PEM_RE = re.compile(\n b\"----[- ]BEGIN (\"\n + b\"|\".join(_PEM_TO_CLASS.keys())\n + b\"\"\")[- ]----\\r?\n.+?\\r?\n----[- ]END \\\\1[- ]----\\r?\\n?\"\"\",\n re.DOTALL,\n)\n\n\ndef parse(pem_str: bytes) -> list[AbstractPEMObject]:\n \"\"\"\n Extract PEM-like objects from *pem_str*.\n\n :param pem_str: String to parse.\n :type pem_str: bytes\n :return: list of :ref:`pem-objects`\n \"\"\"\n return [\n _PEM_TO_CLASS[match.group(1)](match.group(0))\n for match in _PEM_RE.finditer(pem_str)\n ]\n\n\ndef parse_file(file_name: str) -> list[AbstractPEMObject]:\n \"\"\"\n Read *file_name* and parse PEM objects from it using :func:`parse`.\n \"\"\"\n with open(file_name, \"rb\") as f:\n return parse(f.read())\n","sub_path":"src/pem/_core.py","file_name":"_core.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"332968587","text":"from logging import getLogger\n\nclass Queryer(object):\n SUCCEEDED = True\n FAILED = None\n\n \"\"\"description of class\"\"\"\n def __init__(self):\n self.__log = getLogger(str(self.__class__))\n\n def _TestQuery (self, query, onSuccess, *args):\n \"\"\"can return none!\"\"\"\n self.__log.info(\"testing query.\")\n\n if query.exists():\n self.__log.info('query found a match.')\n\n if len(args) > 0: \n return onSuccess(query, *args)\n\n else:\n return onSuccess(query)\n \n else:\n self.__log.warning('query did not work!')\n return Queryer.FAILED\n\n\n\n","sub_path":"app/back/Database/Queryer.py","file_name":"Queryer.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"563858086","text":"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Create by Bin Xiao (Bin.Xiao@microsoft.com)\n# Modified by Ke Sun (sunk@mail.ustc.edu.cn), Rainbowsecret 
(yuyua@microsoft.com)\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom yacs.config import CfgNode as CN\n\n# configs for HRNet64\nHRNET_64 = CN()\nHRNET_64.STEM_INPLANES = 64\nHRNET_64.FINAL_CONV_KERNEL = 1\nHRNET_64.WITH_HEAD = True\n\nHRNET_64.STAGE2 = CN()\nHRNET_64.STAGE2.NUM_MODULES = 1\nHRNET_64.STAGE2.NUM_BRANCHES = 2\nHRNET_64.STAGE2.NUM_BLOCKS = [4, 4]\nHRNET_64.STAGE2.NUM_CHANNELS = [64, 128]\nHRNET_64.STAGE2.BLOCK = 'BASIC'\nHRNET_64.STAGE2.FUSE_METHOD = 'SUM'\n\nHRNET_64.STAGE3 = CN()\nHRNET_64.STAGE3.NUM_MODULES = 4\nHRNET_64.STAGE3.NUM_BRANCHES = 3\nHRNET_64.STAGE3.NUM_BLOCKS = [4, 4, 4]\nHRNET_64.STAGE3.NUM_CHANNELS = [64, 128, 256]\nHRNET_64.STAGE3.BLOCK = 'BASIC'\nHRNET_64.STAGE3.FUSE_METHOD = 'SUM'\n\nHRNET_64.STAGE4 = CN()\nHRNET_64.STAGE4.NUM_MODULES = 3\nHRNET_64.STAGE4.NUM_BRANCHES = 4\nHRNET_64.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]\nHRNET_64.STAGE4.NUM_CHANNELS = [64, 128, 256, 512]\nHRNET_64.STAGE4.BLOCK = 'BASIC'\nHRNET_64.STAGE4.FUSE_METHOD = 'SUM'\n\n\n# configs for HRNet48\nHRNET_48 = CN()\nHRNET_48.STEM_INPLANES = 64\nHRNET_48.FINAL_CONV_KERNEL = 1\nHRNET_48.WITH_HEAD = True\n\nHRNET_48.STAGE2 = CN()\nHRNET_48.STAGE2.NUM_MODULES = 1\nHRNET_48.STAGE2.NUM_BRANCHES = 2\nHRNET_48.STAGE2.NUM_BLOCKS = [4, 4]\nHRNET_48.STAGE2.NUM_CHANNELS = [48, 96]\nHRNET_48.STAGE2.BLOCK = 'BASIC'\nHRNET_48.STAGE2.FUSE_METHOD = 'SUM'\n\nHRNET_48.STAGE3 = CN()\nHRNET_48.STAGE3.NUM_MODULES = 4\nHRNET_48.STAGE3.NUM_BRANCHES = 3\nHRNET_48.STAGE3.NUM_BLOCKS = [4, 4, 4]\nHRNET_48.STAGE3.NUM_CHANNELS = [48, 96, 192]\nHRNET_48.STAGE3.BLOCK = 'BASIC'\nHRNET_48.STAGE3.FUSE_METHOD = 'SUM'\n\nHRNET_48.STAGE4 = CN()\nHRNET_48.STAGE4.NUM_MODULES = 3\nHRNET_48.STAGE4.NUM_BRANCHES = 4\nHRNET_48.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]\nHRNET_48.STAGE4.NUM_CHANNELS = [48, 96, 192, 384]\nHRNET_48.STAGE4.BLOCK = 'BASIC'\nHRNET_48.STAGE4.FUSE_METHOD = 'SUM'\n\n\n# configs for HRNet32\nHRNET_32 = CN()\nHRNET_32.PRETRAINED_LAYERS = ['*']\nHRNET_32.STEM_INPLANES = 64\nHRNET_32.FINAL_CONV_KERNEL = 1\nHRNET_32.WITH_HEAD = True\n\nHRNET_32.STAGE2 = CN()\nHRNET_32.STAGE2.NUM_MODULES = 1\nHRNET_32.STAGE2.NUM_BRANCHES = 2\nHRNET_32.STAGE2.NUM_BLOCKS = [4, 4]\nHRNET_32.STAGE2.NUM_CHANNELS = [32, 64]\nHRNET_32.STAGE2.BLOCK = 'BASIC'\nHRNET_32.STAGE2.FUSE_METHOD = 'SUM'\n\nHRNET_32.STAGE3 = CN()\nHRNET_32.STAGE3.NUM_MODULES = 4\nHRNET_32.STAGE3.NUM_BRANCHES = 3\nHRNET_32.STAGE3.NUM_BLOCKS = [4, 4, 4]\nHRNET_32.STAGE3.NUM_CHANNELS = [32, 64, 128]\nHRNET_32.STAGE3.BLOCK = 'BASIC'\nHRNET_32.STAGE3.FUSE_METHOD = 'SUM'\n\nHRNET_32.STAGE4 = CN()\nHRNET_32.STAGE4.NUM_MODULES = 3\nHRNET_32.STAGE4.NUM_BRANCHES = 4\nHRNET_32.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]\nHRNET_32.STAGE4.NUM_CHANNELS = [32, 64, 128, 256]\nHRNET_32.STAGE4.BLOCK = 'BASIC'\nHRNET_32.STAGE4.FUSE_METHOD = 'SUM'\n\n\n# configs for HRNet18\nHRNET_18 = CN()\nHRNET_18.PRETRAINED_LAYERS = ['*']\nHRNET_18.STEM_INPLANES = 64\nHRNET_18.FINAL_CONV_KERNEL = 1\nHRNET_18.WITH_HEAD = True\n\nHRNET_18.STAGE2 = CN()\nHRNET_18.STAGE2.NUM_MODULES = 1\nHRNET_18.STAGE2.NUM_BRANCHES = 2\nHRNET_18.STAGE2.NUM_BLOCKS = [4, 4]\nHRNET_18.STAGE2.NUM_CHANNELS = [18, 36]\nHRNET_18.STAGE2.BLOCK = 'BASIC'\nHRNET_18.STAGE2.FUSE_METHOD = 'SUM'\n\nHRNET_18.STAGE3 = CN()\nHRNET_18.STAGE3.NUM_MODULES = 4\nHRNET_18.STAGE3.NUM_BRANCHES = 3\nHRNET_18.STAGE3.NUM_BLOCKS = [4, 4, 4]\nHRNET_18.STAGE3.NUM_CHANNELS = [18, 36, 72]\nHRNET_18.STAGE3.BLOCK = 
'BASIC'\nHRNET_18.STAGE3.FUSE_METHOD = 'SUM'\n\nHRNET_18.STAGE4 = CN()\nHRNET_18.STAGE4.NUM_MODULES = 3\nHRNET_18.STAGE4.NUM_BRANCHES = 4\nHRNET_18.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]\nHRNET_18.STAGE4.NUM_CHANNELS = [18, 36, 72, 144]\nHRNET_18.STAGE4.BLOCK = 'BASIC'\nHRNET_18.STAGE4.FUSE_METHOD = 'SUM'\n\n# configs for HRNext20\nHRNEXT_20 = CN()\nHRNEXT_20.FINAL_CONV_KERNEL = 1\n\nHRNEXT_20.STAGE1 = CN()\nHRNEXT_20.STAGE1.NUM_MODULES = 1\nHRNEXT_20.STAGE1.NUM_BRANCHES = 2\nHRNEXT_20.STAGE1.NUM_BLOCKS = [4, 4]\nHRNEXT_20.STAGE1.NUM_CHANNELS = [32, 64]\nHRNEXT_20.STAGE1.BLOCK = 'BOTTLENECK'\nHRNEXT_20.STAGE1.FUSE_METHOD = 'SUM'\n\nHRNEXT_20.STAGE2 = CN()\nHRNEXT_20.STAGE2.NUM_MODULES = 1\nHRNEXT_20.STAGE2.NUM_BRANCHES = 3\nHRNEXT_20.STAGE2.NUM_BLOCKS = [4, 4, 4]\nHRNEXT_20.STAGE2.NUM_CHANNELS = [20, 40, 80]\nHRNEXT_20.STAGE2.BLOCK = 'BASIC'\nHRNEXT_20.STAGE2.FUSE_METHOD = 'SUM'\n\nHRNEXT_20.STAGE3 = CN()\nHRNEXT_20.STAGE3.NUM_MODULES = 4\nHRNEXT_20.STAGE3.NUM_BRANCHES = 4\nHRNEXT_20.STAGE3.NUM_BLOCKS = [4, 4, 4, 4]\nHRNEXT_20.STAGE3.NUM_CHANNELS = [20, 40, 80, 160]\nHRNEXT_20.STAGE3.BLOCK = 'BASIC'\nHRNEXT_20.STAGE3.FUSE_METHOD = 'SUM'\n\nHRNEXT_20.STAGE4 = CN()\nHRNEXT_20.STAGE4.NUM_MODULES = 3\nHRNEXT_20.STAGE4.NUM_BRANCHES = 5\nHRNEXT_20.STAGE4.NUM_BLOCKS = [4, 4, 4, 4, 4]\nHRNEXT_20.STAGE4.NUM_CHANNELS = [20, 40, 80, 160, 320]\nHRNEXT_20.STAGE4.BLOCK = 'BASIC'\nHRNEXT_20.STAGE4.FUSE_METHOD = 'SUM'\n\nMODEL_CONFIGS = {\n 'hrnet18': HRNET_18,\n 'hrnet32': HRNET_32,\n 'hrnet48': HRNET_48,\n 'hrnet64': HRNET_64,\n 'hrnext20': HRNEXT_20,\n}\n","sub_path":"models/backbone/hrnet/hrnet_config.py","file_name":"hrnet_config.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"590667164","text":"__author__ = 'kirry'\n\nimport datetime\nimport time\nimport unittest\nfrom time import sleep\nfrom selenium.webdriver.common.keys import Keys\nfrom Methods.LoginTools import LoginTools\nfrom Methods.MemcacheTools import MemcacheTools\nfrom Methods.WebDriverTools import WebDriverTools\nfrom config import app\nfrom Methods.OtherTools import OtherTools\n\n\nclass Case096(unittest.TestCase):\n testCaseID = 'Case096'\n projectName = \"DemoEng09\"\n buzName = '导出数据验证时间参数是否传递正常'\n start = 0.0\n now = 'None'\n startTime = \"\"\n url = \"http://%s\" % app.config['SERVERIP']\n def setUp(self):\n\n self.start = datetime.datetime.now()\n self.startTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()).split(\" \")[-1]\n MemcacheTools.setMemTime(self.testCaseID, {'start': self.startTime})\n lg = LoginTools()\n self.driver = lg.InitialChrome(self.url, self.testCaseID)\n self.driver = lg.login(self.driver)\n\n def Test(self):\n sleep(2)\n self.errors=[]\n driver = self.driver\n # 进入demoEng09导出数据页面\n WebDriverTools.enterProject(driver,\"175\",\"demoEng09\",self.errors)\n WebDriverTools.enterModuleByUserMenu(driver,\"btnPointManager\",\"数据管理\",\"#dataManagerCloudMenu\")\n self.checkexport(driver)\n OtherTools.raiseError(self.errors)\n\n def checkexport(self,driver):\n eles = driver.find_element_by_id('PointManagerExportData')\n eles.click()\n sleep(3)\n inputele1 = driver.find_element_by_id(\"batchHistoryTimeStart\")\n inputele2 =driver.find_element_by_id(\"batchHistoryTimeEnd\")\n buttonele = driver.find_element_by_id(\"exportData\")\n startTime = datetime.datetime.strftime(datetime.datetime.now()-datetime.timedelta(days = 1),\"%d/%m/%Y %H:%M\")\n endTime = datetime.datetime.strftime(datetime.datetime.now(),\"%d/%m/%Y 
%H:%M\")\n inputele1.clear()\n inputele1.send_keys(startTime)\n inputele2.clear()\n inputele2.send_keys(endTime)\n buttonele.click()\n sleep(2)\n try:\n text = driver.find_elements_by_css_selector('body > div')[-1].text\n if \"Invalid Date\" in text:\n self.errors.append(\"%s中点击导出数据按钮后,确认消息中时间值有误\"%self.projectName)\n else:\n print(\"弹出框内的数据正常\")\n driver.find_elements_by_css_selector('body > div > div > button')[0].click()\n except:\n self.errors.append(\"数据管理页面中选取好时间后,点击导出数据,系统未弹出确认信息!\")\n sleep(3)\n try:\n text = driver.find_elements_by_css_selector('body > div')[-1].text\n if \"请求失败\"in text:\n self.errors.append(\"导出数据请求失败!\")\n else:\n print(\"导出数据成功!\")\n except:\n self.errors.append(\"导出数据后未给出导出结果提示信息!\")\n\n\n\n\n def tearDown(self):\n self.start = str((datetime.datetime.now() - self.start).seconds)\n self.start = self.start + \"s\"\n self.now = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()).split(\" \")[-1]\n MemcacheTools.setMemTime(self.testCaseID, {'start': self.startTime, 'end': self.now})\n self.driver.quit()\n\n\nif __name__ == \"__main__\":\n suite = unittest.TestSuite()\n suite.addTest(Case096('Test'))\n runner = unittest.TextTestRunner()\n runner.run(suite)","sub_path":"BeOP-AutoTest2016/UICase/Case096DemoEng09ExportTime.py","file_name":"Case096DemoEng09ExportTime.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"46235456","text":"\"\"\"EWOSystem URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, include\r\nfrom django.conf.urls.static import static\r\nfrom administrar.views import *\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\nurlpatterns = [\r\n path('', InicioView.as_view(), name='inicio'),\r\n path('admin/', admin.site.urls),\r\n path('informes/presupuesto/', login_required(PresupuestoPDF.as_view()), name='informe'),\r\n path('informes/actividad/', login_required(ActividadPDF.as_view()), name='informeactividad'),\r\n path('informes/proyecto/', login_required(ProyectoPDF.as_view()), name='informeproyecto'),\r\n path('informes/eventos/', login_required(EventosPDF.as_view()), name='informeeventos'),\r\n path('informes/eventos_pendientes/', login_required(EventosPendientesPDF.as_view()), name='eventospendientes'),\r\n path('informes/eventos_realizados/', login_required(EventosRealizadosPDF.as_view()), name='eventosrealizados'),\r\n path('informes/pastores/', login_required(PastoresPDF.as_view()), name='informepastores'),\r\n path('informes/proyectos_rango/', login_required(InformeProyectoRangoPDF.as_view()), name='proyecto_por_rango'),\r\n path('informes/actividades_rango/', login_required(InformeActividadRangoPDF.as_view()), name='actividad_por_rango'),\r\n path('informes/presupuesto_rango/', login_required(InformePresupuestoRangoPDF.as_view()), name='presupuesto_por_rango'),\r\n path('informes/tipo_proyecto_rango/', login_required(InformeTipoProyectoRangoPDF.as_view()), name='tipo_proyecto_rango'),\r\n]\r\nif settings.DEBUG:\r\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\r\n","sub_path":"EWOSystem/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"134918136","text":"'''\n Copyright (c) 2014, Joonhee Han.\n \n This file is part of MealyHMM.\n MealyHMM is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.\n'''\n\nfrom Queue import Queue\nimport alpha_calculator as ac\nimport beta_calculator as bc \nimport numpy\nimport sys\n\nclass ParameterEstimator:\n def __init__(self, hmm, new_hmm, sequences):\n self.hmm = hmm\n self.new_hmm = new_hmm\n self.alphabet = self.hmm.getAlphabet()\n self.numOfStates = self.hmm.getNumOfStates()\n self.state_probs = numpy.zeros( len(sequences) * self.numOfStates ).reshape( len(sequences), self.numOfStates )\n self.tran_probs = numpy.zeros( len(sequences) * self.numOfStates * self.numOfStates ).reshape( len(sequences), self.numOfStates, self.numOfStates ) \n self.epsil_tran_probs = numpy.zeros( len(sequences) * self.numOfStates * self.numOfStates ).reshape( len(sequences), self.numOfStates, self.numOfStates ) \n self.obs_probs = numpy.zeros( len(sequences) * self.numOfStates * self.numOfStates * len(self.alphabet) ).reshape( len(sequences), self.numOfStates, self.numOfStates, len(self.alphabet) ) \n self.sequences = sequences\n self.queue = Queue()\n\n def estimate(self):\n for idx, seq in enumerate( self.sequences ):\n a = ac.AlphaCalculator(self.hmm, seq)\n b = bc.BetaCalculator(self.hmm, seq)\n alpha = a.getAlpha()\n beta = b.getBeta()\n\n self.queue.put( self.hmm.getInitialState() )\n\n while not self.queue.empty():\n state = self.queue.get()\n\n for nb in 
self.hmm.getNeighbors(state):\n for t in range(0, len(seq)+1):\n if t > 0:\n p = alpha[state][t-1] * self.hmm.getObsProb(state, nb, seq[t-1]) * self.hmm.getTranProb(state, nb) * beta[nb][t]\n if p <= sys.float_info.epsilon:\n p = 0.0\n \n self.tran_probs[idx][state][nb] += p \n self.state_probs[idx][state] += p\n self.obs_probs[idx][state][nb][self.alphabet.index(seq[t-1])] += p \n \n p = alpha[state][t] * self.hmm.getEpsilonTranProb(state, nb) * beta[nb][t]\n if p <= sys.float_info.epsilon:\n p = 0.0\n\n self.epsil_tran_probs[idx][state][nb] += p \n self.state_probs[idx][state] += p \n\n if state != nb:\n self.queue.put(nb)\n\n # set\n self.queue.put( self.hmm.getInitialState() )\n while not self.queue.empty():\n state = self.queue.get()\n\n for nb in self.hmm.getNeighbors(state):\n for idx in range( len(self.sequences) ):\n #print idx, state, nb, self.obs_probs[idx][state][nb], self.tran_probs[idx][state][nb]\n\n p = self.tran_probs[idx][state][nb] / self.state_probs[idx][state] / float(len(self.sequences))\n if numpy.isnan(p):\n self.new_hmm.setTranProb(state, nb, self.new_hmm.getTranProb(state, nb))\n else:\n self.new_hmm.setTranProb(state, nb, self.new_hmm.getTranProb(state, nb) + p)\n\n p = self.epsil_tran_probs[idx][state][nb] / self.state_probs[idx][state] / float(len(self.sequences)) \n if numpy.isnan(p):\n self.new_hmm.setEpsilonTranProb(state, nb, self.new_hmm.getEpsilonTranProb(state, nb))\n else:\n self.new_hmm.setEpsilonTranProb(state, nb, self.new_hmm.getEpsilonTranProb(state, nb) + p)\n \n\n for a in self.alphabet:\n for idx in range( len(self.sequences) ):\n p = self.obs_probs[idx][state][nb][self.alphabet.index(a)] / self.tran_probs[idx][state][nb] / float(len(self.sequences)) \n if numpy.isnan(p): \n self.new_hmm.setObsProb(state, nb, a, self.new_hmm.getObsProb(state, nb, a))\n else:\n self.new_hmm.setObsProb(state, nb, a, self.new_hmm.getObsProb(state, nb, a) + p)\n\n if state != nb:\n self.queue.put(nb)\n","sub_path":"parameter_estimator.py","file_name":"parameter_estimator.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"96359844","text":"import pandas as pd\nimport numpy as np\n\ndata = pd.read_csv('Data/dataQuandlLarger.csv')\n\ndata1 = (data.loc[data['Date']>='2000-01-01'])\n\ndata = pd.read_csv('Data/dataYF1.csv')\n\ndata2 = (data.loc[data['Date']>='2000-01-01'])\n\ndata = pd.read_csv('Data/dataYF2.csv')\n\ndata3 = (data.loc[data['Date']>='2000-01-01'])\n\ndel data1['Adj. Open']\ndel data1['Adj. Low']\ndel data1['Adj. High']\ndel data1['Adj. Volume']\n\ndata2 = data2.rename(columns = {\"Dividends\" : \"Ex-Dividend\", \"Splits\" : \"Split Ratio\", \"Adj Close\" : \"Adj. Close\"})\ndata3 = data3.rename(columns = {\"Dividends\" : \"Ex-Dividend\", \"Splits\" : \"Split Ratio\", \"Adj Close\" : \"Adj. 
Close\"})\nprint(data2.head())\nfinalData = pd.DataFrame()\nfinalData = finalData.append(data1)\nfinalData = finalData.append(data2)\nfinalData = finalData.append(data3)\n\nfinalData.to_csv('MergedData.csv')","sub_path":"Phase2/Zip/Group1_Phase2/Code/GatherMoreData.py","file_name":"GatherMoreData.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"469395736","text":"from dj.choices import Choices\nfrom django.conf import settings\nfrom django.core.validators import (\n MaxLengthValidator,\n MinLengthValidator,\n RegexValidator\n)\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ralph.back_office.models import Warehouse\nfrom ralph.lib.mixins.models import (\n AdminAbsoluteUrlMixin,\n NamedMixin,\n TimeStampMixin\n)\nfrom ralph.lib.transitions.fields import TransitionField\n\n\nPUK_CODE_VALIDATORS = [\n MinLengthValidator(5),\n RegexValidator(\n regex='^\\d+$',\n message=_('Required numeric characters only.')\n ),\n]\n\n\nPIN_CODE_VALIDATORS = [\n MinLengthValidator(4),\n RegexValidator(\n regex='^\\d+$',\n message=_('Required numeric characters only.')\n ),\n]\n\n\nclass SIMCardStatus(Choices):\n _ = Choices.Choice\n\n new = _(\"new\")\n in_progress = _(\"in progress\")\n waiting_for_release = _(\"waiting for release\")\n used = _(\"in use\")\n damaged = _(\"damaged\")\n liquidated = _(\"liquidated\")\n free = _(\"free\")\n reserved = _(\"reserved\")\n loan_in_progress = _(\"loan in progress\")\n return_in_progress = _(\"return in progress\")\n in_quarantine = _(\"in quarantine\")\n\n\nclass CellularCarrier(AdminAbsoluteUrlMixin, NamedMixin, models.Model):\n pass\n\n\nclass SIMCardFeatures(\n AdminAbsoluteUrlMixin,\n NamedMixin,\n models.Model\n):\n pass\n\n\nclass SIMCard(AdminAbsoluteUrlMixin, TimeStampMixin, models.Model):\n pin1 = models.CharField(\n max_length=8, null=True, blank=True,\n help_text=_('Required numeric characters only.'),\n validators=PIN_CODE_VALIDATORS\n )\n puk1 = models.CharField(\n max_length=16, help_text=_('Required numeric characters only.'),\n validators=PUK_CODE_VALIDATORS\n )\n pin2 = models.CharField(\n max_length=8, null=True, blank=True,\n help_text=_('Required numeric characters only.'),\n validators=PIN_CODE_VALIDATORS\n )\n puk2 = models.CharField(\n max_length=16, null=True, blank=True,\n help_text=_('Required numeric characters only.'),\n validators=PUK_CODE_VALIDATORS)\n carrier = models.ForeignKey(\n CellularCarrier, on_delete=models.PROTECT,\n )\n card_number = models.CharField(\n max_length=22, unique=True,\n validators=[\n MinLengthValidator(1),\n MaxLengthValidator(22),\n RegexValidator(\n regex='^\\d+$',\n message=_('Required numeric characters only.'),\n )\n ]\n )\n phone_number = models.CharField(\n max_length=16, unique=True, help_text=_('ex. 
+2920181234'),\n validators=[\n MinLengthValidator(1),\n MaxLengthValidator(16),\n RegexValidator(\n regex='^\\+\\d+$',\n message='Phone number must have +2920181234 format.'\n )\n ]\n )\n warehouse = models.ForeignKey(Warehouse, on_delete=models.PROTECT)\n owner = models.ForeignKey(\n settings.AUTH_USER_MODEL, null=True, blank=True,\n on_delete=models.SET_NULL,\n related_name='owned_simcards',\n )\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL, null=True, blank=True,\n on_delete=models.SET_NULL,\n related_name='used_simcards',\n )\n status = TransitionField(\n default=SIMCardStatus.new.id,\n choices=SIMCardStatus(),\n )\n remarks = models.TextField(blank=True)\n quarantine_until = models.DateField(\n null=True, blank=True,\n help_text=_('End of quarantine date.')\n )\n features = models.ManyToManyField(\n SIMCardFeatures,\n blank=True,\n )\n\n def __str__(self):\n return _('SIM Card: {}').format(self.phone_number)\n","sub_path":"src/ralph/sim_cards/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"3213994","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: ts=4:sw=4\n# Dirk Sohler, spam@0x7be.de\n# http://dev.0x7be.de/mar.html\n\n\nimport sys\nimport re\nimport os\n\nfrom optparse import OptionParser\nfrom pprint import pprint as pp\nfrom datetime import datetime\n\nprogversion = '2.0.2'\n\n\ndef options():\n\tparser = OptionParser()\n\tparser = OptionParser(\n\t\t\tusage='%prog [options] mask files',\n\t\t\tversion='%prog '+progversion)\n\tparser.add_option('-n', '--no-colors',\n\t\t\taction='store_true',\n\t\t\tdest='nocolors',\n\t\t\tdefault=False,\n\t\t\thelp='disable colored output')\n\tparser.add_option('-q', '--quiet',\n\t\t\taction='store_true',\n\t\t\tdest='quiet',\n\t\t\tdefault=False,\n\t\t\thelp='dismiss status informations')\n\tparser.add_option('-b', '--backup',\n\t\t\taction='store_true',\n\t\t\tdest='backup',\n\t\t\tdefault=False,\n\t\t\thelp='creating backup CSV file')\n\tparser.add_option('-s', '--start-with',\n\t\t\tdest='startwith',\n\t\t\tdefault=1,\n\t\t\ttype='int',\n\t\t\tmetavar='N',\n\t\t\thelp='start numbering with N instead 1')\n\tparser.add_option('-v', '--verbose',\n\t\t\taction='store_true',\n\t\t\tdest='verbose',\n\t\t\tdefault=False,\n\t\t\thelp='print verbose output')\n\treturn parser\nparams = options()\noptions,userinput = params.parse_args()\n\n\ndef color(text):\n\t# http://blog.holloway-web.de/wp-content/uploads/2009/11/ansi_colors.png\n\tif not options.nocolors == True:\n\t\tregular = {'gray': '\\033[0;30m', 'red': '\\033[0;31m',\n\t\t\t\t'green': '\\033[0;32m', 'yellow': '\\033[0;33m',\n\t\t\t\t'dblue': '\\033[0;34m', 'purple': '\\033[0;35m',\n\t\t\t\t'lblue': '\\033[0;36m', 'white': '\\033[0;37m'\n\t\t\t\t}\n\t\tbold = {'gray': '\\033[1;30m', 'red': '\\033[1;31m',\n\t\t\t\t'green': '\\033[1;32m', 'yellow': '\\033[1;33m',\n\t\t\t\t'dblue': '\\033[1;34m', 'purple': '\\033[1;35m',\n\t\t\t\t'lblue': '\\033[1;36m', 'white': '\\033[1;37m'\n\t\t\t\t}\n\t\tdef rc(o): return regular[o.group(1).lower()]\n\t\tdef bc(o): return bold[o.group(1).lower()]\n\t\ttext = text.replace('|RESET|', '\\033[0m') # reset in text\n\t\ttext += '\\033[0m' # reset at EOL\n\t\ttext = re.sub('\\|([a-z]{3,6})\\|',rc, text) # regular colors\n\t\ttext = re.sub('\\|([A-Z]{3,6})\\|',bc, text) # bold colors\n\telse:\n\t\ttext = re.sub('\\|([A-Za-z]{3,6})\\|','', text)\n\treturn text\n\n\ndef info(text):\n if options.quiet == True:\n 
return\n sys.stderr.write('%s\\n' % color(text))\n\n\ndef verbose(text):\n if not options.verbose == True:\n return\n sys.stderr.write(color('|GRAY|[%s]\\n' % text))\n\n\ndef checkinput():\n\tverbose('Checking user input')\n\tif len(userinput) >= 1:\n\t\tinfo('|YELLOW|-> |PURPLE|Using mask |DBLUE|%s' % userinput[0])\n\telif len(userinput) == 0:\n\t\ttext = color('|RED|-> |PURPLE|Missing input. Aborting.')\n\t\tsys.stderr.write('%s\\n' % text)\n\t\tsys.exit(1)\n\tif not '%s' in userinput[0]:\n\t\ttext = color('|RED|-> |PURPLE|Missing |DBLUE|%s |PURPLE|in mask!')\n\t\tsys.stderr.write('%s\\n' % text)\n\t\tsys.exit(1)\n\n\ndef getfiles():\n\tverbose('Getting files for renaming')\n\tfl = []\n\tfor f in userinput[1:]:\n\t\tabspath = os.path.abspath(f)\n\t\tbasename = os.path.basename(abspath)\n\t\tdirname = os.path.dirname(abspath)\n\t\tif os.access(dirname, os.W_OK):\n\t\t\tif not os.path.isdir(f):\n\t\t\t\tfl.append(f)\n\t\t\telse:\n\t\t\t\tinfo(('|RED|-> |PURPLE|Ignoring |DBLUE|%s |PURPLE|because ' +\n\t\t\t\t'it is a directory') % (f))\n\t\telse:\n\t\t\tinfo(('|RED|-> |PURPLE|Directory |DBLUE|%s |PURPLE|not writable. ' +\n\t\t\t\t'Ignoring |DBLUE|%s') % (dirname,basename))\n\tinfo('|YELLOW|-> |PURPLE|Found |DBLUE|%s |PURPLE|files for renaming'\n\t\t\t% len(fl))\n\treturn sorted(fl, key=lambda s: s.lower())\n\n\ndef associate(fl):\n\tverbose('Creating association dictionaries')\n\tinfo('|YELLOW|-> |PURPLE|Associating new names and files')\n\ti = options.startwith\n\tfill = len(str(len(fl)))\n\tres = []\n\tfor f in fl:\n\t\tnr = str(i).zfill(fill)\n\t\tres.append({\n\t\t\t'basename': os.path.basename(f),\n\t\t\t'dirname': os.path.dirname(f),\n\t\t\t'newname': userinput[0] % nr,\n\t\t\t'index': nr\n\t\t\t})\n\t\ti += 1\n\treturn res\n\n\ndef rename(assoc):\n\tverbose('Renaming files')\n\ttotal = len(assoc)\n\tskipped = 0\n\trenamed = 0\n\tindex = 0\n\n\tfor f in assoc:\n\n\t\tindex += 1\n\t\toldfile = os.path.join(f['dirname'], f['basename'])\n\t\tnewfile = os.path.join(f['dirname'], f['newname'])\n\n\t\t# \\033[F Cursor one line up\n\t\t# \\033[K Clear line until end\n\n\t\tif not os.path.exists(newfile):\n\t\t\ttext = color(('\\033[F|YELLOW|-> |PURPLE|Processing |DBLUE|%s ' +\n\t\t\t\t'|PURPLE|(|DBLUE|%s/%s|PURPLE|)\\033[K')\n\t\t\t\t\t% (f['basename'],index,total))\n\t\t\tinfo(text)\n\t\t\tos.rename(oldfile, newfile)\n\t\t\trenamed += 1\n\t\telse:\n\t\t\tskipped += 1\n\t\t\tinfo(('|RED|-> |PURPLE|File |DBLUE|%s |PURPLE|already exists. 
' +\n\t\t\t\t\t'Skipping') % f['newname'])\n\n\tinfo('\\033[F\\033[K')\n\n\tif renamed == total:\n\t\tif total == 0:\n\t\t\tinfo('\\033[F|YELLOW|-> |PURPLE|No files to rename')\n\t\telse:\n\t\t\tinfo('\\033[F|GREEN|-> |PURPLE|All files renamed')\n\telif skipped == 1:\n\t\tinfo('\\033[F|RED|-> |PURPLE|Skipped |DBLUE|1 |PURPLE|file')\n\telif skipped > 1:\n\t\tinfo('\\033[F|RED|-> |PURPLE|Skipped |DBLUE|%s |PURPLE|files' % skipped)\n\n\ndef backup(assoc):\n\tif not options.backup:\n\t\treturn\n\tverbose('Creating backup CSV')\n\tfilename = '%s, PID %i' % (datetime.isoformat(datetime.now()),os.getpid())\n\tbackupfile = os.path.join(assoc[0]['dirname'], filename)\n\tinfo('|YELLOW|-> |PURPLE|Backup: |DBLUE|%s' % backupfile)\n\tbackup = '\"Old file name\",\"New file name\"\\n'\n\tfor f in assoc:\n\t\toldfile = os.path.join(f['dirname'], f['basename'])\n\t\tnewfile = os.path.join(f['dirname'], f['newname'])\n\t\tif oldfile.find('\"') != -1:\n\t\t\toq = '\"\"'\n\t\telse:\n\t\t\toq = '\"'\n\t\tif newfile.find('\"') != -1:\n\t\t\tnq = '\"\"'\n\t\telse:\n\t\t\tnq = '\"'\n\t\tbackup += '%s%s%s,%s%s%s\\n' % (oq,oldfile,oq,nq,newfile,nq)\n\tverbose('Writing backup')\n\twith open(backupfile, 'w+') as bf:\n\t\twrite_data = bf.write(backup)\n\n\ndef main():\n\tverbose('Startup')\n\tinfo('|WHITE|Renaming files')\n\tcheckinput()\n\tfiles = getfiles()\n\tassoc = associate(files)\n\trename(assoc)\n\tbackup(assoc)\n\tverbose('End')\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"mar.py","file_name":"mar.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"194257885","text":"import tensorflow as tf\r\nfrom tensorflow.keras import layers\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\ndef PoolFilter(data, universe_name, from_date, to_date):\r\n if universe_name is None:\r\n return data\r\n else:\r\n uni = pd.read_csv(GetFileName('RAW_UNIVERSE_%s.csv' % universe_name.replace('.', '_')), index_col=[0]).loc[from_date:to_date].fillna(0) > 0.5\r\n uni.index.name = 'FACTOR_DATE'\r\n uni.columns.name = 'STOCK_CODE'\r\n uni_info = uni.unstack().reset_index(name='UNI')\r\n uni_rec = uni_info.loc[uni_info['UNI']]\r\n filter_data = pd.merge(data, uni_rec, on=['FACTOR_DATE', 'STOCK_CODE'], how='inner')\r\n return filter_data\r\ndef Ret2Tag(data, use_short):\r\n tag = data.groupby('FACTOR_DATE')['RET'].rank(pct=True).apply(lambda x: np.int(np.floor(x*3-1e-4)-1))\r\n if not use_short:\r\n tag[tag < 0] = 0\r\n return tag\r\ndef WinTage(data, index):\r\n tmp = pd.merge(data, index, left_on=['FACTOR_DATE'], right_index=True)\r\n tag = (tmp['RET'] >= tmp['RETURN']).astype(np.int)\r\n return tag\r\ndef LoadData(raw_data, from_date, to_date):\r\n train_df = raw_data.loc[(all_df['FACTOR_DATE'] < to_date) & (all_df['FACTOR_DATE'] >= from_date)]\r\n train_df.loc[train_df['TAG'] > 1, 'TAG'] = 1\r\n train_data = train_df[factor_list['FACTOR_NAME']].values\r\n train_label = pd.get_dummies(train_df['TAG']).values\r\n return train_data, train_label\r\n\r\ndl_path = 'H:\\\\DL\\\\data'\r\nGetFileName = lambda x: os.path.join(dl_path, x)\r\n\r\n# import data\r\nuni_name = '000300.SH'\r\nmodel_name = 'dl3_ls.h5'\r\nuse_short = True\r\nlabel_num = 3\r\nfrom_date = 20060101\r\nto_date = 20190501\r\ntrain_last_date = 20180101\r\nGetFileName = lambda x: os.path.join(dl_path, x)\r\nfactor_list = pd.read_csv(GetFileName(\"factor_list.csv\"), names=['FACTOR_NAME', 'NEU_TYPE'])\r\nfactor_num = len(factor_list)\r\nraw_df = 
pd.read_pickle(GetFileName('all_data.pkl'), compression='gzip').dropna()\r\nall_df = PoolFilter(data=raw_df, universe_name=uni_name, from_date=from_date, to_date=to_date)\r\nhs300_ret = pd.read_csv(GetFileName('INDEX_000300.SH_RETURN.csv' ), index_col=[0], header=None, names=['RETURN'])\r\nhs300 = (1+hs300_ret).rolling(20).apply(lambda x: x.prod()-1, raw=True).shift(-19).loc[from_date:to_date]\r\nall_df['TAG'] =Ret2Tag(data=all_df, use_short=use_short)\r\n# all_df['TAG'] = WinTage(data=all_df, index=hs300)\r\ntrain_data, train_label = LoadData(raw_data=all_df, from_date=from_date, to_date=train_last_date)\r\nval_data, val_label = LoadData(raw_data=all_df, from_date=train_last_date, to_date=to_date)\r\n# model init\r\nmodel = tf.keras.Sequential([\r\n # Adds a densely-connected layer with 64 units to the model:\r\n layers.Dense(factor_num, activation='relu'),\r\n # Add another:\r\n layers.Dense(64, activation='relu'),\r\n # Add a softmax layer with 10 output units:\r\n layers.Dropout(0.2, noise_shape=None, seed=None),\r\n layers.Dense(label_num, activation='softmax')])\r\n\r\n# model compile\r\nmodel.compile(optimizer=tf.train.AdamOptimizer(0.001),\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n# model train\r\nmodel.fit(train_data, train_label, epochs=10, batch_size=1000, validation_data=(val_data, val_label))\r\n\r\n# save model\r\nmodel.save(GetFileName(model_name))","sub_path":"dl2_train.py","file_name":"dl2_train.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"342022704","text":"import multiprocessing as mp\n\n\ndef worker(list, mydict):\n key = list[0]\n value = list[1]\n mydict[key] = value\n\n\n\n#\nmanager = mp.Manager()\n# shared_dict_1 = manager.dict()\n#\n# process1 = mp.Process(target=worker, args=[shared_dict_1, ['Weizhi', 120]])\n# process2 = mp.Process(target=worker, args=[shared_dict_1, ['Shan', 90]])\n# process3 = mp.Process(target=worker, args=[shared_dict_1, ['Haoyan', 100]])\n#\n# process1.start()\n# process2.start()\n# process3.start()\n#\n# process1.join()\n# process2.join()\n# process3.join()\n#\n# print(shared_dict_1)\n# # {'Weizhi': 120, 'Shan': 90, 'Haoyan': 100}\n#\n#\n\n\n\n\nmeasurements = [['Weizhi', 120], ['Shan', 90], ['Haoyan', 100]]\npool = mp.Pool(processes=4)\nshared_dict_2 = manager.dict()\n\npool.map(worker, ([measurement, shared_dict_2] for measurement in measurements))\n\npool.close()\npool.join()\n\nprint(shared_dict_2)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"19_multiprocessing_demo_4_ shared_dict.py","file_name":"19_multiprocessing_demo_4_ shared_dict.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"431088055","text":"import pandas as pd\nimport nltk\nimport numpy as np\nimport datetime\nimport re\nfrom functools import reduce # For merging aggrated data frames together\n\nfrom sklearn import preprocessing\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.utils.tests.test_pprint import CountVectorizer\nfrom nltk.corpus import stopwords\n\nnltk.download('stopwords')\n\nsport_vectors = []\n\nfor i in range(2):\n filename_1 = input(\"filename:\")\n filename_2 = input(\"filename:\")\n filename_3 = input(\"filename:\")\n filename_4 = input(\"filename:\")\n\n # w_filename = input(\"write to filename:\")\n # f = open(w_filename, 
\"w\")\n\n data_1 = pd.read_csv(filename_1)\n data_2 = pd.read_csv(filename_2)\n data_3 = pd.read_csv(filename_3)\n data_4 = pd.read_csv(filename_4)\n datasets = [data_2, data_3, data_4]\n print(data_1.keys())\n\n\n #concatenate all datasets per sport type\n result = pd.concat(datasets)\n #make year its own column\n result['yr'] = result[' news_date'].str[-4:]\n new = result[' news_date'].str.split(\" \", n= 3, expand= True)\n #fix month\n result['month'] = new[0]\n d: {str, int} = {'Jan': 1, 'January': 1, 'Feb': 2, 'February': 2, 'Mar': 3, 'March': 3, 'Apr': 4, 'April': 4, 'May': 5, 'Jun': 6, 'June': 6, 'July': 7, 'Jul': 7, 'Aug': 8, 'August': 8, 'Sep': 9, 'September': 9, 'Oct': 10, 'October': 10, 'Nov': 11, 'November': 11, 'Dec': 12, 'December': 12}\n result['month'] = result['month'].map(d).fillna(0)\n result['month'] = result['month'].astype(int)\n result['month'] = result['month'].astype(str)\n new_day = new[1].str.split(\"|\", n=2, expand= True)\n result['day'] = new_day[0]\n\n result['date'] = result['yr']+result['month']+result['day']\n print(result['date'])\n result['date'] = pd.to_datetime(result['date'], format='%Y%m%d')\n result['weekday_weekend'] = result['date'].dt.dayofweek.fillna(0).astype(int)\n result['weekday_weekend'] = result['weekday_weekend'].astype(str)\n print(result.head())\n\n result['yr'] = result[' news_date'].str[-4:]\n new = result[' news_date'].str.split(\" \", n= 3, expand= True)\n #fix month\n result['month'] = new[0]\n d: {str, int} = {'Jan': 1, 'January': 1, 'Feb': 2, 'February': 2, 'Mar': 3, 'March': 3, 'Apr': 4, 'April': 4, 'May': 5, 'Jun': 6, 'June': 6, 'July': 7, 'Jul': 7, 'Aug': 8, 'August': 8, 'Sep': 9, 'September': 9, 'Oct': 10, 'October': 10, 'Nov': 11, 'November': 11, 'Dec': 12, 'December': 12}\n result['month'] = result['month'].map(d).fillna(0)\n result['month'] = result['month'].astype(int)\n result['month'] = result['month'].astype(str)\n new_day = new[1].str.split(\"|\", n=2, expand= True)\n result['day'] = new_day[0]\n\n print(\"----working on train set (2020)-----\")\n data_1['date'] = data_1['yr']+data_1['month']+data_1['day']\n print(data_1['date'])\n data_1['date'] = pd.to_datetime(data_1['date'], format='%Y%m%d')\n data_1['weekday_weekend'] = data_1['date'].dt.dayofweek.fillna(0).astype(int)\n data_1['weekday_weekend'] = data_1['weekday_weekend'].astype(str)\n print(data_1.head())\n\n train_data = pd.DataFrame(data_1['news_title'], data_1['weekday_weekend'])\n\n #convert date into datetime format\n\n #print(data_1.shape)\n print(data_2.shape)\n print(data_3.shape)\n print(data_4.shape)\n\n # merge datasets\n wordcount_holder = {}\n i = 0\n # for dataset in datasets:\n # word2count = {}\n # for data in dataset:\n # words = nltk.word_tokenize(data)\n # for word in words:\n # if word not in word2count.keys():\n # word2count[word] = 1\n # else:\n # word2count[word] += 1\n #\n # wordcount_holder[i] = word2count\n # i = i + 1\n #\n # print(wordcount_holder)\n\n\n\n\n top_N = 50\n top_D = 3\n a = result['news_title'].str.lower().str.cat(sep=' ' or '|')\n b = result['weekday_weekend'].str.cat(sep=' ')\n c = result['month'].str.cat(sep=' ')\n\n dayofweek = nltk.tokenize.word_tokenize(b)\n months = nltk.tokenize.word_tokenize(c)\n words = nltk.tokenize.word_tokenize(a)\n words = [w for w in words if w not in stopwords.words(\"english\")]\n for i in range(len(words)):\n words[i] = re.sub(r'\\W',' ',words[i])\n words[i] = re.sub(r'\\s+',' ',words[i])\n #words[i] = re.sub(\"[^a-zA-Z]\",' ',words[i])\n\n word_dist = nltk.FreqDist(words)\n day_dist 
= nltk.FreqDist(dayofweek)\n mth_dist = nltk.FreqDist(months)\n print(word_dist)\n print(\"------\")\n print(day_dist)\n print(\"------\")\n print(mth_dist)\n print(len(words))\n\n print('All frquencies')\n print('=' * 60)\n\n rslt_words = pd.DataFrame(word_dist.most_common(top_N), columns=['Word', 'Frequency'])\n print(rslt_words)\n print('=' * 60)\n\n\n rslt_day = pd.DataFrame(day_dist.most_common(top_D), columns=['WeekDate', 'Frequency'])\n print(rslt_day)\n print('=' * 60)\n #rslt.to_csv(r'{}'.format(w_filename), index=False, header=True)\n #f.close()`\n rslt_mth = pd.DataFrame(mth_dist.most_common(top_D), columns=['Month', 'Frequency'])\n print(rslt_mth)\n print('=' * 60)\n\n\n from sklearn.feature_extraction.text import CountVectorizer\n\n weekday = str(input(\"which was the top weekdate: \"))\n stmt_docs = [data_1['news_title'] for index, row in train_data.iterrows() if data_1['weekday_weekend'] == weekday]\n\n vec_s = CountVectorizer()\n X_s = vec_s.fit_transform(stmt_docs)\n tdm_s = pd.DataFrame(X_s.toarray(), columns=vec_s.get_feature_names())\n\n tdm_s\n\n weekday_1 = str(input(\"which was the top weekdate: \"))\n q_docs = [data_1['sent'] for index,row in train_data.iterrows() if data_1['weekday_weekend'] == weekday_1]\n\n vec_q = CountVectorizer()\n X_q = vec_q.fit_transform(q_docs)\n tdm_q = pd.DataFrame(X_q.toarray(), columns=vec_q.get_feature_names())\n\n tdm_q\n\n word_list_s = vec_s.get_feature_names();\n count_list_s = X_s.toarray().sum(axis=0)\n freq_s = dict(zip(word_list_s, count_list_s))\n freq_s\n\n word_list_q = vec_q.get_feature_names();\n count_list_q = X_q.toarray().sum(axis=0)\n freq_q = dict(zip(word_list_q,count_list_q))\n freq_q\n\n #count features\n from sklearn.feature_extraction.text import CountVectorizer\n\n docs = [data_1['news_title'] for index, row in train_data.iterrows()]\n\n vec = CountVectorizer()\n X = vec.fit_transform(docs)\n\n total_features = len(vec.get_feature_names())\n total_features\n\n #total count of all features in the training set\n total_cnts_features_s = count_list_s.sum(axis=0)\n total_cnts_features_q = count_list_q.sum(axis=0)\n\n\n top_N = 50\n train_a = data_1['news_title'].str.lower().str.cat(sep=' ' or '|')\n\n train_words = nltk.tokenize.word_tokenize(train_a)\n train_words = [w for w in train_words if w not in stopwords.words(\"english\")]\n for i in range(len(train_words)):\n train_words[i] = re.sub(r'\\W',' ',train_words[i])\n train_words[i] = re.sub(r'\\s+',' ',train_words[i])\n #words[i] = re.sub(\"[^a-zA-Z]\",' ',words[i])\n\n t_word_dist = nltk.FreqDist(train_words)\n\n print(t_word_dist)\n print(\"------\")\n\n\n print('All frquencies')\n print('=' * 60)\n\n t_rslt_words = pd.DataFrame(t_word_dist.most_common(top_N), columns=['Word', 'Frequency'])\n print(t_rslt_words)\n print('=' * 60)\n\n #total frequency\n from nltk.tokenize import word_tokenize\n new_sentence = data_1['news_title']\n prob_s_with_ls = []\n for n in new_sentence:\n new_word_list = word_tokenize(n)\n for word in new_word_list:\n if word in freq_s.keys():\n count = freq_s[word]\n else:\n count = 0\n prob_s_with_ls.append((count + 1)/(total_cnts_features_s + total_features))\n dict(zip(new_word_list,prob_s_with_ls))\n\n\n prob_q_with_ls = []\n for n in new_sentence:\n new_word_list = word_tokenize(n)\n for word in new_word_list:\n if word in freq_q.keys():\n count = freq_q[word]\n else:\n count = 0\n prob_q_with_ls.append((count + 1)/(total_cnts_features_q + total_features))\n 
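# The two loops above implement Laplace (add-one) smoothing:
# P(word | class) = (count(word, class) + 1) / (class_token_total + vocabulary_size),
# so words never seen in a class still get a small non-zero probability.
# A minimal sketch of the same computation as a helper; `laplace_prob` is a
# hypothetical name, not part of the original script:
#
# def laplace_prob(word, freq, total_count, vocab_size):
#     count = freq.get(word, 0)  # 0 for words absent from this class
#     return (count + 1) / (total_count + vocab_size)
#
# e.g. laplace_prob('football', freq_s, total_cnts_features_s, total_features)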
dict(zip(new_word_list,prob_q_with_ls))","sub_path":"testing_words.py","file_name":"testing_words.py","file_ext":"py","file_size_in_byte":8255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"408202593","text":"import random\nseed =int( 7823*random.random())\n\n\nbatch_size =4\nraw_size =512# image size\npath_images='/dev/shm/DR512/*.jpeg'# image files\n\n#Store trained model in this directory.\nweights_dir = './temp-theano_alex/models/'\n\n#SURF related parameters\nsurf_crop_size=raw_size*9//10\nsurf_distance=64\nsurf_size=64\n\n#Learning parameters\nlearning_rate = 0.01\nmomentum = 0.9\nweight_decay = 0.0005\nn_epochs = 60 \n\n\nif __name__ == '__main__':\n import sys\n print(globals()[sys.argv[1]])\n","sub_path":"bl100/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"577815348","text":"from pydoc import locate\n\nCOMPARISON_EXACT = 'exact'\nCOMPARISON_IEXACT = 'iexact'\nCOMPARISON_CONTAINS = 'contains'\nCOMPARISON_ICONTAINS = 'icontains'\nCOMPARISON_GT = 'gt'\nCOMPARISON_GTE = 'gte'\nCOMPARISON_LT = 'lt'\nCOMPARISON_LTE = 'lte'\nCOMPARISON_IN = 'in'\nCOMPARISON_STARTSWITH = 'startswith'\nCOMPARISON_ISTARTSWITH = 'istartswith'\nCOMPARISON_ENDSWITH = 'endswith'\nCOMPARISON_IENDSWITH = 'iendswith'\nCOMPARISON_ISNULL = 'isnull'\nCOMPARISONS = (\n COMPARISON_EXACT,\n COMPARISON_IEXACT,\n COMPARISON_CONTAINS,\n COMPARISON_ICONTAINS,\n COMPARISON_GT,\n COMPARISON_GTE,\n COMPARISON_LT,\n COMPARISON_LTE,\n COMPARISON_IN,\n COMPARISON_STARTSWITH,\n COMPARISON_ISTARTSWITH,\n COMPARISON_ENDSWITH,\n COMPARISON_IENDSWITH,\n COMPARISON_ISNULL,\n)\n\nCONNECTORS_OR = 'OR'\nCONNECTORS_AND = 'AND'\nCONNECTORS = (\n CONNECTORS_OR,\n CONNECTORS_AND,\n)\n\nAGGREGATES_SUM = 'SUM'\nAGGREGATES_COUNT = 'COUNT'\nAGGREGATES_MAX = 'MAX'\nAGGREGATES_MIN = 'MIN'\nAGGREGATES_AVG = 'AVG'\nAGGREGATES = (\n AGGREGATES_SUM,\n AGGREGATES_COUNT,\n AGGREGATES_MAX,\n AGGREGATES_MIN,\n AGGREGATES_AVG,\n)\n\nDjangoQ = locate('django.db.models.Q')\nDjangoQuerySet = locate('django.db.models.QuerySet')\nObjectDoesNotExist = locate('django.core.exceptions.ObjectDoesNotExist')\nMultipleObjectsReturned = locate('django.core.exceptions.MultipleObjectsReturned')\n","sub_path":"django_mock_queries/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"595879001","text":"import time\n\nsuspects = ['Mr. Bent','Ms. Stoon','Mrs. Locks']\n\nname = input(\"Welcome to the mystery, detective. What is your name? \")\n\ndef intro(name=''):\n\tprint('''\n\t\n\t\t `'::::. 
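The `if __name__ == '__main__'` block above turns config.py into a one-setting CLI: it looks up the first command-line argument in the module's global namespace via `globals()[sys.argv[1]]` and prints that value. A minimal sketch of reading one setting from another script this way (the file location and setting name are only illustrative):

import subprocess

# Ask config.py for one of its module-level settings; assumes config.py
# is reachable in the working directory under this name.
out = subprocess.run(["python", "config.py", "raw_size"],
                     capture_output=True, text=True, check=True)
raw_size = int(out.stdout.strip())  # -> 512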
+{"seq_id":"595879001","text":"import time\n\nsuspects = ['Mr. Bent','Ms. Stoon','Mrs. Locks']\n\nname = input(\"Welcome to the mystery, detective. What is your name? \")\n\ndef intro(name=''):\n\tprint('''\n\t\n\t\t `'::::. \n\t _____A_ \n \t / _ /\\ \n\t __/__/\\__/ \\___ \n\t---/__|\" '' \"| /___/\\---- \n\t |''|\"'||'\"| |' '|| \n\t `\"\"`\"\"))\"\"`\"`\"\"\"\"` \n\t\t\n\t''')\n\tprint(\"Welcome Detective\",name.title())\n\ttime.sleep(3)\n\tprint(\"You had been planning on going to the house, but..\")\n\ttime.sleep(3)\n\tprint(\"Why does everyone seem so worried?\")\n\ttime.sleep(3)\n\tprint(\"Has something happened here?\")\n\ttime.sleep(3)\n\tprint(\"You get a very bad feeling that something is wrong\")\n\ttime.sleep(3)\n\n\tprint(\"You can gather [w]ho is at the house, or check the [f]lower beds outside\")\n\tchoice = input(\"Pick an option \")\n\n\tif choice == 'w':\n\t\ttime.sleep(1)\n\t\tprint(\"There are several people at the house\")\n\t\ttime.sleep(1)\n\t\tprint(f'{suspects[0]},{suspects[1]},{suspects[2]}')\n\t\ttime.sleep(1)\n\telse:\n\t\ttime.sleep(1)\n\t\tprint(\"You go outside and check out the flowers and find nothing interesting\")\n\t\ttime.sleep(1)\n\t\tintro()\n\n\tchoice = input(f'{name.title()}, would you like to [c]ontinue or [g]o home? ')\n\ttime.sleep(1)\n\n\tif choice == 'c':\n\t\tprint(\"You go in and see Mrs. Cortez on the floor\")\n\t\tinvestigate()\n\telse:\n\t\tprint(\"You decide to go home because mysteries are not your thing\")\n\t\tintro()\n\ndef investigate(name=''):\n\tprint('You choose to investigate the room and take a look around')\n\ttime.sleep(3)\n\tprint('You notice that there are smashed teacups on the floor next to the body of Mrs. Cortez')\n\ttime.sleep(3)\n\tprint('You also notice some footprints')\n\ttime.sleep(3)\n\tprint('You go back to the living room')\n\tlivingroom()\n\ndef livingroom(name=''):\n\tprint('You can now interview your three suspects')\n\tprint('Who do you want to interview?')\n\tprint(f'''\n\tPress 1 for {suspects[0]}\n\tPress 2 for {suspects[1]}\n\tPress 3 for {suspects[2]}\n\t''')\n\tchoice = input(\"Pick an option \")\n\t# input() returns a string, so compare against string literals, not ints\n\tif choice == '1':\n\t\tbent()\n\telif choice == '2':\n\t\tstoon()\n\telif choice == '3':\n\t\tlocks()\n\ndef bent(name=''):\n\tprint('Mr. Bent was the best painter in town. He paints paintings for Mrs. Cortez.')\n\tprint(\"Mrs. Cortez's home is actually his childhood home.\")\n\tkitchen(name='')\n\ndef stoon(name=''):\n\tprint('Ms. Stoon was the cook for Mrs. Cortez.')\n\tprint('She usually serves her tea.')\n\tkitchen(name='')\n\ndef locks(name=''):\n\tprint('Mrs. Locks is the maid for Mrs. Cortez.')\n\tprint('She cleans and tidies rooms in the big mansion.')\n\tkitchen(name='')\n\ndef kitchen(name=''):\n\tprint('You bring everyone into the room.')\n\ttime.sleep(3)\n\tprint('You are to ask a question to reveal the murderer.')\n\ttime.sleep(3)\n\tprint('What question should you ask?')\n\ttime.sleep(3)\n\tprint('a) Who murdered her!? or b) Who gave her tea this morning?')\n\tchoice = input(\"Pick an option \")\n\tif choice == 'a':\n\t\tprint('No one answers. YOU LOSE.')\n\telif choice == 'b':\n\t\tprint('Ms. Stoon breaks out in tears. She admits she did it.')\n\t\ttime.sleep(3)\n\t\tprint('Mrs. Cortez had been planning a dinner party for a while.')\n\t\ttime.sleep(3)\n\t\tprint('She never invited her. So Ms. Stoon poisoned her tea.')\n\t\ttime.sleep(3)\n\t\tprint('Congrats! You caught the murderer! YOU WIN.')\n\nintro(name)\n\n","sub_path":"games/murder.py","file_name":"murder.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
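Since `input()` returns a string, menu handlers like `livingroom()` above must compare against string literals; a dispatch dict makes that explicit and replaces the if/elif chain. A sketch reusing the suspect functions from the game (`handlers` and `prompt_choice` are hypothetical names, not in the original file):

handlers = {'1': bent, '2': stoon, '3': locks}

def prompt_choice():
    choice = input("Pick an option ")
    action = handlers.get(choice)   # None for anything other than '1', '2', '3'
    if action:
        action()
    else:
        print("Please pick 1, 2 or 3")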
+{"seq_id":"183097319","text":"## mine information from wiki pages ##\r\n\r\n# import\r\n\r\nfrom pyquery import PyQuery as pq\r\nimport urllib.request as urllib2\r\nimport html.parser\r\nfrom math import log\r\nimport re\r\n\r\nimport queue\r\nfrom threading import Thread, Lock\r\n\r\n# constants\r\n\r\nwiki_base = \"http://en.wikipedia.org\"\r\nwiki = wiki_base + \"/wiki/Category:Video_game_lists_by_platform\"\r\nwiki2 = wiki_base + \"/w/index.php?title=Category:Video_game_lists_by_platform&pagefrom=Windows%0AIndex+of+Windows+games+%28P%29#mw-pages\"\r\napi_base = wiki_base + \"/w/api.php?action=query&prop=revisions&rvprop=content&format=json&titles=\"\r\n\r\nsynonyms = { \"3DO Interactive Multiplayer\" : \"3DO\", \"Enix home computer\" : \"Enix\",\r\n \"Nintendo Entertainment System\" : \"NES\", \"Super Nintendo Entertainment System\" : \"SNES\",\r\n \"Nintendo GameCube\" : \"GameCube\",\r\n \"PC Engine CD\" : \"TurboGrafx-16\", \"PC Engine\" : \"TurboGrafx-16\", \"Windows\" : \"PC\",\r\n \"Microsoft Windows\" : \"PC\",\r\n \"Windows Mobile Professional\" : \"Pocket PC\", \"PlayStation Portable\" : \"PSP\",\r\n \"PlayStation 2\" : \"PS2\", \"PlayStation 3\" : \"PS3\", \"PlayStation 4\" : \"PS4\",\r\n \"Macintosh\" : \"Mac\", \"PlayStation Vita\" : \"Vita\", \"Apple ][\" : \"Apple II\" }\r\n\r\ngenres = [\"educational\", \"adventure\", \"golf\", \"shogi\", \"role-playing\", \"open-world\", \"shooter\", \"trading\",\r\n \"visual novel\", \"puzzle\", \"shoot -em up\", \"shoot em up\", \"racing\", \"maze\", \"card\", \"tennis\", \"strategy\",\r\n \"football\", \"sports\", \"interactive fiction\", \"action\", \"platform\", \"stealth\", \"chess\", \"pinball\"]\r\n\r\n# functions\r\n\r\ndef unwiki(text, quiet = True):\r\n \"\"\"convert text in wiki markup format, into plain text\"\"\"\r\n # remove full html comments\r\n while (\"<!--\" in text and \"-->\" in text):\r\n text = text[: text.find(\"<!--\")] + text[text.find(\"-->\") + 3 :]\r\n quiet or print(\"-html: \\t\" + text)\r\n\r\n # remove tags, but keep inner contents\r\n tags = [\"sup\", \"small\", \"s\", \"center\", \"u\", \"strong\", \"nowiki\"]\r\n for tag in tags:\r\n bgn = \"<\" + tag + \">\"\r\n end = \"</\" + tag + \">\"\r\n if (bgn in text and end in text):\r\n text = text.replace(bgn, \" \").replace(end, \" \")\r\n \r\n # remove tags\r\n tags = [\"ref\", \"br\", \"Br\", \"span\", \"sup\", \"small\", \"center\", \"s\", \"div\", \"u\", \"strong\", \"nowiki\"]\r\n tags = tags + list(map(lambda s: s.upper(), tags))\r\n for tag in tags:\r\n while \"<\" + tag in text:\r\n # find first instance of tag w/o another tag inside of it\r\n opn = -1\r\n while (True):\r\n # candidates for the tag's end, in priority order; take the first that matches\r\n opn = text.find(\"<\" + tag, opn + 1)\r\n end = (list(map(lambda s: text.find(s, opn + 1) + len(s), filter(lambda s: s in text,\r\n [\"</\" + tag + \">\", tag + \">\", \"/>\", \">\"]))))[0]\r\n\r\n # if same tag is found within the contents of the tag, then try again, otherwise break\r\n if not \"<\" + tag in text[opn + 1 : end]:\r\n break\r\n orig = text\r\n text = text.replace(text[opn : end], \"\" if tag != \"br\" else \"\\n\")\r\n\r\n # if no change, then try another tag, return to this tag later\r\n if (orig == text):\r\n tags.append(tag)\r\n break\r\n\r\n # remove standalone tags\r\n for tag in tags:\r\n text = text.replace(\"<\" + tag + \"/>\", \"\")\r\n\r\n if (\"<\" in text and \">\" 
in text):\r\n print(\"unknown tag? \" + text[text.find(\"<\") : text.find(\">\") + 1])\r\n \r\n quiet or print(\"-tag: \\t\" + text)\r\n \r\n # remove style attributes\r\n styles = [\"style\", \"width\", \"scope\", \"rowspan\", \"colspan\", \"class\", \"align\"]\r\n for style in styles:\r\n if \" \" + style + \"=\" in text:\r\n ind = text.find(style)\r\n end = text.find(\"|\", ind)\r\n text = text.replace(text[ind : end + 1], \"\") if end != -1 else \\\r\n text.replace(text[ind :], \"\")\r\n quiet or print(\"-style: \\t\" + text)\r\n\r\n # balance template brackets\r\n if balanceof(text) != 0:\r\n text += \"}}\" * (balanceof(text))\r\n \r\n # remove templates\r\n temps = [\"Anchor\", \"Citation needed\"]\r\n for temp in temps:\r\n if temp in text and \"{{\" in text and \"}}\" in text:\r\n ind = text.find(temp)\r\n text = text.replace(text[text.rfind(\"{{\", 0, ind) :\r\n text.find(\"}}\", ind) + 2], \"\")\r\n quiet or print(\"-temp: \\t\" + text)\r\n\r\n # remove link targets\r\n while (\"[[\" in text and \"|\" in text and \"]]\" in text):\r\n # find first \"|\" bar inside of \"[[\" \"]]\" brackets\r\n ind = lnk = end = -1\r\n while (lnk == -1 or end == -1):\r\n ind = text.find(\"|\", ind + 1)\r\n lnk = text.rfind(\"[[\", text.rfind(\"]]\", 0, ind) + 1, ind)\r\n end = text.find(\"]]\", ind)\r\n\r\n # end if no more \"|\" bar characters\r\n if (ind == -1):\r\n break\r\n if (ind == -1):\r\n break\r\n\r\n # regular target link, so remove target\r\n if (not text[ind : ind + 3] == \"|]]\"):\r\n text = text[: lnk ] + text[ind + 1 : end] + text[end + 2 :]\r\n\r\n # automatic target link\r\n else:\r\n # remove text in parentheses\r\n if (text.rfind(\"(\", 0, ind) != -1):\r\n text = text[: text.rfind(\"(\", 0, ind)].strip() + \\\r\n text[text.rfind(\")\", 0, ind) + 1 :].strip()\r\n\r\n # remove text after comma\r\n if (text.rfind(\",\", 0, ind) != -1):\r\n text = text[: text.rfind(\",\", 0, ind)].strip() + \\\r\n text[ind + 1 :].strip()\r\n\r\n # remove \"Wikipedia:\" marker\r\n if ((\"Wikipedia:\") in text):\r\n text = text.replace(\"Wikipedia:\", \"\")\r\n text = text[: ind] + text[ind + 1 :]\r\n quiet or print(\"-target: \\t\" + text)\r\n \r\n # remove links\r\n while ((\"[[\") in text and (\"]]\") in text):\r\n text = text.replace(\"[[\", \"\").replace(\"]]\", \"\")\r\n quiet or print(\"-link: \\t\" + text)\r\n\r\n # remove link with single bracket\r\n while ((\"[\") in text and (\"]\") in text and (\"http\") in text):\r\n m = re.search(\"(.*)\\[http[^\\s\\[\\]]+\\s+([^\\[\\]]+)\\](.*)\", text, re.DOTALL | re.I)\r\n m2 = re.search(\"(.*)\\[http[^\\s\\[\\]]+\\](.*)\", text, re.DOTALL | re.I)\r\n if (m):\r\n text = \"\".join(m.groups())\r\n elif (m2):\r\n text = \"\".join(m2.groups())\r\n else:\r\n break\r\n quiet or print(\"-bracket: \\t\" + text)\r\n \r\n # replace misc templates - vgy, Nihongo, date, color, flagicon, excl, etc\r\n regexes = [('', r'[V|v]gy\\s*\\|\\|?(\\d{4})(?:\\|\\d{4})?'),\r\n ('', r'[V|v]gy\\s*\\|[T|t][B|b][A|a]'),\r\n ('', r'[N|n]ihongo\\s*\\|([^\\{\\}]*?)\\|[^\\{\\}]*'),\r\n (' ', r'[D|d]ts\\s*\\|(\\d{4}.\\d{1,2}.?\\d{1,2}?)'),\r\n (' ', r'[D|d]ts\\s*\\|([\\w\\s\\d,-| ]*)'),\r\n ('', r'[D|d]ate\\s*\\|([\\w\\d\\s-]+)\\|?.*?'),\r\n ('', r'[C|c]olor\\s*\\|[^\\{\\}]*?\\|([^\\{\\}]*)'),\r\n ('', r'[F|f]lagicon\\s*\\|([^\\}]+)'),\r\n ('', r'[N|n]owrap\\s*\\|+([^\\{\\}]*?)'),\r\n ('', r'[S|s]mall\\s*\\|([^\\{\\}]*?)'),\r\n ('', r'[S|s]up\\s*\\|([^\\{\\}]*?)'),\r\n ('', r'[N|n]oitalic\\s*\\|([^\\{\\}]*?)'),\r\n ('', r'[S|s]c\\|([^\\{\\}]*?)'),\r\n ('', 
r'([Y|y]es|[N|n]o|[P|p]artial|[M|m]aybe|dunno|Y|N|y|n)(?:\\|[^\\{\\}]*)?'),\r\n ('', r'(Cancelled)\\|?[^\\{\\}]*?'),\r\n ('', r'[S|s]ort\\|[^\\{\\}\\|]*?\\|([^\\{\\}]*)'),\r\n ('', r'(?:[C|c]itation [N|n]eeded|[C|c]n)(?:.?span)?(?:\\|[^\\{\\}]*?)?'),\r\n ('', r'[C|c]ite[^\\{\\}]*?'),\r\n ('', r'[R|r]ef[^\\|]*?\\|[^\\{\\}]*?(?:\\|[^\\{\\}]*)?'),\r\n ('', r'[R|r]efn\\|[^\\{\\}]+?'),\r\n ('', r'[D|d][i|n][s]?[^\\{\\}]*?'),\r\n ('', r'[S|s]fn\\s*\\|[^\\{\\}]*?'),\r\n ('', r'[E|e]fn\\s*\\|[^\\{\\}]*?'),\r\n ('', r'((?:[N|n]/?[A|a])|(?:[T|t][B|b][A|a])[^\\{\\}]*?)'),\r\n ('', r'(?:AUS|BRA|CAN|GER|ESP|EU|FIN|FRA|JPN|UK|USA)'),\r\n (' ', r'(AUS|US|UK|EU|JP)\\|([^\\{\\}]*?)'),\r\n ('', r'#time:[^\\{\\}]*?,\\s*.\\|([\\w\\d\\s-]*)'),\r\n (' ', r'[R|r]elease date and age\\s*\\|(\\d*)\\|?(\\d*)\\|?(\\d*)[^\\{\\}]*?'),\r\n (' ', r'[S|s]tart date and age\\s*\\|(\\d*)\\|?(\\d*)\\|?(\\d*)[^\\{\\}]*?'),\r\n (' ', r'[S|s]tart.?[D|d]ate\\|?(\\d*)\\|?(\\d*)\\|?(\\d*)[^\\{\\}]*?'),\r\n (' ', r'[E|e]nd [D|d]ate\\s*\\|(\\d*)\\|?(\\d*)\\|?(\\d*)[^\\{\\}]*?'),\r\n ('', r'[V|v]grelease new\\|([^\\{\\}v]*)\\|v=\\d\\|([^\\{\\}]*)'),\r\n #(' ', r'[V|v](?:grelease|ideo game release)\\s*new\\s*\\|+(?:v=\\d\\|)?' + r'(\\w*)\\|?([^\\{\\}\\|]*)(\\|{0,1})' * 6),\r\n (' ', r'[V|v](?:grelease|ideo game release)\\s*(?:new)?\\s*\\|{0,2}(?:v=\\d\\|)?'+'([^=\\|\\}]*)(?:=|\\|)([^=\\|\\}]*)'),\r\n (' ', r'[V|v](?:grelease|ideo game release)\\s*(?:new)?\\s*\\|{0,2}(?:v=\\d\\|)?'+'([^=\\|\\}]*)(?:=|\\|)([^=\\|\\}]*)(\\|{0,2})' * 2),\r\n (' ', r'[V|v](?:grelease|ideo game release)\\s*(?:new)?\\s*\\|{0,2}(?:v=\\d\\|)?'+'([^=\\|\\}]*)(?:=|\\|)([^=\\|\\}]*)(\\|{0,2})' * 3),\r\n (' ', r'[V|v](?:grelease|ideo game release)\\s*(?:new)?\\s*\\|{0,2}(?:v=\\d\\|)?'+'([^=\\|\\}]*)(?:=|\\|)([^=\\|\\}]*)(\\|{0,2})' * 4),\r\n (' ', r'[V|v](?:grelease|ideo game release)\\s*(?:new)?\\s*\\|{0,2}(?:v=\\d\\|)?'+'([^=\\|\\}]*)(?:=|\\|)([^=\\|\\}]*)(\\|{0,2})' * 5),\r\n (' ', r'[V|v](?:grelease|ideo game release)\\s*(?:new)?\\s*\\|{0,2}(?:v=\\d\\|)?'+'([^=\\|\\}]*)(?:=|\\|)([^=\\|\\}]*)(\\|{0,2})' * 6),\r\n ('', r'[V|v]grtbl(?:-tx|-bl)?\\|?([^\\{\\}]*?)'),\r\n (' ', r'[V|v](?:grelease|ideo game release\\*)\\|'),\r\n (' ', r'[V|v](?:grelease|ideo game release\\s*)\\|{1,2}([^=\\{\\}]*)(=?)([^=\\{\\}]*?)'),\r\n (' ', r'[V|v](?:grelease|ideo game release)\\s*(?:new)?\\|{1,2}([^\\|]+?)\\|([^\\{\\}]+)'),\r\n ('', r'[V|v](?:g|ideo game).?rating[s]?\\|([^\\{\\}]*?)'),\r\n ('', r'(?:[O|o]fficial\\s)?[W|w]eb(?:[S|s]ite)?\\s*\\|?([^\\{\\}]+)'),\r\n ('', r'(?:[D|d]ecrease|[I|i]ncrease|[L|l]oss)'),\r\n ('', r'([U|u][S|s]\\$?\\|?[^\\{\\}]+)'),\r\n ('', r'([Y|y][E|e][N|n]\\|?[^\\{\\}]+)'),\r\n ('', r'([J|j][P|p][Y|y]\\|?[^\\{\\}]+)'),\r\n ('', r'([S|s][E|e][K|k]\\|?[^\\{\\}]+)'),\r\n ('', r'([T|t][Y|y][O|o]\\|?[^\\{\\}]+)'),\r\n ('', r'([C|c][N|n][Y|y]\\|?[^\\{\\}]+)'),\r\n ('', r'((?:[J|j]asdaq|[H|h]kex)\\|?[^\\{\\}]+)'),\r\n ('', r'(€\\|?[^\\{\\}]+)'),\r\n ('', r'[F|f]ormat.?[N|n]um[^\\{\\}]*'),\r\n ('', r'[C|c]ollapsible[^\\|\\{\\}]*\\|(?:\\s*framestyle[^\\|]*?\\|)?\\s*(?:title\\s*=\\s*)?(?:([^\\|]*?\\d[^\\|]*?)|(?:[^\\|]*()[^\\|]+))[\\| ]*(?:\\s*titlestyle[^\\|]*?\\|)?' 
+ r'(?:\\d=)*([^\\{\\}]*\\|?)' * 4),\r\n ('', r'[F|f]lat[ ]?[L|l]ist\\|([^\\{\\}]+)'),\r\n ('', r'[U|u]bl\\|((?:[^\\|\\{\\}]+\\|{0,3})+)'),\r\n ('', r'[U|u]nbulleted\\s[L|l]ist([^\\{\\}]*?)'),\r\n ('', r'[H|h]list\\s*\\|([^\\{\\}]+?)'),\r\n ('', r'[U|u][R|r][L|l]\\s*\\|?([^\\{\\}]+?)(?:\\s*\\|\\s*[^\\{\\}]+)?'),\r\n ('', r'([C|c]heck[ ]?[M|m]ark)\\|[^\\{\\}]*?'),\r\n ('', r'([C|c]ross)\\|[^\\{\\}]*?'),\r\n ('', r'[P|p]lain.?list\\s*\\|([^\\{\\}]*?)\\s*'),\r\n ('', r'[T|t]ooltip\\s*\\|(.*)(?:\\|[^\\{\\}]*?)?'),\r\n ('', r'[A|a]bbr\\|([^\\{\\}]*?)'),\r\n ('', r'(.)'),\r\n ('', r'[N|n]ot a typo\\|([^\\{\\}]*?)'),\r\n ('', r'[U|u]nknown'),\r\n ('', r'[R|r]eflist'),\r\n ('', r'[V|v]ideo game lists by platform')]\r\n\r\n for regex in regexes:\r\n r = re.compile(r'(.*)\\{\\{\\s*' + regex[1] + '\\s*\\}\\}(.*)', re.DOTALL | re.I)\r\n\r\n # remove all occurences of template\r\n while (re.match(r, text)):\r\n m = re.match(r, text)\r\n delim = regex[0]\r\n text = delim.join([g for g in m.groups() if g is not None])\r\n quiet or print(\"-regex: \\t\" + text)\r\n\r\n # expand cpu spec template\r\n cpus = ['Z80', 'z80', '6502', 'POKEY']\r\n for cpu in cpus:\r\n r = re.compile(r'(.*)\\{\\{' + cpu + r'\\|?([^\\{\\}\\|]*?)' * 2 + r'\\}\\}(.*)')\r\n while (re.match(r, text)):\r\n m = re.match(r, text)\r\n text = m.group(1) + \" \" + m.group(2) + \"x \" + cpu + \" @ \" + m.group(3) + \" MHz\" + m.group(4)\r\n\r\n # expand raster details template\r\n rasters = ['Raster', 'raster']\r\n for raster in rasters:\r\n r = re.compile(r'(.*)\\{\\{' + raster + r'\\|rgb\\s*=\\s*(\\d+)\\|vertical\\s*=\\s*(\\d+)\\|size\\s*=\\s*(\\d+)\\}\\}(.*)')\r\n while (re.match(r, text)):\r\n m = re.match(r, text)\r\n text = m.group(1) + \" \" + \"RGB raster, vertical orientation (\" + m.group(4) + \"-inch diagonal)\" + m.group(5)\r\n\r\n # replace fraction templates\r\n fracs = [(r'frac\\|1\\|4', \"¼\"), (r'frac\\|1\\|2', \"½\"), (r'frac\\|3\\|4', \"¾\"),\r\n (r'frac\\|3\\|1\\|2', \"3½\")]\r\n for frac in fracs:\r\n text = re.sub(r'\\{\\{' + frac[0] + r'\\}\\}', frac[1], text)\r\n\r\n # clean numerical lists\r\n r = re.compile(r'(.*)(\\|)\\s*\\d{1,2}\\s*=(.*)(\\||\\}|$)(.*)', re.DOTALL | re.I)\r\n while (re.match(r, text)):\r\n m = re.match(r, text)\r\n text = \"\".join(m.groups())\r\n\r\n # remove all unknown templates\r\n while (\"}}\" in text and \"{{\" in text):\r\n temp = text[text.find(\"{{\") : text.find(\"}}\") + 2]\r\n text = text.replace(temp, \"\")\r\n print(\"unknown template? 
\" + temp)\r\n if temp == \"\":\r\n break\r\n\r\n # remove partial html comments\r\n while (\"\" in text):\r\n text = (text[: text.find(\"\") + 3 :] if \"-->\" in text else \"\")\r\n quiet or print(\"-html: \\t\" + text)\r\n\r\n # remove italics, bold\r\n if (\"''\" in text or \"\\\\'\\\\'\" in text):\r\n text = text.replace(\"'''\", \"\").replace(\"''\", \"\") \\\r\n .replace(\"\\\\'\" * 3, \"\").replace(\"\\\\'\" * 2, \"\")\r\n quiet or print(\"-italic: \\t\" + text)\r\n\r\n # convert escape characters and unicode characters\r\n text = convert(text)\r\n quiet or print(\"-escape: \\t\" + text)\r\n \r\n # combine multiple lines, using bar character\r\n text = \" | \" .join(t.strip() for t in text.splitlines() if t.strip() != \"\") \\\r\n if text != \"\" else \"\"\r\n quiet or print(\"-lines: \\t\" + text)\r\n \r\n # reorder text if in \"xxx, The yyy\" format\r\n text = reorder(text)\r\n quiet or print(\"-reorder: \\t\" + text)\r\n\r\n # eliminate redundant spacing\r\n text = re.sub(r'\\s+', \" \", text)\r\n\r\n # eliminate irregular characters\r\n text = text.replace(\"•\", \"\")\r\n \r\n return text.strip()\r\n\r\ndef convert(text):\r\n \"\"\"convert escape characters and unicode characters and html symbols\"\"\"\r\n # convert all known unicode / escape chars\r\n while (\"\\\\\" in text):\r\n ind = text.find(\"\\\\\")\r\n if ind < 0 or ind + 1 >= len(text):\r\n break\r\n if (text[ind + 1] == \"n\"):\r\n text = text.replace(\"\\\\n\", \"\\n\")\r\n if (text[ind + 1] == \"t\"):\r\n text = text.replace(\"\\\\t\", \" \")\r\n elif (text[ind + 1] == \"u\"):\r\n code = text[ind + 2 : ind + 6]\r\n text = text.replace(\"\\\\u\" + code, chr(int(code, 16)))\r\n elif (text[ind + 1] == \"'\"):\r\n text = text.replace(\"\\\\'\", \"\\'\")\r\n elif (text[ind + 1] == \"\\\"\"):\r\n text = text.replace(\"\\\\\\\"\", \"\\\"\")\r\n elif (text[ind + 1] == \"\\\\\"):\r\n text = text.replace(\"\\\\\\\\\", \"\\\\\")\r\n elif (text[ind + 1] == \"}\"):\r\n text = text.replace(\"{{\\\\}}\", \"/\")\r\n elif (text[ind + 1] == \"\\\\\"):\r\n text = text.replace(\"\\\\b\", \"\\b\")\r\n else:\r\n print(\"convert error: \" + text[ind + 1])\r\n #raise BaseException\r\n break\r\n\r\n # convert html symbols\r\n text = html.parser.HTMLParser().unescape(text)\r\n text = text.replace(\"–\", \"–\")\r\n text = text.replace(\"—\", \"—\")\r\n text = text.replace(\"<\", \"<\")\r\n text = text.replace(\">\", \">\")\r\n text = text.replace(\"&\", \"&\")\r\n text = text.replace(\"×\", \"×\")\r\n text = text.replace(\"©\", \"©\")\r\n text = text.replace(\" \", \"\\n\")\r\n \r\n return text\r\n\r\ndef reorder(text):\r\n \"\"\"reorder strings that are in format [title, The] to [The title]\"\"\"\r\n m = re.match(\"(.+)(?:,|;) The(.*)\", text)\r\n return \"The \" + m.group(1) + m.group(2) if m else text\r\n\r\ndef balanceof(text):\r\n \"\"\"find bracket balance of string, return 0 if balanced, else diff in brackets\"\"\"\r\n return text.count(\"{{\") - text.count(\"}}\")\r\n\r\ndef bar_split(text):\r\n \"\"\"split text divided by wiki formatted bars, into list of strings\"\"\"\r\n text = re.sub(r'(\\{\\{[^\\{\\}]+)\\|\\|([^\\{\\}]+)', r'\\1|\\2', text)\r\n strs = []\r\n while (True):\r\n # use non-greedy matching to find first bar separator, from the left\r\n m = re.search(r'(.+?)([\\|]{2,})(.+)', text)\r\n if (m is None):\r\n # if no bar separator found, then return w/ remaining text\r\n strs.append(text.strip())\r\n return strs\r\n else:\r\n # split up text\r\n strs.append(m.group(1).strip())\r\n bars = m.group(2)\r\n text = 
m.group(3)\r\n\r\n # append blank entries, if multiple bar separators (ex: abc |||| def)\r\n lenb = len(bars) // 3 - 1 if (len(bars) % 3 == 0) else \\\r\n len(bars) // 2 - 1 if (len(bars) % 2 == 0) else 0\r\n strs += [\"\"] * lenb\r\n\r\ndef get_mo(text):\r\n \"\"\"take string representing month or abbreviation, convert it to number\"\"\"\r\n mo = { \"january\" : \"1\", \"february\" : \"2\", \"march\" : \"3\", \"april\" : \"4\", \"may\" : \"5\", \"june\" : \"6\",\r\n \"july\" : \"7\", \"august\" : \"8\", \"september\" : \"9\", \"october\" : \"10\", \"november\" : \"11\", \"december\" : \"12\",\r\n \"jan\" : \"1\", \"feb\" : \"2\", \"mar\" : \"3\", \"apr\" : \"4\", \"jun\" : \"6\", \"jul\" : \"7\", \"aug\" : \"8\",\r\n \"sep\" : \"9\", \"sept\" : \"9\", \"oct\" : \"10\", \"nov\" : \"11\", \"dec\" : \"12\" }\r\n return mo.get(text.lower().strip(\".\"), \"UNK\")\r\n\r\ndef year(text, multiline = True):\r\n \"\"\"interpret date data, convert to mm/dd/yyyy format\"\"\"\r\n # if multiple lines given, then find year for each line\r\n if (len(text.splitlines()) > 1 and multiline):\r\n return \" | \".join([year(t) for t in text.splitlines() if t != \"\"])\r\n if (len(text.split(\"|\")) > 1): # \" | \"\r\n return \" | \".join([year(t) for t in text.split(\"|\") if year(t) != \"\"])\r\n\r\n # extract region information\r\n regions = re.search(r\"(([A-Z]{2,3}([ ]?(,|/)[ ]?)?)+)\", text)\r\n reg_str = \" \" + regions.group(1) if regions else \"\"\r\n\r\n # search for date in format: 10/21/2003\r\n date0 = re.search(r\"(\\d{1,2})/(\\d{1,2})/(\\d{4})\", text)\r\n if (date0):\r\n return date0.group(1) + \"/\" + date0.group(2) + \"/\" + date0.group(3) + reg_str\r\n\r\n # search for date in format: 2003/10/21\r\n date0a = re.search(r\"(\\d{4})/(\\d{1,2})/(\\d{1,2})\", text)\r\n if (date0a):\r\n return date0a.group(2) + \"/\" + date0a.group(3) + \"/\" + date0a.group(1) + reg_str\r\n\r\n # search for date in format: October 21, 2003\r\n date1 = re.search(r\"([A-Z|a-z|\\.]+) (\\d{1,2})(?:th)?.? 
(\\d{4})\", text)\r\n if (date1):\r\n return get_mo(date1.group(1)) + \"/\" + date1.group(2) + \"/\" + date1.group(3) + reg_str\r\n\r\n # search for date in format: 2003-10-21\r\n date2 = re.search(r\"(\\d{4})[^\\d](\\d{1,2})[^\\d](\\d{1,2})\", text)\r\n if (date2):\r\n return date2.group(2) + \"/\" + date2.group(3) + \"/\" + date2.group(1) + reg_str\r\n \r\n # search for date in format: 21 October 2003\r\n date1b = re.search(r\"(\\d+) ([A-Z|a-z|\\.]+) (\\d+)\", text)\r\n if (date1b):\r\n return get_mo(date1b.group(2)) + \"/\" + date1b.group(1) + \"/\" + date1b.group(3) + reg_str\r\n\r\n # search for date in format: October 2003\r\n date1a = re.search(r\"([A-Z|a-z|\\.]+)\\s+([1|2]\\d{3})\", text)\r\n if (date1a):\r\n return date1a.group(2) + reg_str\r\n\r\n # search for date in format: 2003\r\n date3 = re.search(r\"([1|2]\\d{3})\", text)\r\n if (date3):\r\n return date3.group(1) + reg_str\r\n\r\n # search for date in format: 10/14/03\r\n date4 = re.search(r\"(\\d{2})[^\\d](\\d{2})[^\\d](\\d{2})\", text)\r\n if (date4):\r\n yr = date4.group(3)\r\n yr = \"19\" + yr if int(yr) >= 50 else \"20\" + yr\r\n return date4.group(1) + \"/\" + date4.group(2) + \"/\" + yr\r\n\r\n # search for 'TBA' string\r\n tba = re.search(r\"[T|t][B|b][A|a]\", text)\r\n if (tba):\r\n # don't repeat the string 'TBA' \r\n reg_str = re.sub(r'\\s*[T|t][B|b][A|a]\\s*', '', reg_str)\r\n return \"TBA\" + reg_str\r\n\r\n # search for 'TBD' string\r\n tbd = re.search(r\"[T|t][B|b][D|d]\", text)\r\n if (tbd):\r\n # don't repeat the string 'TBD' \r\n reg_str = re.sub(r'\\s*[T|t][B|b][D|d]\\s*', '', reg_str)\r\n return \"TBD\" + reg_str\r\n\r\n # if date not parsed, then return empty string\r\n return \"\"\r\n\r\ndef country(text):\r\n \"\"\"find country that text is referring to\"\"\"\r\n if ((\"NA\") in text or (\"North America\") in text):\r\n return \"NA\"\r\n elif ((\"JP\") in text or (\"Japan\") in text):\r\n return \"JP\"\r\n elif ((\"AS\") in text or (\"Asia\") in text):\r\n return \"AS\"\r\n elif ((\"EU\") in text or (\"Europe\") in text):\r\n return \"EU\"\r\n elif ((\"CA\") in text or (\"Canada\") in text):\r\n return \"CA\"\r\n elif ((\"PAL\") in text):\r\n return \"PAL\"\r\n elif ((\"FR\") in text or (\"France\") in text):\r\n return \"FR\"\r\n elif ((\"DE\") in text or (\"Ger\") == text):\r\n return \"DE\"\r\n elif ((\"AU\") in text or (\"Australia\") in text or (\"Australasia\") in text):\r\n return \"AU\"\r\n elif ((\"BR\") in text or (\"Brazil\") in text):\r\n return \"BR\"\r\n elif ((\"SK\") in text or (\"South Korea\") in text):\r\n return \"SK\"\r\n elif ((\"WW\") in text or (\"World\") in text):\r\n return \"WW\"\r\n elif ((\"INT\") in text or (\"International\") in text):\r\n return \"INT\"\r\n else:\r\n return \"\"\r\n\r\ndef to_canon(text, dev = True, quiet = True):\r\n \"\"\"find simplest string representing the developer\"\"\"\r\n sufs = [\"ltd\", \"inc\", \"llc\", \"pty\", \"ab\", \"studios\", \"studio\", \"multimedia\",\r\n \"media\", \"entertainment\", \"international\", \"corporation\", \"corp\", \"co\",\r\n \"software\", \"soft\", \"games\", \"productions\", \"production\", \"company\",\r\n \"international\", \"int\", \"foundation\", \"interactive\", \"group\",\r\n \"publishing\", \"limited\", \"digital\", \"design\", \"invention\", \"wireless\"]\r\n spcs = [('á', 'a'), ('ä', 'a'), ('é', 'e'), ('ë', 'e'), ('ı', 'i'), ('ï', 'i'),\r\n ('ø', 'o'), ('ō', 'o'), ('õ', 'o'), ('ó', 'o'), ('ü', 'u'), ('ú', 'u'),\r\n ('ū', 'u'), ('ç', 'c'), ('ł', 'l'), ('ñ', 'n'), ('ş', 's'),\r\n ('&', 'and')]\r\n \r\n # 
convert to lowercase, remove punctuation\r\n canon = re.sub(r'[,/\\.]', \"\", text.strip(\" \\r\\n\\t\").lower())\r\n quiet or print(canon)\r\n\r\n # remove parenthesized text, if enough information left\r\n m = re.match(r'(.*)\\s*\\([^\\(\\)]*?\\)(.*)', canon)\r\n if m:\r\n rem = \"\".join(m.groups())\r\n if len(rem) > log(len(canon) - len(rem)):\r\n canon = rem\r\n quiet or print(canon)\r\n\r\n # convert special characters\r\n for char, norm in spcs:\r\n canon = canon.replace(char, norm)\r\n quiet or print(canon)\r\n\r\n # remove all remaining non alphanumerical characters, if enough info\r\n if len(canon) > 4:\r\n canon = re.sub(r'[^0-9A-Za-z ]', '', canon)\r\n quiet or print(canon)\r\n\r\n # remove suffixes\r\n if (dev):\r\n for suf in sufs:\r\n m = re.match(r'(.*?)\\s+' + suf + r'$', canon)\r\n if m:\r\n rem = m.group(1)\r\n # only remove suffix if 'rem' holds enough info, relative to 'suf'\r\n if len(rem) > log(len(suf), 2):\r\n canon = rem\r\n quiet or print(canon)\r\n\r\n # remove 'the'\r\n canon = re.sub(r'^[T|t]he ', '', canon).strip()\r\n\r\n # make all plural objects singular\r\n if (dev):\r\n canon = re.sub(r'([0-9A-Za-z]+)s(?:\\s|$)', r'\\1', canon)\r\n\r\n # remove spaces\r\n canon = re.sub(r' ', '', canon)\r\n \r\n return canon\r\n\r\ndef split_bal(text, r = r'\\n|, |;|/|\\|'):\r\n \"\"\"split up text, so that parentheses are balanced, strips text of whitespace\"\"\"\r\n texts = re.split(r'(' + r + r')', text)\r\n i = 0\r\n while i < len(texts):\r\n while texts[i].count(\"(\") != texts[i].count(\")\"):\r\n if i + 1 >= len(texts):\r\n break\r\n texts[i : i + 2] = [texts[i] + texts[i + 1]]\r\n i += 1\r\n texts = [t.strip(\" \\t\\n\\r\") for t in texts if not re.match(r, t)]\r\n return texts\r\n \r\ndef find_devs(text):\r\n \"\"\"parse list of developers in delimeter separated text, return official names separated by bar\"\"\"\r\n text = re.sub(r', [I|i]nc', ' Inc', text)\r\n text = re.sub(r', [L|l][L|l][C|c]', ' LLC', text)\r\n #text = text.replace(\"}}\", \"}}|\").replace(\"{{\", \"|{{\")\r\n return \" | \".join( \\\r\n [find_dev(clean_dev(t.strip(\" \\t\\n\\r\"))) for t in\r\n split_bal(text, r = r', |\\n|/|\\|') if #r'\\n|,|/|\\|\r\n re.match(r'^\\s+$', t) is None and t != \"\"])\r\n\r\ndef find_dev(text):\r\n \"\"\"try to find developer from list of developers\"\"\"\r\n # preserve content in parenthesis, if any\r\n m = re.search(r'(\\([^\\(\\)]*?\\))', text)\r\n appd = \" \" + m.group(1) if m else \"\"\r\n\r\n # find smallest, canonical representation of given text\r\n canon = to_canon(text)\r\n if canon in devs:\r\n # found match with developer / publisher in database\r\n return devs[canon] + appd\r\n else:\r\n #print(\"no match for: \" + text.strip())\r\n return text.strip()\r\n\r\ndef clean_dev(text):\r\n \"\"\"clean developer text, using specific rules\"\"\"\r\n text = text.replace(\"Additional work by:\", \"\")\r\n return text\r\n\r\ndef get_devs():\r\n \"\"\"get all developers, in dict format: { canonical_name : full_name }\"\"\"\r\n devs = {}\r\n faq = open('dev_faq.txt', 'r', encoding = 'utf-8')\r\n for line in faq.readlines():\r\n line = line.strip()\r\n [canon, dev,] = line.split(\"\\t\")\r\n if not canon in devs:\r\n devs[canon] = dev\r\n return devs\r\n\r\ndef isTitle(low, dat):\r\n \"\"\"return true if column is for video game titles, and name is not blank\"\"\"\r\n return ((low.find(\"title\") != -1 or low.find(\"name\") != -1 or\r\n low.find(\"game\") != -1) and dat != \"—\")\r\n\r\ndef isDev(low):\r\n \"\"\"returns true if column is for video game 
developers\"\"\"\r\n return low.find(\"develop\") != -1 or low.find(\"program\") != -1\r\n\r\ndef isPub(low):\r\n \"\"\"returns true if column is for video game publishers\"\"\"\r\n return low.find(\"publish\") != -1\r\n\r\ndef get_file(value):\r\n \"\"\"extract link to file, from wiki text\"\"\"\r\n beg = value.find(\"[[\") + 2 if \"[[\" in value else 0\r\n end = value.find(\"|\") if \"|\" in value else value.find(\"]]\")\r\n end = len(value) if end == -1 else end\r\n return value[beg : end].strip(\" \\t\\r\\n\").replace(\" \", \"_\")\r\n\r\ndef find_consoles(value):\r\n \"\"\"find synonyms for all consoles in delimeter separated text\"\"\"\r\n return \" | \".join([synonyms[cons.strip()] if cons.strip() in synonyms else cons.strip()\r\n for cons in re.split(r', |;|/|\\|', value)\r\n if cons.strip() != \"\"])\r\n\r\ndef separate(value):\r\n \"\"\"separate text using bar, instead of comma or slash\"\"\"\r\n return \" | \".join([v.strip()\r\n for v in split_bal(value, r', |;|/|\\|') #r',|;|/|\\|'\r\n if v.strip() != \"\"])\r\n\r\ndef preproc_space(text):\r\n \"\"\"line preprocessing - removes first space\"\"\"\r\n return text.replace(\" \", \"\", 1) if text.startswith(\" \") else text\r\n\r\ndef preproc_star(text):\r\n \"\"\"line preprocessing - removes first asterisk\"\"\"\r\n return text.replace(\"*\", \" |\", 1) if text.startswith(\"*\") else text\r\n\r\ndef preproc(text):\r\n \"\"\"line preprocessing - removes first space, removes first asterisk\"\"\"\r\n return preproc_star(preproc_space(text))\r\n\r\ndef release(value, quiet = True):\r\n \"\"\"interpret release date in wiki text\"\"\"\r\n value = value.replace(\"''\", \"|''\") # fix for minecraft\r\n value = unwiki(value.replace(\"}}\", \"}}|\").replace(\"{{\", \"|{{\"), quiet)\r\n value = re.sub(r'([^ ])\\|([^ ])', r'\\1 | \\2', value)\r\n if re.match(r'^[\\d\\|]+\\s*\\-\\s*[\\d\\|]+$', value):\r\n return \"-\".join([year(v) for v in value.split(\"-\")])\r\n if re.match(r'^([\\w\\s]+-[\\d\\s]+\\|?)+$', value):\r\n value = value.replace(\"-\", \"|\")\r\n out = \"\"\r\n values = [v for v in value.split(\"|\") if not re.match(r'^[\\s,;/]+$', v) and v != '']\r\n for val, i in zip(values, range(len(values))):\r\n yr = year(val)\r\n if yr != \"\":\r\n par = \" \" + val[val.find(\"(\") : val.find(\")\") + 1] if \"(\" in val and \")\" in val else \"\"\r\n par = par.replace(\"(\", \"[\").replace(\")\", \"]\")\r\n out += yr + par + \" | \"\r\n else:\r\n out = out.strip(\" | \") + \") | \" if out.count(\"(\") > out.count(\")\") else out\r\n val = val.strip().replace(\"(\", \"[\").replace(\")\", \"]\")\r\n val = synonyms.get(val, val)\r\n out += val\r\n out += \" (\" if i != len(values) - 1 else \"\"\r\n quiet or print(out)\r\n \r\n out = out.strip(\"| (\")\r\n out += \")\" if out.count(\"(\") > out.count(\")\") else \"\"\r\n #out = re.sub(r'\\s*\\(\\s*\\)\\s*', ' ', out)\r\n out = re.sub(r'(^|\\|)[^\\(\\)\\|]*\\(\\)[^\\(\\)\\|]*\\|', r'\\1', out)\r\n return out.strip(\" \\t\\n\\r\")\r\n\r\ndef find_wiki_url(text):\r\n \"\"\"find first link in wiki markup text, return link and name\"\"\"\r\n link = text[text.find(\"[[\") + 2 : text.find(\"]]\")]\r\n link = link[: link.find(\"|\")] if \"|\" in link else link\r\n link = link.replace(\"Wikipedia:\", \"\")\r\n name = unwiki(text[text.find(\"[[\") : text.find(\"]]\") + 2]) #name = link \r\n link = link.replace(\" \", \"_\")\r\n link = convert(link)\r\n return (link, name)\r\n\r\ndef open_wiki_url(dat, f = None, games = {}, skip_to_game = None):\r\n # find first wikipedia link, contained in dat, write 
to file, return result as dict\r\n if \"[[\" not in dat and \"]]\" not in dat:\r\n return {}\r\n\r\n # find link in wiki markup text\r\n (link, name) = find_wiki_url(dat)\r\n\r\n # if skipping, and desired title is not reached, then return w/ empty dict\r\n if skip_to_game is not None and skip_to_game == name:\r\n skip_to_game = None\r\n if skip_to_game:\r\n return {}\r\n\r\n # if games dict already has the game, then return w/ empty dict\r\n if name in games:\r\n return {}\r\n\r\n # update games dict, with data from infobox\r\n games.update(mine_wiki_info(name, link, f = f))\r\n\r\n return games\r\n\r\ndef update_games(games, name, dat, col):\r\n \"\"\"update games info with new data, return games and name\"\"\"\r\n low = col.lower()\r\n if (name == \"\"):\r\n # set the name of game, add a new blank entry to games dictionary\r\n if isTitle(low, dat):\r\n name = reorder(dat)\r\n (games, name) = parse_name(games, name)\r\n else:\r\n t = dat\r\n yr = year(t)\r\n cn = country(col)\r\n\r\n # add year in which game was released\r\n if ((low.find(\"year\") != -1 or low.find(\"date\") != -1 or\r\n col == \"Release\" or col == \"Released\" or col == \"First released\") and\r\n (cn == \"\")):\r\n games[name][0] += yr + \" | \"\r\n\r\n # add game genre\r\n if (low.find(\"genre\") != -1):\r\n games[name][1] = t\r\n\r\n # add game developers / programmers\r\n if (isDev(low)):\r\n games[name][2] = find_devs(dat)\r\n\r\n # add publishing company, or multiple companies separated by bar\r\n if (isPub(low)):\r\n games[name][3] = find_devs(dat)\r\n\r\n # add regions in which game was released\r\n if (low.find(\"region\") != -1):\r\n games[name][5] = t\r\n\r\n # add rating information, can be from multiple standards\r\n if (low.find(\"esrb\") != -1 or low.find(\"pegi\") != -1 or \\\r\n low.find(\"cero\") != -1 or low.find(\"acb\") != -1):\r\n games[name][6] += col + \" \" + t + \", \"\r\n\r\n # find misc info / details about a game\r\n if (low.find(\"details\") != -1 or low.find(\"description\") != -1):\r\n g = [genre for genre in genres if genre in t.lower()]\r\n if (not(len(g) < 1)):\r\n games[name][1] += g[0]\r\n\r\n # check if 'yes' or checked or release date given\r\n if (yr != \"\" or t.lower() == \"yes\" or \"check mark\" in t.lower()):\r\n if (cn != \"\"):\r\n games[name][5] += cn + \", \"\r\n if (yr != \"\"):\r\n games[name][0] += yr + \" \" + cn + \" | \"\r\n return (games, name)\r\n\r\ndef parse_name(games, name, title = \"\"):\r\n \"\"\"parses game title for extra info: year, regions. 
add blank entry to games dict\"\"\"\r\n year = \"\"\r\n dev = \"\"\r\n regions = \"\"\r\n group = m = \"\"\r\n\r\n # look for all extra information in parantheses\r\n while (group != None and m != None):\r\n m = re.search(r'(.*)\\(([^\\(\\)]+)\\)(.*)', name)\r\n if (m):\r\n name = m.group(1).strip()\r\n group = m.group(2).strip()\r\n if (group):\r\n # add region info\r\n if (re.match(r'([A-Z]{2}\\s*)+', group) or \\\r\n country(group) != \"\"):\r\n regions += \", \".join([country(g) for g in group.split()]) + \" \"\r\n\r\n # add release date info\r\n elif (re.match(r'\\*([0-9]{4})\\*', group)):\r\n year = year(group)\r\n\r\n # otherwise, leave content in parenthesis, and end loop\r\n else:\r\n name += \" (\" + group + \")\"\r\n group = None\r\n \r\n name += \" \" + m.group(3).strip() if m.group(3) != None else \"\"\r\n\r\n # create a new blank entry for the game\r\n if True: ###(not name in games) or (games[name][4] == title):\r\n games[name] = [\"\",] * 7\r\n games[name][0] = year\r\n games[name][2] = dev\r\n games[name][3] = dev\r\n games[name][4] = title\r\n games[name][5] = regions.strip()\r\n return (games, name)\r\n else:\r\n # return blank name if duplicate\r\n return (games, \"\")\r\n\r\ndef post_process(games, name, title):\r\n \"\"\"add title to platforms data, remove trailing comma\"\"\"\r\n # add platform info\r\n games[name][4] += title\r\n\r\n # remove trailing bar from release date info\r\n games[name][0] = games[name][0].strip(\" | \").strip()\r\n\r\n # remove trailing comma from region info\r\n games[name][5] = games[name][5].strip(\", \").strip()\r\n\r\n # remove trailing comma from rating info\r\n games[name][6] = games[name][6].strip(\", \").strip()\r\n \r\n return (games, name)\r\n\r\ndef write_db(games, name, f = None, quiet = True):\r\n \"\"\"write games data of [name] to file, tab separated, if opened\"\"\"\r\n # if file is given, not None, then write to file\r\n if (f):\r\n if (name != \"\"):\r\n # file contents are tab separated, and marked with null if unknown\r\n data = [name,] + [d if d != \"\" else \"null\" for d in games[name]]\r\n f.write(\"\\t\".join(data) + \"\\n\")\r\n else:\r\n print(\"empty name\")\r\n\r\n # else, print out data for debugging\r\n else:\r\n data = [name,] + [d if d != \"\" else \"null\" for d in games[name]]\r\n quiet or print(\"\\t\".join(data))\r\n \r\ndef get_platforms(url):\r\n \"\"\"get a list of elements w/ platform data from wiki url\"\"\"\r\n d = pq(url)\r\n c = d(\"div#mw-pages\")(\"div.mw-content-ltr\")\r\n return [l.find(\"a\") for l in c(\"li\")]\r\n\r\ndef mine_wiki_img(img):\r\n \"\"\"mine wiki image page, return link to full image\"\"\"\r\n try:\r\n img = \"File:\" + img if \"File\" not in img else img\r\n url = wiki_base + \"/wiki/\" + urllib2.quote(img)\r\n d = pq(url)\r\n except BaseException:\r\n print(\"img error: \" + img)\r\n return \"\"\r\n a = d(\"div.fullImageLink\")(\"a\")\r\n if not a or not 'href' in a[0].keys():\r\n return \"\"\r\n else:\r\n return \"https:\" + a[0].attrib['href']\r\n\r\ndef mine_wiki_info(name, sub_url, info = \"vg\", depth = 3):\r\n \"\"\"mine wiki game page, get cover image, info from infobox\"\"\"\r\n # games = { name : (year, genre, dev, pub, platforms, regions, rating,\r\n # url, img, series, engine, modes, media\r\n # direct, prod, design, prog, artist, writer, composer,\r\n # cabinet, arcade, cpu, sound, display, distributor) }\r\n attrs = [\"release\", \"genre\", \"develop\", \"publish\", \"platform\", \"region\",\r\n \"rating\", \"wiki_url\", \"image\", \"series\", \"engine\", 
\"mode\", \"media\",\r\n \"direct\", \"produc\", \"design\", \"prog\", \"artist\", \"writer\", \"compose\",\r\n \"cabinet\", \"arcade\", \"cpu\", \"sound\", \"display\", \"distributor\"] \\\r\n if info == \"vg\" else \\\r\n [\"_name\", \"type\", \"wiki_url\", \"logo\", \"location\", \"foundation\",\r\n \"parent\", \"predecessor\", \"successor\", \"defunct\", \"fate\", \r\n \"founder\", \"employees\", \"people\", \"equity\", \"website\"]\r\n print(\" mining: \" + name)\r\n try:\r\n url = api_base + urllib2.quote(sub_url) if api_base not in sub_url else sub_url\r\n #print(url)\r\n res = urllib2.urlopen(url, timeout = 30)\r\n html = str(res.read())\r\n except BaseException:\r\n print(\"error: \" + name)\r\n return ([], {})\r\n short_url = sub_url #url[url.find(\"titles=\") + len(\"titles=\") :]\r\n infobox = False\r\n infoend = False\r\n names = []\r\n text = \"\"\r\n\r\n # retry if pages contains a redirect link\r\n if len(html) < 2500 and \"Infobox\" not in html and \"infobox\" not in html and \\\r\n \"#REDIRECT\" in html:\r\n (link, _) = find_wiki_url(html[html.find(\"#REDIRECT\") + 8 :])\r\n return mine_wiki_info(name, link, info, depth - 1) if depth > 0 else {}\r\n\r\n # return empty dict for missing pages\r\n if len(html) < 250 and \"Infobox\" not in html and \"infobox\" not in html and \\\r\n '\"missing\"' in html:\r\n return ([], {})\r\n\r\n # only keep main text of document\r\n html = html[html.find(\"wikitext\") :]\r\n\r\n games = {}\r\n games[name] = [\"\",] * len(attrs)\r\n\r\n uind = [i for (i, a) in zip(range(len(attrs)), attrs) if \"wiki_url\" in a][0]\r\n games[name][uind] = short_url\r\n\r\n lines = html.split(\"\\\\\\\\n\")\r\n for line, next in zip(lines, lines[1:]):\r\n # convert escape chars and unicode chars\r\n line = preproc(convert(line))\r\n next = preproc_space(next)\r\n\r\n # irregular end\r\n if (line.startswith(\"|}}\")) and not (next.startswith(\"|\") or next.startswith(\"!\")):\r\n infobox = False\r\n infoend = True\r\n \r\n # detect start of infobox\r\n if \"infobox\" in line.lower() and not infobox and \\\r\n ((info == \"vg\" and (\"vg\" in line.lower() or \"game\" in line.lower())) or \\\r\n (info != \"vg\" and (\"company\" in line.lower()))):\r\n #print(line)\r\n infobox = True\r\n\r\n # detect infobox contents\r\n elif (line.startswith(\"|\") or line.startswith(\"!\") or text) and infobox:\r\n # add current line to text accumulator\r\n text += line\r\n\r\n # if next line is a continuation of first, then keep appending\r\n if (not(next.startswith(\"|\") or next.startswith(\"!\") or \\\r\n next.startswith(\"}\")) or balanceof(text) > 0) and \\\r\n not(line.endswith(\"}}\") and balanceof(text) < 0):\r\n continue\r\n\r\n # clean up special characters\r\n text = text.lstrip(\"|\").lstrip(\"!\")\r\n text = text.rstrip(\"}\") if balanceof(text) < 0 else text + \" \"\r\n\r\n # check for no equals sign\r\n if \"=\" not in text:\r\n text = re.sub(r'\\s{6,}', \"=\", text)\r\n print(\"no = : \" + text + \" in \" + name)\r\n\r\n # extract field name, value\r\n field = text[: text.find(\"=\")].lower()\r\n value = text[text.find(\"=\") + 1 :]\r\n\r\n # clear text, but save temporarily\r\n prev = text\r\n text = \"\"\r\n\r\n ###\r\n if field.strip().lower() == \"image\":\r\n print(\"uses image: \" + name)\r\n \r\n # check if title attribute\r\n if \"title\" in field and not \"file:\" in value.lower() and \\\r\n not \"image:\" in value.lower() and not \"italic\" in field.lower():\r\n new_name = unwiki(value)\r\n\r\n # try to maximize name length, so less duplicates are 
created\r\n if new_name != name and new_name != \"\" and \\\r\n (len(new_name) > len(name) or games[name].count(\"\") < len(attrs) - 1):\r\n # if conflicting name, then remove other name\r\n if games[name].count(\"\") >= len(attrs) - 1:\r\n games.pop(name)\r\n \r\n name = new_name\r\n games[name] = [\"\",] * len(attrs)\r\n games[name][uind] = short_url\r\n\r\n # ignore infobox-specific attributes, distribution, etc\r\n if not (any([f in field for f in [\"italic\", \"collapsible\", \"state\", \"show\", \"caption\", \"website\", \"spinoff\", \"origin\", \"_size\", \"_alt\", \"padding\"]])):\r\n # find corresponding attribute\r\n for attr, i in zip(attrs, range(len(attrs))):\r\n if attr in field or (\"website\" in attr and \"homepage\" in field):\r\n #print(\"found: \" + attr)\r\n #if \"release\" in attr:\r\n # print(value)\r\n value = mine_wiki_img(get_file(value)) if \"image\" in attr or \"logo\" in attr else value\r\n value = release(value) if \"release\" in attr or \"foundation\" in attr else value\r\n value = unwiki(value)\r\n value = find_devs(value) if \"dev\" in attr or \"pub\" in attr else value\r\n value = find_consoles(value) if \"platform\" in attr else value\r\n value = separate(value) if not(any([a in attr for a in [\"url\", \"webs\", \"page\", \"image\", \"logo\", \"release\", \"foundation\", \"defunct\", \"dev\", \"pub\", \"location\"]])) else value\r\n games[name][i] += \" \" + value\r\n games[name][i] = games[name][i].strip(\" \\t\\n\\r\")\r\n break\r\n\r\n # irregular end\r\n if line.endswith(\"}}\") and not (next.startswith(\"|\") or next.startswith(\"!\")) and \\\r\n balanceof(line) < 0:\r\n infobox = False\r\n infoend = True\r\n \r\n # detect end of infobox\r\n elif (line.startswith(\"}}\") and not (next.startswith(\"|\") or next.startswith(\"!\")) and infobox):\r\n infobox = False\r\n infoend = True\r\n\r\n # if end of infobox reached, then request for game to be written to file\r\n if (infoend and name != \"\" and name in games):\r\n names.append(name)\r\n infoend = False\r\n\r\n return (names, games)\r\n \r\ndef mine_wiki_page(title, url, f = None, games = {}, skip_to_game = None, infobox = False, info = \"vg\"):\r\n \"\"\"mine wiki page for list of video games, return dict of game info\"\"\"\r\n # games = { name : (year, genre, dev, pub, platforms, regions, rating) }\r\n print(\"mining: \" + title)\r\n \r\n response = urllib2.urlopen(url, timeout = 30)\r\n html = str(response.read())\r\n header = \"\"\r\n endcol = False\r\n gmlist = False\r\n islist = False\r\n table = False\r\n unbal = False\r\n rowsp = 0\r\n data = cols = []\r\n end_of_page = False\r\n list_of_games = re.compile(r'[L|l]ist of.*[G|g]ames')\r\n\r\n # read all lines in html document\r\n for line in html.split(\"\\\\\\\\n\"):\r\n # convert escape chars and unicode chars\r\n line = convert(line)\r\n \r\n # if end of table row, then add data to dict\r\n if table and data != [] and (line.startswith(\"|-\") or line.startswith(\"|}\")):\r\n name = \"\"\r\n\r\n if not infobox:\r\n # account for rowspan by adding blank columns for each row\r\n if (rowsp != 0):\r\n data = [\"\"] * (len(cols) - len(data)) + data\r\n rowsp -= 1\r\n \r\n # ignore row entry if not enough data, doesn't match column\r\n if (not len(data) < len(cols)):\r\n # update games dict\r\n for dat, col in zip(data, cols):\r\n #print(col + \": \" + unwiki(dat))\r\n (games, name) = update_games(games, name, unwiki(dat), col)\r\n\r\n # if preceding header contains year info, then update w/ year info\r\n if year(header) != \"\":\r\n if name != 
\"\" and year(header) not in games[name][0]:\r\n (games, name) = update_games(games, name, header, \"Year\")\r\n\r\n # do post processing, then write to database file\r\n if (name != \"\"):\r\n (games, name) = post_process(games, name, title)\r\n write_db(games, name, f)\r\n else:\r\n for dat, col in zip(data, cols):\r\n # if title element, then follow link\r\n low = col.lower()\r\n if (info == \"vg\" and isTitle(low, dat)) or \\\r\n (info == \"dev\" and isDev(low)) or \\\r\n (info == \"pub\" and isPub(low)):\r\n work_queue.put(dat)\r\n #game = (open_wiki_url(dat, f = f, games = games, skip_to_game = skip_to_game))\r\n #games.update(game)\r\n #skip_to_game = None if game != {} else skip_to_game\r\n \r\n data = []\r\n unbal = False\r\n\r\n # blank line\r\n if (len(line) < 1):\r\n continue\r\n\r\n # detect start of wikitable\r\n elif \"class\" in line and \"wikitable\" in line:\r\n print(\"table found\")\r\n table = True\r\n endcol = False\r\n cols = []\r\n\r\n # detect wikitable column declarations\r\n elif line.startswith(\"!\") and table and not endcol:\r\n cols += map(unwiki, line.strip(\"!|\").split(\"!!\"))\r\n\r\n # end of table column\r\n elif line.startswith(\"|}\") or \\\r\n (line.startswith(\"|-\") and \"sortbottom\" in line):\r\n print(\"table end\")\r\n table = False\r\n unbal = False\r\n elif line.startswith(\"|-\") and table:\r\n if cols != [] and not endcol:\r\n endcol = True\r\n print(\"cols: \" + str(cols))\r\n continue\r\n\r\n # skip table caption\r\n elif line.startswith(\"|+\") and table:\r\n continue\r\n\r\n # skip table header\r\n elif line.startswith(\"||\") and table:\r\n continue\r\n\r\n # row entry, can start with \"|\" or \"!\" if not in column declaration\r\n elif (line.startswith(\"|\") or line.startswith(\"!\")) and \\\r\n table and endcol and not unbal:\r\n if (\"rowspan\" in line):\r\n m = re.match(r'.*rowspan\\s*=[\"\\s\\\\]*(\\d+).*', line)\r\n rowsp = int(m.group(1)) if m else 0\r\n #print(\"rowsp: \" + str(rowsp))\r\n if (\"colspan\" in line):\r\n m = re.match(r'.*colspan\\s*=\\s*\\\"?(\\d+)\\\"?\\s*.*', line)\r\n colsp = int(m.group(1)) if m else 0\r\n line = line.strip(\"|\").strip()\r\n line = line + (\"||\" + line) * (colsp - 1)\r\n\r\n # add all lines to data list\r\n data += bar_split(line.strip(\"|\").strip(\"!\"))\r\n\r\n # determine if brackets in line are unbalanced\r\n unbal = balanceof(line) != 0\r\n elif line.startswith(\"*\") and table:\r\n data[len(data) - 1] += \" \".join(bar_split(line.strip(\"*\"))) + \" \"\r\n \r\n # parse headings\r\n elif line.startswith(\"=\") and line.endswith(\"=\"):\r\n header = line.strip(\"=\")\r\n hlower = header.lower()\r\n print(\"header: \" + header)\r\n # determine if the end of the wiki page is reached\r\n if (\"see also\" in hlower or \"references\" in hlower or\r\n \"external links\" in hlower or \"footnote\" in hlower or\r\n \"update notes\" in hlower or \"upcoming\" in hlower):\r\n print(\"unstructured list end\")\r\n gmlist = False\r\n end_of_page = True\r\n\r\n # parse list\r\n elif \"{{Div col}}\" in line and not end_of_page:\r\n print(\"list start\")\r\n islist = True\r\n\r\n # parse list item\r\n elif line.startswith(\"*\") and islist and not table:\r\n if not infobox:\r\n (games, name) = parse_name(games, unwiki(line[1:]), title)\r\n write_db(games, name, f)\r\n elif info == \"vg\":\r\n work_queue.put(line.strip(\"*\"))\r\n #game = (open_wiki_url(line.strip(\"*\"), f = f, games = games, skip_to_game = skip_to_game))\r\n #games.update(game)\r\n #skip_to_game = None if game != {} else 
skip_to_game\r\n #print(name)\r\n\r\n # parse end of list\r\n elif (\"{{Div col end}}\") in line:\r\n print (\"list end\")\r\n islist = False\r\n\r\n # parse unstructured list\r\n elif (\"list\" in line or \"List\" in line) and \\\r\n re.search(list_of_games, unwiki(line)) and not end_of_page:\r\n print(\"unstructured list start\")\r\n gmlist = True\r\n\r\n # parse unstructured list entry\r\n elif line.startswith(\"*\") and gmlist and not table and \\\r\n not \"Exclus\" in header and not end_of_page:\r\n if len(line.split(\" \")) < 10 and not \"only release\" in line:\r\n if not infobox:\r\n (games, name) = parse_name(games, unwiki(line[1:]), title)\r\n write_db(games, name, f)\r\n elif info == \"vg\":\r\n work_queue.put(line.strip(\"*\"))\r\n #game = (open_wiki_url(line.strip(\"*\"), f = f, games = games, skip_to_game = skip_to_game))\r\n #games.update(game)\r\n #skip_to_game = None if game != {} else skip_to_game\r\n #print(name)\r\n\r\n # otherwise, assume line is part of previous col and append data\r\n elif table and data != []:\r\n data[len(data) - 1] += line\r\n unbal = balanceof(data[len(data) - 1])\r\n\r\n return games\r\n\r\ndef mine_wiki(f = None, games = {}, skip_to = (None, None), end_at = None, infobox = False, info = \"vg\"):\r\n \"\"\"mine wikipedia list of all video games for info\"\"\"\r\n # games dict: { name : (year, genre, dev, pub, platforms, regions, rating) }\r\n pfs = get_platforms(wiki) + get_platforms(wiki2)\r\n (skip_to_plat, skip_to_game) = skip_to\r\n\r\n def readURL(thread_id):\r\n while (True):\r\n try:\r\n # try to retrieve a string from the queue\r\n dat = work_queue.get(block = False)\r\n\r\n # skip if no url found\r\n if \"[[\" not in dat and \"]]\" not in dat:\r\n continue\r\n\r\n # find link in wiki markup text\r\n (link, name) = find_wiki_url(dat)\r\n #print(\"thread \" + str(thread_id) + \" mining: \" + name)\r\n\r\n # if games dict already has the game, then skip\r\n if name in games:\r\n continue\r\n\r\n # mine a single wikipedia game page, in parallel\r\n (names, new_games) = mine_wiki_info(name, link, info)\r\n\r\n # lock shared resources while writing new data\r\n lock.acquire()\r\n \r\n # write all new games to the database\r\n for name in names:\r\n # only update / write if not already added\r\n if not name in games:\r\n print(\"updating: \" + name) #print(\"mining: \" + name)\r\n games[name] = new_games[name]\r\n write_db(games, name, f)\r\n\r\n # unlock after writing to shared resources\r\n lock.release()\r\n \r\n except queue.Empty:\r\n break\r\n\r\n # set up multithreading\r\n global work_queue, lock\r\n N_THREADS = 64\r\n threads = []\r\n work_queue = queue.Queue()\r\n lock = Lock()\r\n \r\n titles = []\r\n for pf in pfs:\r\n title = pf.text\r\n\r\n # ignore combined pages, redundant pages\r\n if (title == \"List of Commodore 64 games\" or\r\n title == \"List of Amiga games\" or\r\n title == \"List of PC video games\" or\r\n title == \"List of free PC titles\"):\r\n continue\r\n if (title.find(\"Super Famicom and Super Nintendo\") != -1):\r\n continue\r\n if (title.find(\"network\") != -1 or title.find(\"multiplayer\") != -1 or\r\n title.find(\"exclusive\") != -1 or title.find(\"downloadable\") != -1):\r\n continue\r\n if (title.find(\"CD-ROM\") != -1 or title.find(\"DVD-9\") != -1):\r\n continue\r\n if (title.find(\"arcade video games:\") != -1):\r\n continue\r\n if (title.find(\"Gamesharing\") != -1 or\r\n title.find(\"trackball\") != -1 or\r\n title.find(\"System Link\") != -1 or\r\n title.find(\"Move\") != -1 or\r\n title.find(\"Games 
with Gold\") != -1 or\r\n title.find(\"Xbox One applications\") != -1 or\r\n title.find(\"3D PlayStation\") != -1 or\r\n title.find(\"Draft\") != -1 or\r\n title.find(\"Kinect\") != -1):\r\n continue\r\n\r\n # strip prefix\r\n if (title.startswith(\"List of\")):\r\n title = title.replace(\"List of\", \"\").lstrip()\r\n elif (title.startswith(\"Index of\")):\r\n title = title.replace(\"Index of\", \"\").lstrip()\r\n elif (title.startswith(\"Draft\")):\r\n continue # empty page, ignore\r\n elif (title.startswith(\"Chronology\")):\r\n continue # redundant, ignore (Chronology of Wii games)\r\n else:\r\n # ignore kinect fun labs, platinum hits\r\n continue\r\n\r\n # remove parenthesis / colon for subcategories\r\n if (title.find(\":\") != -1):\r\n title = title[: title.find(\":\")].strip()\r\n if (title.find(\")\") != -1):\r\n title = title[: title.find(\"(\") - 1].strip()\r\n\r\n # independent dreamcast games, other labels, etc\r\n title = title.replace(\"commercially released independently developed\", \"\")\r\n title = title.replace(\"commercial\", \"\")\r\n title = title.replace(\"free\", \"\")\r\n title = title.replace(\"unlicensed and prototype\", \"\")\r\n title.lstrip()\r\n\r\n # strip suffix\r\n if (title.endswith(\"video games\")):\r\n title = title.replace(\"video games\", \"\").rstrip()\r\n elif (title.endswith(\"games\")):\r\n title = title.replace(\"games\", \"\").rstrip()\r\n elif (title.endswith(\"titles\")):\r\n title = title.replace(\"titles\", \"\").rstrip()\r\n elif (title.endswith(\"software\")): # wii u software\r\n title = title.replace(\"software\", \"\").rstrip()\r\n elif (title.endswith(\"applications\")):\r\n title = title.replace(\"applications\", \"\").rstrip()\r\n elif (title.startswith(\"games for the original\")): # Game Boy\r\n title = title.replace(\"games for the original\", \"\").lstrip()\r\n elif (title.startswith(\"Xbox games on\")): # xbox 360 kinect games\r\n title = title.replace(\"Xbox games on\", \"\").lstrip()\r\n #elif (title.find(\"Virtual Console games for\") != -1):\r\n # title = \"Virtual Console\"\r\n else:\r\n # ignore eye toy, exclusives, conversions, accessories, etc\r\n continue\r\n\r\n # find synonyms for game titles\r\n if title in synonyms:\r\n title = synonyms[title]\r\n\r\n if not title in titles:\r\n titles.append(title)\r\n\r\n # if skip_to parameter given, then stop skipping when reached the desired title\r\n if skip_to_plat is not None and skip_to_plat == title:\r\n skip_to_plat = None\r\n\r\n # if end_to parameter given, then stop mining when given title is reached\r\n if end_at is not None and end_at == title:\r\n return games\r\n\r\n if not skip_to_plat:\r\n # for arcade games, visit all subpages instead of parsing main page\r\n if (title == \"arcade\"):\r\n for sub in [\"0..9\",] + [chr(i) for i in range(ord('A'), ord('Z') + 1)] + [\"Not_released\",]:\r\n games.update(mine_wiki_page(title, api_base + pf.attrib['href'][6:] + \":_\" + sub, f, games, skip_to_game, infobox, info))\r\n\r\n # otherwise, use wiki api to find game info, by reading wiki markup source\r\n else:\r\n games.update(mine_wiki_page(title, api_base + pf.attrib['href'][6:], f, games, skip_to_game, infobox, info))\r\n if (not infobox):\r\n f is None or f.write(\"\\n\\n\\n\")\r\n skip_to_game = None\r\n print(title)\r\n\r\n # start all threads\r\n for i in range(N_THREADS):\r\n t = Thread(target = readURL, args = (i,))\r\n t.start()\r\n threads.append(t)\r\n\r\n # merge all threads when done\r\n for thread in threads:\r\n thread.join()\r\n\r\n print(titles)\r\n 
return games\r\n\r\ndef load(f = \"games.txt\", v = True):\r\n \"\"\"load games dict from file, return games info as dict\"\"\"\r\n f = open(f, 'r', encoding = 'utf-8')\r\n games = {}\r\n for line in f.readlines():\r\n line = line.strip().strip(\"\\r\\n\")\r\n # ignore blank lines\r\n if len(line) < 1:\r\n continue\r\n\r\n # ignore comments\r\n elif line.startswith(\"#\"):\r\n continue\r\n\r\n info = line.split(\"\\t\")\r\n name = info[0]\r\n if name not in games:\r\n games[name] = info[1:]\r\n else:\r\n if name != \"\" and name != \" \":\r\n print(\"dup: \" + name)\r\n return games\r\n\r\ndevs = get_devs()\r\n\r\nif __name__ == \"__main__\":\r\n f = open('games_wiki.txt', 'a', encoding = 'utf-8') #games.txt\r\n #games = mine_wiki(f, games = load(\"games_wiki.txt\"), skip_to = (None, None), infobox = True, info = \"vg\")\r\n f.close()\r\n pass\r\n\r\n# TODO - missing PS3 games, need to reupload\r\n# dups - pokemon yellow: special pikachu ed\r\n# split up - pokemon black | white, X | Y\r\n# minecraft stored in db as 'pocket version'\r\n# TODO - use wiki url as key (?) (Portal, Spore)\r\n","sub_path":"miner.py","file_name":"miner.py","file_ext":"py","file_size_in_byte":58592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"12723242","text":"from keras import applications\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Input, Conv2D, MaxPool2D\nfrom keras import backend as k\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping\nimport time\n\nimg_width, img_height = 256, 256\ntrain_data_dir = \"data/train\"\nvalidation_data_dir = \"data/val\"\nnb_train_samples = 129\nnb_validation_samples = 21\nbatch_size = 16\nepochs = 50\ninput_layer = Input(shape=(256,256,3))\nmodel = applications.InceptionV3(include_top=False, weights='imagenet', input_tensor=input_layer, pooling=None)\n\n\"\"\"\nLayer (type) Output Shape Param #\n=================================================================\ninput_1 (InputLayer) (None, 256, 256, 3) 0\n_________________________________________________________________\nblock1_conv1 (Conv2D) (None, 256, 256, 64) 1792\n_________________________________________________________________\nblock1_conv2 (Conv2D) (None, 256, 256, 64) 36928\n_________________________________________________________________\nblock1_pool (MaxPooling2D) (None, 128, 128, 64) 0\n_________________________________________________________________\nblock2_conv1 (Conv2D) (None, 128, 128, 128) 73856\n_________________________________________________________________\nblock2_conv2 (Conv2D) (None, 128, 128, 128) 147584\n_________________________________________________________________\nblock2_pool (MaxPooling2D) (None, 64, 64, 128) 0\n_________________________________________________________________\nblock3_conv1 (Conv2D) (None, 64, 64, 256) 295168\n_________________________________________________________________\nblock3_conv2 (Conv2D) (None, 64, 64, 256) 590080\n_________________________________________________________________\nblock3_conv3 (Conv2D) (None, 64, 64, 256) 590080\n_________________________________________________________________\nblock3_conv4 (Conv2D) (None, 64, 64, 256) 590080\n_________________________________________________________________\nblock3_pool (MaxPooling2D) (None, 32, 32, 256) 
0\n_________________________________________________________________\nblock4_conv1 (Conv2D) (None, 32, 32, 512) 1180160\n_________________________________________________________________\nblock4_conv2 (Conv2D) (None, 32, 32, 512) 2359808\n_________________________________________________________________\nblock4_conv3 (Conv2D) (None, 32, 32, 512) 2359808\n_________________________________________________________________\nblock4_conv4 (Conv2D) (None, 32, 32, 512) 2359808\n_________________________________________________________________\nblock4_pool (MaxPooling2D) (None, 16, 16, 512) 0\n_________________________________________________________________\nblock5_conv1 (Conv2D) (None, 16, 16, 512) 2359808\n_________________________________________________________________\nblock5_conv2 (Conv2D) (None, 16, 16, 512) 2359808\n_________________________________________________________________\nblock5_conv3 (Conv2D) (None, 16, 16, 512) 2359808\n_________________________________________________________________\nblock5_conv4 (Conv2D) (None, 16, 16, 512) 2359808\n_________________________________________________________________\nblock5_pool (MaxPooling2D) (None, 8, 8, 512) 0\n=================================================================\nTotal params: 20,024,384.0\nTrainable params: 20,024,384.0\nNon-trainable params: 0.0\n\"\"\"\nmodel.summary()\n\n# Freeze the layers which you don't want to train. Here I am freezing the first 5 layers.\n\"\"\"\nfor layer in model.layers:\n layer.trainable = False\n\nfor layer in model.layers[:141]:\n layer.trainable = False\n\"\"\"\nx = model.get_layer('mixed7').output\n#x = Conv2D(128,kernel_size=(3,3))(x)\n#x = Conv2D(128,kernel_size=(3,3))(x)\n#x = MaxPool2D(pool_size=(2,2))(x)\n#x = Conv2D(256,kernel_size=(3,3))(x)\n#x = Conv2D(256,kernel_size=(1,1))(x)\n#x = MaxPool2D(pool_size=(2,2))(x)\nx = GlobalAveragePooling2D()(x)\n#x = Dense(512, activation=\"relu\")(x)\n#x = Dropout(0.5)(x)\n#x = Dense(512, activation=\"relu\")(x)\npredictions = Dense(4, activation=\"softmax\")(x)\n\n# creating the final model\nmodel_final = Model(inputs = model.input, outputs = predictions)\nfor layer in model_final.layers[:-11]:\n layer.trainable = False\nmodel_final.summary()\n# compile the model\nmodel_final.compile(loss = \"categorical_crossentropy\", optimizer = optimizers.SGD(lr = 0.0001, momentum = 0.9), metrics=[\"accuracy\"])\n\n# Initiate the train and test generators with data Augumentation\ntrain_datagen = ImageDataGenerator(\nrescale = 1./255,\nhorizontal_flip = True,\nfill_mode = \"nearest\",\nzoom_range = 0.3,\nwidth_shift_range = 0.3,\nheight_shift_range=0.3,\nrotation_range=30)\n\ntest_datagen = ImageDataGenerator(\nrescale = 1./255,\nhorizontal_flip = True,\nfill_mode = \"nearest\",\nzoom_range = 0.3,\nwidth_shift_range = 0.3,\nheight_shift_range=0.3,\nrotation_range=30)\n\ntrain_generator = train_datagen.flow_from_directory(\ntrain_data_dir,\ntarget_size = (img_height, img_width),\nbatch_size = batch_size,\nclass_mode = \"categorical\")\n\nvalidation_generator = test_datagen.flow_from_directory(\nvalidation_data_dir,\ntarget_size = (img_height, img_width),\nclass_mode = \"categorical\"\n)\n\n# Save the model according to the conditions\ncheckpoint = ModelCheckpoint(\"Inception_5.h5\", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)\nearly = EarlyStopping(monitor='val_acc', min_delta=0, patience=14, verbose=1, mode='auto')\n\n\n# Train the model\nt=time.time()\n\nhist = 
model_final.fit_generator(\ntrain_generator,\n#steps_per_epoch = nb_train_samples,\nepochs = epochs,\nvalidation_data = validation_generator,\n#validation_steps = nb_validation_samples,\ncallbacks = [checkpoint, early])\n\nprint('Training time: %s' % (time.time() - t))\n(loss, accuracy) = model_final.evaluate_generator(validation_generator, steps=None, max_queue_size=10, verbose=0)\n\nprint(\"[INFO] loss={:.4f}, accuracy: {:.4f}%\".format(loss,accuracy * 100))\n\nimport matplotlib.pyplot as plt\n# visualizing losses and accuracy\ntrain_loss=hist.history['loss']\nval_loss=hist.history['val_loss']\ntrain_acc=hist.history['acc']\nval_acc=hist.history['val_acc']\n\nplt.figure(1,figsize=(7,5))\nplt.plot(train_loss)\nplt.plot(val_loss)\nplt.xlabel('num of Epochs')\nplt.ylabel('loss')\nplt.title('train_loss vs val_loss')\nplt.grid(True)\nplt.legend(['train','val'],loc='upper left')\n#print plt.style.available # use bmh, classic,ggplot for big pictures\nplt.show()\n\n\nplt.figure(2,figsize=(7,5))\nplt.plot(train_acc)\nplt.plot(val_acc)\nplt.xlabel('num of Epochs')\nplt.ylabel('accuracy')\nplt.title('train_acc vs val_acc')\nplt.grid(True)\nplt.legend(['train','val'],loc=4)\n#print plt.style.available # use bmh, classic,ggplot for big pictures\nplt.show()\n","sub_path":"FeatureTraining/transferLearning.py","file_name":"transferLearning.py","file_ext":"py","file_size_in_byte":7036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"468062704","text":"\nimport os\nfrom pathlib import Path\n\nimport ExternalScript.ExternalScriptRunner as scriptRunner\nfrom Registration.RegisterDatabase import databaseCollection, GetDatabase\n\nregisteredFiles = 0\nregisteredDirs = 0\n\ndef GetNewEntryCount():\n\treturn registeredFiles, registeredDirs\n\ndef RegisterPaths(dirPath, pathGlob, tags):\n\tglobal registeredFiles\n\n\t# Get registry\n\tregistryDB = GetDatabase(\"Registry\")\n\t\n\t# Get paths from pathGlob\n\tpaths = dirPath.glob(pathGlob)\n\n\t# Flag for having modified the database\n\tmodifiedDB = False\n\n\tfor path in paths:\n\t\t# Check if path exists\n\t\texists = os.path.exists(path)\n\n\t\tif (exists == True):\n\t\t\t# If Registry doesn't contain this path, add it to Registry\n\t\t\tregistryDB.AddRow(\"pathCollection\", path = path, type = 1, generated = \"\", tags = tags)\n\n\t\t\t# Increment registered files\n\t\t\tregisteredFiles += 1\n\t\t\t\n\t\t\t# Set modified flag to true so commit will commence at the end of the glob iteration\n\t\t\tmodifiedDB = True\n\n\tif (modifiedDB == True):\n\t\tregistryDB.Commit()\n\ndef RegisterDirectory(parentPath, subDir, tags, generated = \"\"):\n\tglobal registeredDirs\n\n\t# Get path\n\tdirPath = parentPath.joinpath(subDir)\n\n\t# Get registry\n\tregistryDB = GetDatabase(\"Registry\")\n\n\t# Check if path exists\n\texists = os.path.exists(dirPath)\n\t\n\tif (exists == True):\n\t\t# If Registry doesn't contain this path, add it to Registry\n\t\tregistryDB.AddRow(\"pathCollection\", path = dirPath, type = 0, generated = generated, tags = tags)\n\n\t\t# Increment registered directories\n\t\tregisteredDirs += 1\n\n\t\tregistryDB.Commit()","sub_path":"Tools/Registration/RegisterPaths.py","file_name":"RegisterPaths.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"558301280","text":"import requests,os,time,csv,json,sqlite3,sys,io,subprocess,configparser,codecs,operator,pymysql,mysql,datetime\nimport urllib.parse as 
parse\nsys.path.append(r\"..\")\nimport GSDUb.Lib.LF as Common\n\nclass Main(object):\n CurrentPath = os.getcwd()\n LogPath = []\n SQLiteDataBaseFile = \"SDC.db\"\n SQLiteDataBaseFileLocation = os.path.join(os.path.dirname(os.path.abspath(__file__)), SQLiteDataBaseFile)\n\nclass DataProcessor(object):\n def MergeMultiTupleList(TupleList):\n List = []\n for Tuple in TupleList:\n for Data in Tuple:\n List.append(Data)\n return List\n\n\nclass FormatTime():\n def YYYYMMDD(self,Time):\n YYYYMMDD = time.strftime(\"%Y%m%d\",Time)\n return YYYYMMDD\n def YYYYMMDDHHMMSS(self,Time):\n YYYYMMDDHHMMSS = time.strftime(\"%Y-%m-%d-%H-%M-%S\",Time)\n return YYYYMMDDHHMMSS\n\nConnectDataBase = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='hsg@123', db='sdc',\n charset='utf8')\nDataBaseCursor = ConnectDataBase.cursor()\n# TableList = ['atp_atg_ruku','atp_atg_chuku','stock','atg_kucun_qushi_12_1','atp_kucun_qushi_12_1','atp_ruku','atp_chuku','atg_ruku','atg_chuku']\n# for TableName in TableList:\n# CreateTable = (\"\"\"CREATE TABLE IF NOT EXISTS %s (\n# `ChanpinBianma` varchar(255) DEFAULT NULL,\n# `ChanpinMingcheng` varchar(255) DEFAULT NULL,\n# `Riqi` varchar(255) DEFAULT NULL,\n# `Jianshu` varchar(255) DEFAULT NULL,\n# `Zhongliang` varchar(255) DEFAULT NULL,\n# `Tiji` varchar(255) DEFAULT NULL,\n# `BaozhuangZhongliang` varchar(255) DEFAULT NULL,\n# `BaozhuangTiji` varchar(255) DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8\"\"\" % TableName)\n# DataBaseCursor.execute(CreateTable)\n# print(\"Done\")\n#\n\n# Keys = ['コード','SKUコード','日付','ステータスID1','ステータスID2','ステータスID3','ステータスID4','ステータスID5','値1','値2','値3','値4']\n# LogPath = (\"%s\\Logs\\{%s}Log.csv\" % (Main.CurrentPath, time.strftime(\"%Y-%m-%d-%H-%M-%S\",time.localtime())))\n# Main.LogPath.append(LogPath)\n# Sin = \"入库\"\n# Kehu = \"ATP\"\n# StockInData = Infra.MariaDB(SQL=(\"SELECT ChanpinBianma,ChanpinMingcheng,Riqi,Jianshu,Zhongliang,Tiji,BaozhuangZhongliang,BaozhuangTiji FROM atp_atg_quanbu_ruku_chuku WHERE Caozuo='%s' AND Kehu='%s' \")% (Sin,Kehu),Data=\"None\",Database=\"sdc\",NumberOfRow=0)\n# with open(LogPath, 'w', newline='') as CSV:\n# Writer = csv.DictWriter(CSV, fieldnames=Keys)\n# # 写入列名称(字典的键)\n# Writer.writeheader()\n# for Data in StockInData:\n# Time = datetime.datetime.strptime(Data[2], \"%Y-%m-%d\").strftime(\"%Y%m%d\")\n# Values = ['X',Data[0],Time,'-1','-1','-1','-1','-1',Data[3],Data[5],Data[6],Data[7]]\n# Dict = dict(zip(Keys, Values))\n# Writer.writerow(Dict)\n\n# Keys = ['コード','SKUコード','日付','ステータスID1','ステータスID2','ステータスID3','ステータスID4','ステータスID5','値1','値2','値3','値4']\n# LogPath = (\"%s\\Logs\\{%s}Log.csv\" % (Main.CurrentPath, time.strftime(\"%Y-%m-%d-%H-%M-%S\",time.localtime())))\n# Main.LogPath.append(LogPath)\n# Kehu = \"ATG\"\n# Sout = \"出库\"\n# StockOutData = Infra.MariaDB(SQL=(\"SELECT ChanpinBianma,ChanpinMingcheng,Riqi,Jianshu,Zhongliang,Tiji,BaozhuangZhongliang,BaozhuangTiji FROM atp_atg_quanbu_ruku_chuku WHERE Caozuo='%s' AND Kehu='%s' \")% (Sout,Kehu),Data=\"None\",Database=\"sdc\",NumberOfRow=0)\n# with open(LogPath, 'w', newline='') as CSV:\n# Writer = csv.DictWriter(CSV, fieldnames=Keys)\n# # 写入列名称(字典的键)\n# Writer.writeheader()\n# for Data in StockOutData:\n# Time = datetime.datetime.strptime(Data[2], \"%Y-%m-%d\").strftime(\"%Y%m%d\")\n# Values = ['X',Data[0],Time,'-1','-1','-1','-1','-1',Data[3],Data[5],Data[6],Data[7]]\n# Dict = dict(zip(Keys, Values))\n# Writer.writerow(Dict)\n\n\nclass Calculat():\n\n def ReverseStockTrend(self):\n initDaySource = '2017-12-30'\n initDay = 
time.mktime(time.strptime(initDaySource, \"%Y-%m-%d\"))\n CinitDay = initDay\n StockDOrig = Common.Infra.MariaDB(SQL=(\"SELECT * FROM atg_kucun_12yuemo \"), Data=\"None\", Database=\"sdc\", NumberOfRow=0,\n Host='127.0.0.1', Port=3306, User='root', Password='hsg@123', CharSet='utf8mb4')\n # print(StockDOrig)\n # Bianli ChanpinBianhao\n Keys = ['コード','SKUコード','日付','ステータスID1','ステータスID2','ステータスID3','ステータスID4','ステータスID5','値1','値2','値3','値4']\n LogPath = (\"%s\\Logs\\{%s}Log.csv\" % (Main.CurrentPath, FormatTime.YYYYMMDDHHMMSS(self,time.localtime())))\n Main.LogPath.append(LogPath)\n # 打开文件\n with open(LogPath, 'w', newline='') as CSV:\n Writer = csv.DictWriter(CSV, fieldnames=Keys)\n # 写入列名称(字典的键)\n Writer.writeheader()\n for Row in StockDOrig:\n print(\"+++++++++++++++++++++++++\")\n ChanpinBianma = Row[2]\n ChanpinMingcheng = Row[3]\n Riqi = initDay\n QimoXiangshu = int(float(Row[8]))\n QimoJianshu = int(float(Row[9]))\n QimoZhongliang = int(float(Row[10]))\n QimoTiji = int(float(Row[11]))\n QimoBaozhuangZhongliang = int(float(Row[12]))\n QimoBaozhuangTiji = int(float(Row[13]))\n StockIOD = Common.Infra.MariaDB(\n SQL=(\"SELECT ChanpinBianma,ChanpinMingcheng,Riqi,Jianshu,Zhongliang,\"\n \"Tiji,BaozhuangZhongliang,BaozhuangTiji,Caozuo FROM atp_atg_quanbu_ruku_chuku\"\n \" WHERE ChanpinBianma='%s' \") % ChanpinBianma, Data=\"None\", Database=\"sdc\", NumberOfRow=0,\n Host='127.0.0.1', Port=3306, User='root', Password='hsg@123',CharSet='utf8mb4')\n # Data = ('02010848 ', '导管FE010(G1)', '2017-01-06', 1600, '0', '0.017424', '36.4', '0.017424', '出库')\n # print(Data)\n Biandonzhi = 0\n CompareEachRow = []\n SameRow = []\n DS = sorted(StockIOD, key=lambda Date: Date[2], reverse=True)\n DSS = DS.copy()\n if len(DSS) < 1:pass\n else:\n del DSS[0]\n for EachRow,EachRowB in zip(DS,DSS):\n if EachRow[2] == EachRowB[2]:\n SameRow.append(EachRow)\n else:\n SameRow.append(EachRow)\n Base = []\n KucunRuku= 0\n KucunChuku= 0\n ZhongliangRuku = 0\n ZhongliangChuku = 0\n TijiRuku = 0\n TijiChuku = 0\n BaozhuangZhongliangRuku = 0\n BaozhuangZhongliangChuku = 0\n BaozhuangTijiRuku = 0\n BaozhuangTijiChuku = 0\n if len(SameRow) < 1:\n for Row in SameRow:\n for Element in SameRow[0]:\n Base.append(Element)\n OperationType = Row[-1]\n if OperationType == \"入库\":\n # print(int(Row[3]))\n KucunRuku += int(Row[3])\n ZhongliangRuku += int(Row[4])\n TijiRuku += int(Row[5])\n BaozhuangZhongliangRuku += int(Row[6])\n BaozhuangTijiRuku += int(Row[7])\n elif OperationType == \"出库\":\n # print(int(Row[3]))\n KucunChuku += int(Row[3])\n ZhongliangChuku += int(Row[4])\n TijiChuku += int(Row[5])\n BaozhuangZhongliangChuku += int(Row[6])\n BaozhuangTijiChuku += int(Row[7])\n else:\n for Row in SameRow:\n if len(Base) < 1:\n for Element in SameRow[0]:\n Base.append(Element)\n OperationType = Row[-1]\n if OperationType == \"入库\":\n # print(int(Row[3]))\n KucunRuku += int(Row[3])\n ZhongliangRuku += int(Row[4])\n TijiRuku += int(Row[5])\n BaozhuangZhongliangRuku += int(Row[6])\n BaozhuangTijiRuku += int(Row[7])\n elif OperationType == \"出库\":\n # print(int(Row[3]))\n KucunChuku += int(Row[3])\n ZhongliangChuku += int(Row[4])\n TijiChuku += int(Row[5])\n BaozhuangZhongliangChuku += int(Row[6])\n BaozhuangTijiChuku += int(Row[7])\n else:\n OperationType = Row[-1]\n if OperationType == \"入库\":\n # print(int(Row[3]))\n KucunRuku += int(Row[3])\n ZhongliangRuku += int(Row[4])\n TijiRuku += int(Row[5])\n BaozhuangZhongliangRuku += int(Row[6])\n BaozhuangTijiRuku += int(Row[7])\n elif OperationType == \"出库\":\n # print(int(Row[3]))\n 
KucunChuku += int(Row[3])\n ZhongliangChuku += int(Row[4])\n TijiChuku += int(Row[5])\n BaozhuangZhongliangChuku += int(Row[6])\n BaozhuangTijiChuku += int(Row[7])\n RiKucunBiandon = KucunChuku+(-KucunRuku)\n RiZhongliangbiandon = ZhongliangChuku+(-ZhongliangRuku)\n RiTijiBiandon = TijiChuku+(-TijiRuku)\n RiBaozhuangZhongliangBiandon = BaozhuangZhongliangChuku+(-BaozhuangZhongliangRuku)\n RibaozhuangTijiBiandon = BaozhuangTijiChuku+(-BaozhuangTijiRuku)\n IOVariation = [QimoJianshu + RiKucunBiandon,QimoZhongliang + RiZhongliangbiandon,QimoTiji + RiTijiBiandon\n ,QimoBaozhuangZhongliang + RiBaozhuangZhongliangBiandon,QimoBaozhuangTiji + RibaozhuangTijiBiandon]\n QimoJianshu = IOVariation[0]\n QimoZhongliang = IOVariation[1]\n QimoTiji = IOVariation[2]\n QimoBaozhuangZhongliang = IOVariation[3]\n QimoBaozhuangTiji = IOVariation[4]\n Time = datetime.datetime.strptime(Base[2], \"%Y-%m-%d\").strftime(\"%Y%m%d\")\n Values = ['X',Base[0],Time,'-1','-1','-1','-1','-1',QimoJianshu,QimoXiangshu,QimoBaozhuangZhongliang,QimoBaozhuangTiji]\n Dict = dict(zip(Keys,Values))\n print(Dict)\n # 循环写入列表中每一个元素到CSV文件\n Writer.writerow(Dict)\n SameRow.clear()\n CompareEachRow.append(EachRowB)\n QimoJianshu = 0\n\nCalculat.ReverseStockTrend(object)\n","sub_path":"Lab/GSDUwithUI/SDC.py","file_name":"SDC.py","file_ext":"py","file_size_in_byte":12094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"145482290","text":"from kafka import KafkaConsumer\nimport json\nimport ast\nfrom keras.models import load_model\nimport pickle\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras import backend as K\nimport tensorflow as tf\nfrom nltk.corpus import stopwords\nimport nltk\nimport string\nimport re\nfrom elasticsearch import Elasticsearch\n\nstop = set(stopwords.words('english'))\nes = Elasticsearch(timeout=30)\nes.indices.create(index='tweet_3')\n\n# import curses\ndef clean_document(doco):\n punctuation = string.punctuation\n punc_replace = ''.join([' ' for s in punctuation])\n doco_link_clean = re.sub(r'http\\S+', '', doco)\n doco_clean_and = re.sub(r'&\\S+', '', doco_link_clean)\n doco_clean_at = re.sub(r'@\\S+', '', doco_clean_and)\n doco_clean_digits = re.sub(r'\\w*\\d\\w*', '', doco_clean_at)\n doco_clean_digits = doco_clean_digits.replace('-', ' ')\n doco_alphas = re.sub(r'\\W +', ' ', doco_clean_digits)\n doco_alphas = re.sub(r'[½¿¯§ã£¹à殮™Ã¢Â°®ˆ©ª±»Œº³·¤¾Ž‡ðŸãƒð]', '', doco_alphas)\n trans_table = str.maketrans(punctuation, punc_replace)\n doco_clean = ' '.join([word.translate(trans_table) for word in doco_alphas.split(' ')])\n doco_clean = doco_clean.split(' ')\n p = re.compile(r'\\s*\\b(?=[a-z\\d]*([a-z\\d])\\1{3}|\\d+\\b)[a-z\\d]+', re.IGNORECASE)\n doco_clean = ([p.sub(\"\", x).strip() for x in doco_clean])\n doco_clean = [word.lower() for word in doco_clean if len(word) > 0]\n doco_clean = ([i for i in doco_clean if i not in stop])\n doco_clean = ([p.sub(\"\", x).strip() for x in doco_clean])\n return doco_clean\n\n\n\nwith open('D:/Numer8/twitter_on_flask/tokenizer_twitter.pkl', 'rb') as tok1:\n tokenizer = pickle.load(tok1)\n\nsentiment_model = load_model('D:/Numer8/twitter_on_flask/sentiment_twitter.h5')\nsentiment_model._make_predict_function()\n\n\ndef test_model(yet):\n tokenized_tweet = tokenizer.texts_to_sequences([yet])\n tweet_padded = pad_sequences(tokenized_tweet, maxlen=25, dtype='int32', value=0)\n tweet_sentiment = sentiment_model.predict(tweet_padded)\n if tweet_sentiment[0][0] >= 0.5:\n return 1\n elif tweet_sentiment[0][0] 
< 0.5:\n return 0\n\n\n\nconsumer = KafkaConsumer(bootstrap_servers=['localhost:9092'], auto_offset_reset='latest', group_id='twitter_test_1',\n consumer_timeout_ms=10000, value_deserializer=lambda m: json.loads(m.decode('utf-8')))\n\n\ndef get_lat_long(q):\n res = es.search(index='location_latlong', doc_type='loc-type', size=1000,\n body={\"query\": {\"match_phrase\": {\"Address\": q}}}, request_timeout=60)\n return res\n\nconsumer.subscribe('twitter_1')\n# print(consumer)\n\nfor message in consumer:\n # print(message)\n message = (message.value)\n tweet_text = message[0]\n created = message[1]\n location = message[2]\n coordinates = message[3]\n tweet_id = message[4]\n mode = message[5]\n print(\"Tweet\",tweet_text)\n print(\"Datetime\",created)\n print(\"Location\",location)\n print(\"Cordinates\",coordinates)\n print(\"Tweet ID\",tweet_id)\n print(\"Mode\", mode)\n\n try:\n p = get_lat_long(location)\n sample = p['hits']['hits'][0]\n latitude = (sample['_source']['Latitude'])\n longitude = (sample['_source']['Longitude'])\n except Exception as e:\n latitude = 0\n longitude = 0\n print(\"Longitude\",longitude)\n print(\"Latitude\", latitude)\n\n tweet_text_cleaned = clean_document(tweet_text)\n\n sentiment = test_model(tweet_text_cleaned)\n print(\"Sentiment\",sentiment)\n print(\"--------------\")\n\n # mymappings = {\"mappings\": {\"tweet_3\": {\"properties\":}}}\n\n es.indices.put_mapping(\n index=\"tweet_3\",\n doc_type=\"geo_point\",\n body={\n \"properties\": {\n \"geolocation\": {'type': 'geo_point'}\n }\n }\n )\n\n\n\n es.index(index=\"tweet_3\",\n doc_type=\"geo_point\",\n body={\"mode\": mode,\n \"tweet\": tweet_text,\n \"created\": created,\n \"location\": location,\n \"coordinates\": coordinates,\n \"geolocation\": {\n \"lat\": latitude,\n \"lon\": longitude},\n \"sentiment\": sentiment,\n \"ID\": tweet_id})\n\n # es.indices.put_mapping(\n # index=\"tweet_3\",\n # doc_type=\"geo_point\",\n # body= {\n # \"properties\": {\n # \"location_coordinates\": {'type': 'geo_point'}\n # }\n # }\n # )","sub_path":"twitter_consumer.py","file_name":"twitter_consumer.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"234923636","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 18 13:43:04 2019\n\n@author: Ajinkya Sakhare\n\"\"\"\nimport solver_utils\nimport numpy as np\n\ndef solve(inputmatrix):\n \"\"\"\n This function contains a solution to the data in 4be741c5.json posed by the Abstraction and\n Reasoning Corpus (ARC).\n\n The problem presents an n x m grid, with some rows containing 0-m coloured squares with repetition over a row or colomuns.\n The solution requires the same colour pattern to be first mirrored and then concatenated with the older pattern. 
This\n    results in a 2n x m output grid.\n    \"\"\"\n    y = np.array(inputmatrix)  # convert the input matrix to a numpy array\n    y_copy = y  # keep a reference to the original pattern\n    y = y[::-1, :]  # reverse the row order (vertical mirror), independent of the row count\n    return((np.concatenate((y, y_copy))).tolist())  # return the mirrored rows concatenated with the original pattern\n\n# Use main() from solution_4be741c5.py as template\nif __name__ == \"__main__\":\n    data = solver_utils.parse_json_file()\n\n    for training in data['train']:\n        solver_utils.solve_wrapper(training['input'], solve)\n\n    for testing in data['test']:\n        solver_utils.solve_wrapper(testing['input'], solve)","sub_path":"src/solution_4c4377d9.py","file_name":"solution_4c4377d9.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"580861786","text":"# -*- coding: utf-8 -*-\nimport json\nimport re\n\n\n'''\nAll methods for manipulating the private messages: \nOpen file that has template\nGive attributes to each PM\nReplace template with actual values using regex/ dictionary\n'''\n\nclass PrivateMessage:\n    \n    message_location = 'messages.json' # default location\n    \n    def __init__(self, message_type, replace_dict): \n        self.channel = \"\"\n        self.timestamp = \"\"\n        self.text = self.open_json_messages(message_type)\n        self.text = self.replace_string_chars(self.text, replace_dict)\n    \n    # opens json file; is converted to a dict{str:[list]} \n    # input- message_type:str is the dictionary keys\n    # returns dictionary list \n    def open_json_messages(self, message_type):\n        with open(self.message_location) as json_file:\n            json_dict = json.load(json_file)\n        \n        # returns a string, with newline char between each list item. \n        return '\\n'.join(json_dict[message_type]) \n    \n    def replace_string_chars(self, message_string, replace_dictionary):\n        \n        # replace_dictionary = {'String_to_replace': 'new_string', 'String_to_replace_2': 'new_string2'}\n        # we want to find keys in the replace_dictionary that are between {{ }} \n        # find all {{ }}\n        ###########(group1)(group2)(group3)(group4)(group5) group3 = dict key\n        regex_string = r'(\\{\\{)( {0,4})(\\w*)( {0,4})(\\}\\})'\n        template = re.compile(regex_string)\n        regex_match = template.search(message_string)\n        while(regex_match):\n            \n            # get the dictionary value by the group3 match\n            # get call rather than using brackets prevents Exception\n            insert_text = replace_dictionary.get(regex_match.group(3))\n            if insert_text is None:\n                # unknown keys become an empty string, so the loop always advances\n                insert_text = ''\n            message_string = message_string[:regex_match.start()] + insert_text + message_string[regex_match.end():]\n            \n            regex_match = template.search(message_string)\n        return message_string \n    \n    \nif __name__ == '__main__':\n    testdict = {'UserName': 'Will', 'TestName': None} \n    test1 = PrivateMessage('new_greeting', testdict)\n    \n    found_string = '{{ UserName }} {{ TestName }} is this going to work?'\n    \n    fixed_string = test1.replace_string_chars(found_string, testdict)\n    print(found_string)\n    print(fixed_string)\n    ","sub_path":"message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"588101299","text":"from mlstudiosdk.solution_gallery.solution.SolutionBase import SolutionBase\n\nclass Solution(SolutionBase):\n\n    def __init__(self):\n        super().__init__()\n        self.model()\n\n    def model(self):\n\n        reader1 = self.myscheme.new_node(\"mlstudiosdk.modules.components.io.reader.Reader\")\n        reader1.set_title(\"train_input\")\n\n        Text_Extract_Keywords = 
self.myscheme.new_node('Text_Extract_Keywords.Text_Extract_Keywords')\n Text_Extract_Keywords.set_title('textrank')\n outputwriter = self.myscheme.new_node(\"mlstudiosdk.modules.components.io.writer.Writer\")\n outputwriter.set_title(\"output\")\n eva_visualization = self.myscheme.new_node(\n \"mlstudiosdk.modules.components.visualization.evaluation_matrix.Evaluation\")\n eva_visualization.set_title(\"evaluation_visualization\")\n evaluation_writer = self.myscheme.new_node(\"mlstudiosdk.modules.components.io.writer.JsonWriter\")\n evaluation_writer.set_title(\"evaluation_output\")\n pred_stat_visualization = self.myscheme.new_node(\n \"mlstudiosdk.modules.components.visualization.data_statistics.Statistics\")\n pred_stat_visualization.set_title(\"pred_statistics_visualization\")\n pred_stat_writer = self.myscheme.new_node(\"mlstudiosdk.modules.components.io.writer.JsonWriter\")\n pred_stat_writer.set_title(\"pred_statistics_output\")\n\n# self.myscheme.new_link(reader2, \"Data\", pred_stat_visualization, \"Data\")\n self.myscheme.new_link(pred_stat_visualization, \"Data\", pred_stat_writer, \"Data\")\n\n self.myscheme.new_link(Text_Extract_Keywords, \"Evaluation Results\", eva_visualization, \"Result\")\n self.myscheme.new_link(Text_Extract_Keywords, \"Metric Score\", eva_visualization, \"Metric Score\")\n # self.myscheme.new_link(sentimentModel, \"Metric\", eva_visualization, \"Metric\")\n self.myscheme.new_link(eva_visualization, \"Evaluation\", evaluation_writer, \"Data\")\n\n\n self.myscheme.new_link(reader1, \"Data\", Text_Extract_Keywords, \"Train Data\")\n# self.myscheme.new_link(reader2, \"Data\", Text_Extract_Keywords, \"Test Data\")\n self.myscheme.new_link(Text_Extract_Keywords, \"News\", outputwriter, \"Data\")\n","sub_path":"Automatically extract Keyword&Topics/code/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"237507096","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom .models import *\nfrom dal import autocomplete\n\nclass InstitutionForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Institution\n\t\texclude = ['created_on', 'updated_on']\n\nclass BlockForm(forms.ModelForm):\n\n\tclass Meta:\n\t\tmodel = Block\n\t\texclude = ['created_on', 'updated_on']\n\nclass FloorForm(forms.ModelForm):\n\t\n\t# auto complete\n\tblock = forms.ModelChoiceField(\n\t\t\tqueryset = Block.objects.all(),\n\t\t\twidget = autocomplete.ModelSelect2(url='physical_structure:block_autocomplete')\n\t)\n\n\tclass Meta:\n\t\tmodel = Floor\n\t\texclude = ['created_on', 'updated_on']\n\nclass RoomForm(forms.ModelForm):\n\t# auto complete\n\tfloor = forms.ModelChoiceField(\n\t\t\tqueryset = Floor.objects.all(),\n\t\t\twidget = autocomplete.ModelSelect2(url='physical_structure:floor_autocomplete')\n\t)\n\n\tclass Meta:\n\t\tmodel = Room\n\t\texclude = ['created_on', 'updated_on']\n\n#Reports\nclass ReportRoomsByBlocksForm(forms.ModelForm):\n\t\n\t# auto complete\n\tblock = forms.ModelChoiceField(\n\t\t\tqueryset = Block.objects.all(),\n\t\t\twidget = autocomplete.ModelSelect2(url='physical_structure:block_autocomplete')\n\t)\n\n\t# floor = forms.ModelChoiceField(\n\t# \t\tqueryset = Floor.objects.all(),\n\t# \t\twidget = autocomplete.ModelSelect2(url='physical_structure:floor_autocomplete')\n\t# )\n\n\tclass Meta:\n\t\tmodel = Block\n\t\tfields = 
('block',)","sub_path":"reservasalas/physical_structure/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"610547009","text":"import pandas as pd\nimport numpy as np\nall_ages=pd.read_csv(\"all-ages.csv\")\nrecent_grads=pd.read_csv(\"recent-grads.csv\")\nprint(all_ages.head())\nprint(recent_grads.head())\n\n## Create dictionary of each Major_Cat_Graduates\naa_cat_counts = dict()\nrg_cat_counts = dict()\npivot_table_aa=all_ages.pivot_table(index=\"Major_category\",values=\"Total\",aggfunc=np.sum)\naa_cat_counts=pivot_table_aa.to_dict()\npivot_table_rg=recent_grads.pivot_table(index=\"Major_category\",values=\"Total\",aggfunc=np.sum)\nrg_cat_counts=pivot_table_rg.to_dict()\n### Calculate percent of low_wage_graduates from data\ncolumns=list(recent_grads.columns)\nlow_wage_percent=(recent_grads['Low_wage_jobs'].sum())/(recent_grads['Total'].sum())\n\n### Count of Majors for which recent_grads did better in terms of unemployment_rate\n\nmajors = recent_grads['Major'].unique()\n \nrg_lower_count = 0\npivot_rg=recent_grads.pivot_table(index=\"Major\",values=\"Unemployment_rate\")\npivot_aa=all_ages.pivot_table(index=\"Major\",values=\"Unemployment_rate\")\ndict1=pivot_rg.to_dict()\ndict2=pivot_aa.to_dict()\nfor i in dict1:\n    if dict1[i] val_eval:\n                    # Save this model checkpoint\n                    self.save(save_path, epoch_idx, prefix='best')\n                    best_eval = val_eval\n                else:\n                    print(f'Avg Loss for epoch:{avg_epoch_loss}')\n                if epoch_idx % 10 == 0:\n                    # Save the model every 10 epochs anyways\n                    self.save(save_path, epoch_idx)\n\n    def eval(self):\n        self.model.eval()\n        eval_loss = 0\n        with torch.no_grad():\n            for idx, (img_batch, target_batch) in enumerate(self.val_loader):\n                self.optimizer.zero_grad()\n                img_batch = img_batch.to(self.device)\n                target_batch = target_batch.to(self.device)\n                predictions = self.model(img_batch).squeeze()\n                loss = self.val_criterion(predictions, target_batch)\n                eval_loss += loss.item()\n        return eval_loss / len(self.val_loader)\n\n    def train_one_epoch(self):\n        self.model.train()\n        epoch_loss = 0\n        tk0 = tqdm(self.train_loader)\n        for idx, (img_batch, target_batch) in enumerate(tk0):\n            self.optimizer.zero_grad()\n            img_batch = img_batch.to(self.device)\n            target_batch = target_batch.to(self.device)\n            predictions = self.model(img_batch).squeeze()\n            loss = self.train_criterion(predictions, target_batch)\n            loss.backward()\n            self.optimizer.step()\n            epoch_loss += loss.item()\n            if idx % self.log_step == 0:\n                tk0.set_postfix_str(f'Loss at step {idx + 1}: {loss.item()}')\n        return epoch_loss / len(self.train_loader)\n\n    def save(self, path, epoch_id, prefix=''):\n        checkpoint_name = f'chkpt_{epoch_id}'\n        path = os.path.join(path, prefix)\n        checkpoint_path = os.path.join(path, f'{checkpoint_name}.pt')\n        state_dict = {}\n        model_state = copy.deepcopy(self.model.state_dict())\n        model_state = {k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in model_state.items()}\n        optim_state = copy.deepcopy(self.optimizer.state_dict())\n        for state in optim_state['state'].values():\n            for k, v in state.items():\n                if isinstance(v, torch.Tensor):\n                    state[k] = v.cpu()\n\n        state_dict['model'] = model_state\n        state_dict['optimizer'] = optim_state\n        state_dict['scheduler'] = self.lr_scheduler.state_dict()\n        state_dict['epoch'] = epoch_id + 1\n        state_dict['loss_profile'] = self.loss_profile\n\n        os.makedirs(path, exist_ok=True)\n        for f in os.listdir(path):\n            if f.endswith('.pt'):\n                os.remove(os.path.join(path, f))\n        
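# stale .pt files were removed just above, so the directory holds only the checkpoint written next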
torch.save(state_dict, checkpoint_path)\n        del model_state, optim_state\n        gc.collect()\n\n    def load(self, load_path):\n        state_dict = torch.load(load_path)\n        iter_val = state_dict.get('epoch', 0)\n        self.loss_profile = state_dict.get('loss_profile', [])\n        if 'model' in state_dict:\n            print('Restoring Model state')\n            self.model.load_state_dict(state_dict['model'])\n\n        if 'optimizer' in state_dict:\n            print('Restoring Optimizer state')\n            self.optimizer.load_state_dict(state_dict['optimizer'])\n            # manually move the optimizer state vectors to device\n            for state in self.optimizer.state.values():\n                for k, v in state.items():\n                    if isinstance(v, torch.Tensor):\n                        state[k] = v.to(self.device)\n\n        if 'scheduler' in state_dict:\n            print('Restoring Learning Rate scheduler state')\n            self.lr_scheduler.load_state_dict(state_dict['scheduler'])\n\n\nclass VAETrainer(Trainer):\n    def train_one_epoch(self):\n        self.model.train()\n        epoch_loss = 0\n        tk0 = tqdm(self.train_loader)\n        for idx, (img_batch, _) in enumerate(tk0):\n            self.optimizer.zero_grad()\n            img_batch = img_batch.to(self.device)\n            _, predictions, mu, logvar = self.model(img_batch)\n            loss = self.train_criterion(img_batch, predictions, mu, logvar)\n            loss.backward()\n            self.optimizer.step()\n            epoch_loss += loss.item()\n            if idx % self.log_step == 0:\n                tk0.set_postfix_str(f'Loss at step {idx + 1}: {loss.item()}')\n        return epoch_loss / len(self.train_loader)\n\n    def eval(self):\n        self.model.eval()\n        eval_loss = 0\n        with torch.no_grad():\n            for idx, (img_batch, _) in enumerate(self.val_loader):\n                self.optimizer.zero_grad()\n                img_batch = img_batch.to(self.device)\n                _, predictions, mu, logvar = self.model(img_batch)\n                loss = self.val_criterion(img_batch, predictions)\n                eval_loss += loss.item()\n        return eval_loss / len(self.val_loader)\n","sub_path":"carla-aebs/utils/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"399442747","text":"import math\n# Basic usage of functions\n\n\ndef my_abs(x):\n    \"\"\"\n    Return the absolute value of the input.\n    :param x:\n    :return:\n    \"\"\"\n    # Use isinstance to check the argument type; raise an exception on a wrong type\n    if not isinstance(x, (int, float)):\n        raise TypeError('bad operand type')\n    if x >= 0:\n        return x\n    else:\n        return -x\n\n\nprint(\"my_abs(-123) :\", my_abs(-123))\n\n\n# Multiple parameters and default parameters:\n# Default parameters simplify function calls. Two things to keep in mind when setting them:\n# first, required parameters come before default parameters, otherwise the Python interpreter reports an error;\n# second, how to choose the default values.\n# When a function has several parameters, put the frequently changing ones first and the stable ones last; the stable ones can then be given defaults.\n# One rule to remember: a default parameter must point to an immutable object! Do not use a list or similar as a default,\n# because appends/removals done inside the function are kept across calls and mutate the default value\n\n# Shift a coordinate point\ndef move(x, y, step=1, angle=0):\n    nx = x + step * math.cos(angle)\n    ny = y + step * math.sin(angle)\n    return nx, ny\n\n\n# When a function has default parameters, they may be omitted and the default values are used\nprint(\"move(2, 3):\", move(2, 3))\n# With several default parameters, they can be supplied positionally in order\nxn, yn = move(2, 3, 3)\nprint(\"nx, ny:\", xn, yn)\n# Returning multiple values from a Python function really returns a tuple; syntactically the parentheses may be omitted,\n# and several variables can receive one tuple at once, unpacked by position, so \"multiple return values\" are just one tuple\nt = move(2, 3, 3, 2)\nprint(\"move(2, 3, 3, 2)\", t)\n# When some default parameters are supplied out of order, their parameter names must be written out\nprint(\"move(2, 3, angle=1)\", move(2, 3, angle=1))\n\n\n# Variadic parameters:\n# A function taking an unknown number of arguments can accept them through a variadic parameter\n# Prefix the parameter with '*' to declare it variadic\n# Inside the function the variadic parameter is received as a tuple\n\n# Sum an arbitrary number of values\ndef sums(*nums):\n    total = 0\n    for n in nums:\n        total += n\n    return total\n\n\n# Pass several arguments directly\nprint(\"sums(1, 2, 3, 4, 5):\", sums(1, 2, 3, 4, 5))\n# An existing list or tuple can be prefixed with '*' to pass all of its items as variadic arguments\nnumbers = [1, 2, 3, 4, 5]\nprint(\"sums[*numbers]:\", sums(*numbers))\n\n\n# Keyword parameters:\n# Keyword parameters accept zero or more named arguments, which are assembled into a dict inside the function\n# Prefix the parameter with '**' to declare it a keyword parameter\n\n# Print a person's info
def person(name, age, **other):\n    print(\"name:\", name, \"age:\", age, \"other:\", other)\n\n\n# Pass only the required parameters\nperson(\"张三\", 22)\n# Pass a keyword argument\nperson(\"张三\", 22, city=\"成都\")\n# An existing dict can be turned into keyword arguments by prefixing it with \"**\"\nextra = {'city': 'Beijing', 'job': 'Engineer'}\nperson(\"张三\", 22, **extra)\n\n\n# Named keyword parameters:\n# To restrict which keyword names are accepted, use named keyword parameters\n# They need a special separator *; parameters after * are treated as named keyword parameters\n# Named keyword parameters must be passed with their names\n# If the definition already contains a variadic parameter, the named keyword parameters that follow no longer need the * separator\n# Named keyword parameters may have defaults, which simplifies the call\ndef person(name, age, *args, city=\"ChengDu\", job):\n    print(\"name:\", name, \"age:\", age, \"args:\", args, \"city:\", city, \"job:\", job)\n\n\nperson(\"张三\", 22, \"arg1\", \"arg2\", job=\"Engineer\")\n\n\n# Combining parameter kinds:\n# In Python a function may be defined with required, default, variadic, named keyword and keyword parameters, and all five kinds can be combined.\n# Note, however, that they must be declared in this order: required, default, variadic, named keyword, then keyword parameters\n\n","sub_path":"function/function_base.py","file_name":"function_base.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"351764719","text":"#coding=utf-8\n\nfrom message_lib import amap,reply_msg\nimport logging\n\nlogger = logging.getLogger('mylog')\n\n#def add_reply(call, feedback):\n#    amap[call] = feedback\n\n#def delete_reply(call):\n#    if call in amap.keys():\n#        del(amap[call])\n\ndef find_reply(call):\n    flag = 0\n    call_list = []\n    logger.warn(call+'$$$$$$$$$$$$$$$')\n    if call in amap.keys():\n        flag = 1\n        return flag, call_list\n    else:\n        for k in amap:\n            if call in k:\n                call_list.append(k)\n        if len(call_list) != 0:\n            flag = 2\n        return flag, call_list\n\ndef show_reply(call):\n    flag, list = find_reply(call)\n    if flag == 1:\n        return amap[call]\n    elif flag == 0:\n        return reply_msg['not_found']\n    else:\n        str = '您是否想说:'+list[0]\n        for k in list[1:]:\n            str += ' or '\n            str += k\n        str += '?'\n        return str\n","sub_path":"weixin/weixin/reply_map.py","file_name":"reply_map.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"113900235","text":"import os\nimport ntpath\nimport time\nfrom .train_util import mkdir, tensor2img, tensor2colorlabel, save_image\nimport scipy.misc\nimport tensorflow as tf\n\n\nclass Visualizer:\n    def __init__(self, opt):\n        self.opt = opt\n        if opt.is_train and opt.save_log:\n            self.log_dir = mkdir(os.path.join(opt.checkpoints_dir, opt.dataset))\n            self.log_name = os.path.join(opt.checkpoints_dir, opt.dataset, 'loss_log.txt')\n            with open(self.log_name, \"a\") as log_file:\n                now = time.strftime('%c')\n                log_file.write('================ Training Loss (%s) ================\\n' % now)\n\n    def print_current_errors(self, epoch, batch_id, running_time, lossG, lossD):\n        message = '(epoch: %d, iters: %d, time: %.2f sec, lossG: %.3f, lossD: %.3f) ' \\\n                  % (epoch, batch_id, running_time, lossG, lossD)\n        print(message, flush=True)\n        if self.opt.save_log:\n            with open(self.log_name, 'a') as log_file:\n                log_file.write('%s\\n' % message)\n\n    def save_train_images(self, epoch, iter, label_images, real_images, generated_images):\n        image_dir = mkdir(os.path.join(self.opt.images_dir, self.opt.dataset))\n        for i in range(label_images.size()[0]):\n            img_name = f'e{epoch}-i{iter}-{i}.png'\n            label_img = tensor2colorlabel(label_images[i], self.opt)\n            real_img = tensor2img(real_images[i], self.opt)\n            generated_img = tensor2img(generated_images[i], self.opt)\n\n            label_dir = mkdir(os.path.join(image_dir, 'label'))\n            real_dir = mkdir(os.path.join(image_dir, 'real'))\n            generate_dir = mkdir(os.path.join(image_dir, 'spade_generate'))\n\n            
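# each variant is written to the matching subfolder created above (label/, real/, spade_generate/)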
save_image(label_img, label_dir, img_name)\n save_image(real_img, real_dir, img_name)\n save_image(generated_img, generate_dir, img_name)\n print(f'Epoch {epoch}, iter {iter}, generated images saved.', flush=True)\n\n","sub_path":"spade/util/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"486562069","text":"#\n#\n#Problem 6\n#\n#\n\nimport datetime #import some cool/useful modules for handling dates\nimport calendar\n\n# The input string prompter function.\n# Part a\ndef get_date():\n\t\"\"\"Get a date from the user is a specific format\"\"\"\n\t\n\ttoday_date = datetime.date.today()\n\t # gets the number value of the month\n\tmonth_val = int(today_date.strftime(\"%m\")) \n\t# make the abbreviated name of the month corresponding to month_val\n\tmonth_string=calendar.month_abbr[month_val] \n\t# format a date string with desiged 3 letter middlepart\n\tdate_str = today_date.strftime(\"%d{}%Y\").format(month_string)\n\n\tprint('Todays Date is',date_str)\n\n\tcalendar_months=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\n\twhile True:\n\t\t\tinputstr=input(\"\\nInput a date of your choice in the above format::\\n\")\n\t\t\ttry:\n\t\t\t\t# catch non string-able input\n\t\t\t\tinputstr=str(inputstr)\n\t\t\texcept Exception:\n\t\t\t\tprint('Not a string-able object, try again')\n\t\t\telse:\n\t\t\t\t#split into day month year parts\n\t\t\t\tday=inputstr[0:2]\n\t\t\t\tmonth=inputstr[2:5]\n\t\t\t\tyear=inputstr[5:9]\n\t\t\t\ttry:\n\t\t\t\t\t# catch format errors by asserting formatting\n\t\t\t\t\t# conditions and catching exceptions.\n\t\t\t\t\t#Define a day\n\t\t\t\t\tassert len(day)<=2 and len(day)>0\n\t\t\t\t\tday=int(day)\n\t\t\t\t\tassert day>0 and day<=31\n\t\t\t\t\t#Define a month\n\t\t\t\t\tassert month in calendar_months\n\t\t\t\t\t#Define a year\n\t\t\t\t\tassert len(year)==4\n\t\t\t\t\tyear=int(year)\n\t\t\t\t\t#return input iff its right\n\t\t\t\t\treturn(inputstr)\n\t\t\t\t\tbreak\n\t\t\t\texcept Exception:\n\t\t\t\t\tprint('Incorrect format, try again')\n\n# The input string parser function\n# Part b\ndef parsley(inputstr):\n\t\"\"\"Parses input string for the year, month number, and day of the month \"\"\"\n\tcalendar_months={'Nov': 11, 'Aug': 8, 'Jul': 7, 'Oct': 10, 'Sep': 9, 'Mar': 3, 'Dec': 12, 'May': 5, 'Apr': 4, 'Feb': 2, 'Jan': 1, 'Jun': 6}\n\n\t#Slice out the relevant parts. 
This will definitely work if inputstr passed the get_date function\n\tday=inputstr[0:2]\n\tmonth=inputstr[2:5]\n\tyear=inputstr[5:9]\n\t#conversions\n\tdaynum=int(day)\n\tmonthnum=calendar_months[str(month)]\n\tyearnum=int(year)\n\n\treturn([yearnum,monthnum,daynum])\n\n# Part c\ndef julian(date_lst):\n\t(year,month,day)=date_lst\n\t#calculate formula values\n\ta=int(year/100)\n\tb=2-a+int(a/4)\n\tjd=int(365.25*(year+4716))+int(30.6001*(month+1))+day+b-1524.5\n\treturn(jd) #the julian day\n\n# Part d\ndef weekday(julianday):\n\tdayindex={'0':'Sunday','1':'Monday','2':'Tuesday','3':'Wednesday','4':'Thursday','5':'Friday','6':'Saturday'}\n\t#calc the remainder\n\tremainder_str=str(int((julianday+1.5)%7))\n\tweekday=dayindex[remainder_str]\n\treturn(weekday)\n\n\nif __name__==\"__main__\":\n\n\t#get the date\n\tparsed=parsley(get_date())\n\t#get the julian day\n\tjulday=julian(parsed)\n\tprint('Julian day Number ::',julday)\n\tdayofweek=weekday(julday)\n\tprint('The corresponding day of the week ::',dayofweek)\n\n\ttoday_date = datetime.date.today()\n\t # gets the number value of the month\n\tmonth_val = int(today_date.strftime(\"%m\")) \n\t# make the abbreviated name of the month corresponding to month_val\n\tmonth_string=calendar.month_abbr[month_val] \n\t# format a date string with desiged 3 letter middlepart\n\tdate_str = today_date.strftime(\"%d{}%Y\").format(month_string)\n\n\t#get today in julian\n\ttodayjulday=julian(parsley(date_str))\n\n\t#print the difference between today and the input day in julian days\n\tprint('Number of days since/to that Julian date ::', todayjulday-julday, ' (in julian days.) ')\n\n#my birthday is 09Oct1995 so I get output\n# Julian day Number :: 2449999.5\n# The corresponding day of the week :: Monday\n# Number of days since/to that Julian date :: 8417.0 in julian days.\n\n\n\n\n","sub_path":"Homework_4/hwk_4_P6.py","file_name":"hwk_4_P6.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"339957022","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 8 11:30:12 2020\n\n@author: eo\n\"\"\"\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Add local path\n\nimport os\nimport sys\n\ndef find_path_to_local(target_folder = \"local\"):\n \n # Skip path finding if we successfully import the dummy file\n try:\n from local.dummy import dummy_func; dummy_func(); return\n except ImportError:\n print(\"\", \"Couldn't find local directory!\", \"Searching for path...\", sep=\"\\n\")\n \n # Figure out where this file is located so we can work backwards to find the target folder\n file_directory = os.path.dirname(os.path.abspath(__file__))\n path_check = []\n \n # Check parent directories to see if we hit the main project directory containing the target folder\n prev_working_path = working_path = file_directory\n while True:\n \n # If we find the target folder in the given directory, add it to the python path (if it's not already there)\n if target_folder in os.listdir(working_path):\n if working_path not in sys.path:\n tilde_swarm = \"~\"*(4 + len(working_path))\n print(\"\\n{}\\nPython path updated:\\n {}\\n{}\".format(tilde_swarm, working_path, tilde_swarm))\n sys.path.append(working_path)\n break\n \n # Stop if we hit the filesystem root directory (parent directory isn't changing)\n prev_working_path, working_path = working_path, os.path.dirname(working_path)\n 
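# keep a trail of every directory probed; the ImportError branch below prints this list when the target folder is never found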
path_check.append(prev_working_path)\n if prev_working_path == working_path:\n print(\"\\nTried paths:\", *path_check, \"\", sep=\"\\n \")\n raise ImportError(\"Can't find '{}' directory!\".format(target_folder))\n \nfind_path_to_local()\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Imports\n\nimport cv2\nimport numpy as np\n\nfrom local.lib.launcher_utils.configuration_loaders import Reconfigurable_Single_Station_Loader\nfrom local.lib.launcher_utils.video_processing_loops import Station_Processing_Video_Loop\n\nfrom local.lib.ui_utils.display_specification import Input_Display\n\nfrom local.configurables.stations._helper_functions import Zoomed_Station_Display\nfrom local.configurables.stations._helper_functions import Leveled_Data_Display, Boolean_Result_Display\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Define displays\n\nclass Count_Levels_Display(Leveled_Data_Display):\n \n # .................................................................................................................\n \n def __init__(self, layout_index, num_rows, num_columns, initial_display = False,\n lower_level_color = (150, 150, 150),\n upper_level_color = (125, 125, 125),\n display_width = 500,\n display_height = 256):\n \n # Inherit from parent class\n super().__init__(layout_index, num_rows, num_columns, initial_display,\n window_name = \"Count (Levels)\",\n ch1_color = (255, 255, 255),\n minimum_value = 0,\n maximum_value = 1000,\n lower_level_color = lower_level_color,\n upper_level_color = upper_level_color,\n display_width = display_width,\n display_height = display_height)\n \n # .................................................................................................................\n \n def get_levels(self, configurable_ref):\n return (configurable_ref.low_count, configurable_ref.high_count)\n \n # .................................................................................................................\n \n def get_latest_plot_data(self, configurable_ref):\n return configurable_ref._latest_norm_count_int_for_config\n \n # .................................................................................................................\n # .................................................................................................................\n\n\n# =====================================================================================================================\n# =====================================================================================================================\n\n\nclass Station_Display(Zoomed_Station_Display):\n \n # .................................................................................................................\n \n def __init__(self, layout_index, num_rows, num_columns, initial_display = False):\n \n # Inherit from parent class\n super().__init__(layout_index, num_rows, num_columns, initial_display = initial_display)\n \n # .................................................................................................................\n \n def postprocess_cropmasked_frame(self, cropmasked_frame, configurable_ref):\n \n # Get each color channel separately for convenience\n red_ch_img = cropmasked_frame[:, :, 2]\n green_ch_img = cropmasked_frame[:, :, 1]\n blue_ch_img = cropmasked_frame[:, :, 0]\n \n # Get masking based on red channel settings\n low_red = 
configurable_ref.low_red\n high_red = configurable_ref.high_red\n invert_red = configurable_ref.invert_red\n red_ch_mask = configurable_ref._check_in_range(red_ch_img, low_red, high_red, invert_red)\n \n # Get masking based on green channel settings\n low_green = configurable_ref.low_green\n high_green = configurable_ref.high_green\n invert_green = configurable_ref.invert_green\n green_ch_mask = configurable_ref._check_in_range(green_ch_img, low_green, high_green, invert_green)\n \n # Get masking based on blue channel settings\n low_blue = configurable_ref.low_blue\n high_blue = configurable_ref.high_blue\n invert_blue = configurable_ref.invert_blue\n blue_ch_mask = configurable_ref._check_in_range(blue_ch_img, low_blue, high_blue, invert_blue)\n \n # Create a mask from the separate channels, which we'll apply to the proper color image for display\n combined_mask_1ch = np.uint8(255 * np.bitwise_and(blue_ch_mask, np.bitwise_and(green_ch_mask, red_ch_mask)))\n combined_mask_3ch = cv2.cvtColor(combined_mask_1ch, cv2.COLOR_GRAY2BGR)\n \n return cv2.bitwise_and(cropmasked_frame, combined_mask_3ch)\n \n # .................................................................................................................\n # .................................................................................................................\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Main\n\n# For clarity\ntarget_script_name = \"contains_target_rgb_station\"\n\n# Make all required selections\nloader = Reconfigurable_Single_Station_Loader(target_script_name)\narg_selections = loader.parse_standard_args()\nloader.selections(*arg_selections)\n\n# Set up video capture, processing stages & playback control\nconfigurable_ref = loader.setup_all(__file__)\n\n# Get drawing specification for the given edge decay variable\nzone_drawing_spec = configurable_ref.get_drawing_spec(\"station_zones_list\")\n\n# Set up object to handle all video processing\nmain_process = \\\nStation_Processing_Video_Loop(loader,\n ordered_display_list = [Boolean_Result_Display(1, 1, 4),\n Count_Levels_Display(0, 4, 1),\n Station_Display(2, 3, 3),\n Input_Display(1, 4, 1,\n window_name = \"Draw Station Zones\",\n drawing_json = zone_drawing_spec)])\n\n# Most of the work is done here!\nmain_process.loop()\n\n# Ask user to save config\nloader.ask_to_save_configurable_cli(__file__, configurable_ref)\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% For debugging\n\n# Access results for debugging\nfinal_frame = main_process.debug_frame\nfinal_fed_time_args = main_process.debug_fed_time_args\ndebug_dict = main_process.debug_dict\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n#%% Scrap\n\n","sub_path":"configuration_utilities/stations/contains_target_rgb.py","file_name":"contains_target_rgb.py","file_ext":"py","file_size_in_byte":8833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"143009268","text":"import copy\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Rectangle\nimport numpy as np\n\ndef _remove_spines(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n\ndef _framed_pic(ax, img):\n \"\"\"\n Display a black-and-white image in a 
transparent gray frame.\n    \"\"\"\n    if len(img.shape)==1 or img.shape[1]==1:\n        # cast to int so reshape receives integer dimensions\n        ax.imshow(img.reshape((int(np.sqrt(img.size)), int(np.sqrt(img.size)))), \n            cmap='Greys', interpolation='nearest', vmin=0, vmax=1)\n    else:\n        ax.imshow(img, cmap='Greys', interpolation='nearest', \n            vmin=0, vmax=1)\n    ax.add_patch(Rectangle((0,0), 1, 1, transform=ax.transAxes, \n        fc='none', lw=2, edgecolor='black', alpha=0.2))\n\ndef _plot_column(figH, x0, start_xs, end_xs, \n        im_shape, txt, clr1, clr2=None, freqs=None):\n    \"\"\"\n    Plot a column of original and manipulated images.\n    \"\"\"\n    # Color palette \n    ALPHA_solid=1.\n    ALPHA_trans=0.2\n    BLACK = (0, 0, 0)\n    # Define the dimensions of the column.\n\n    W_im = 0.09 # Image side len\n    H_major = 0.1 # Total height of two-image row\n    x = (0.25-2*W_im)/3 # Horizontal margin\n    y = (H_major-W_im)/2 # Vertical margin\n    W = 2*W_im+x # Width of two-image column \n    H_minor = H_major-2*y # Two-image row height without margins\n    assert H_minor==W_im, 'Wrong dimensions!'\n    H_title = 0.666*H_minor\n    H_bar = H_minor-H_title-2*y\n\n    ax_title=figH.add_axes([x0+x, (1-H_major)+2*y+H_bar, W, H_title])\n    plt.axis('off')\n    ax_title.add_patch(Rectangle((0, 0), 1, 1, \n        transform=ax_title.transAxes, fc=clr1, edgecolor='black',\n        alpha=ALPHA_solid))\n    ax_title.text(0.5, 0.4, txt, fontsize=20, ha='center', va='center')\n    \n    if freqs is not None:\n        ax_bar=figH.add_axes([x0+x, (1-H_major)+y, 2*W+2*x, H_bar])\n        ax_bar.yaxis.set_visible(False)\n        ax_bar.set_xlim(0,1)\n        ax_bar.set_xticks(np.linspace(0,1,6))\n        ax_bar.set_xticklabels([])\n        ax_bar.add_patch(Rectangle((0,0), freqs[0], 1,\n            transform=ax_bar.transAxes, fc=clr1, edgecolor='none',\n            alpha=ALPHA_solid))\n        ax_bar.add_patch(Rectangle((freqs[0],0), 1-freqs[0], 1,\n            transform=ax_bar.transAxes, fc=clr2, edgecolor='none',\n            alpha=ALPHA_solid))\n    \n    for row_id in range(start_xs.shape[0]):\n        # Plot the original image.\n        ax_img = figH.add_axes([x0+x, (1-H_major*(row_id+2))+y, \n            W_im, H_minor])\n        plt.axis('off')\n        _framed_pic(ax_img, start_xs[row_id])\n        \n        # Plot the manipulated image.\n        ax_img = figH.add_axes([x0+x+W_im+x, (1-H_major*(row_id+2))+y, \n            W_im, H_minor])\n        plt.axis('off')\n        _framed_pic(ax_img, end_xs[row_id])\n","sub_path":"Experiments/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"440451873","text":"import tkinter as tk\r\nimport tkinter.font as tf\r\nimport control.Controller as Con\r\nimport accessories.Accessories as Acc\r\nimport accessories.MessageBoxes as MB\r\nimport gui.commonGUIClasses.GroupsGUI as Gr\r\n\r\n\r\nclass UpdateStore(tk.Frame):\r\n    def __init__(self, master, controller: Con.Controller, *args, **kwargs):\r\n        tk.Frame.__init__(self, master, *args, **kwargs)\r\n\r\n        self.parent = master\r\n        self.controller = controller\r\n        self.font = tf.Font(family='Helvetica', size=18, weight='bold')\r\n\r\n        self.groupsFrame = Gr.GroupsGUI(self, self.controller)\r\n\r\n        self.areaLabel = tk.Label(self, text=Acc.area, bg=Acc.green, font=self.font, fg=Acc.white)\r\n        self.areaEntry = tk.Entry(self, font=self.font, justify='center')\r\n        self.areaEntry.bind(Acc.enterKey, self.resume)\r\n\r\n        self.button = tk.Button(self, relief=tk.GROOVE, text=Acc.ok,\r\n                                command=self.resume, bg=Acc.green, font=self.font)\r\n        self.configureParts()\r\n\r\n    def resume(self, event=None):\r\n        if self.check():\r\n            if MB.confirmation():\r\n                self.groupsFrame.setArea(self.getArea())\r\n                self.clear()\r\n\r\n    def check(self):\r\n        if 
not self.groupsFrame.check():\r\n            return False\r\n        if not self.groupsFrame.checkArea(self.getArea()):\r\n            return False\r\n        return True\r\n\r\n    def configureParts(self):\r\n        self.groupsFrame.grid(row=0, column=0, columnspan=2, sticky=\"news\")\r\n\r\n        self.areaEntry.grid(row=1, column=0, sticky=\"ew\")\r\n        self.areaLabel.grid(row=1, column=1, sticky=\"ew\")\r\n\r\n        self.button.grid(row=2, column=0, columnspan=2, sticky=\"ew\")\r\n\r\n        self.columnconfigure(0, weight=2)\r\n        self.columnconfigure(1, weight=1)\r\n\r\n        for i in range(3):\r\n            self.rowconfigure(i, weight=1)\r\n\r\n    def getArea(self):\r\n        return self.areaEntry.get().strip()\r\n\r\n    def clear(self):\r\n        self.areaEntry.delete(0, 'end')\r\n        self.groupsFrame.clear()\r\n","sub_path":"persons/gui/private/UpdateStore.py","file_name":"UpdateStore.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"544554325","text":"#!/usr/bin/python\r\n#-*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport sys\t\t\t\t\t\t\t\r\nimport io\t\t\t\t\t\t\t\r\nimport time\t\t\t\t\t\t\t\r\nimport datetime\t\t\t\t\t\t\r\nimport configparser\t\t\t\t\t\r\nimport logging\t\t\t\t\t\t\r\nimport codecs\t\t\t\t\t\t\r\nimport re\t\t\t\t\t\t\t\r\nimport pymysql\t\t\t\t\t\t\r\nimport pandas as pd\t\t\t\t\t\r\nimport xlsxwriter\r\nimport xlrd\r\n\r\n\"\"\" A class that fetches worksheets matching a given name pattern from local Excel files \"\"\"\r\nclass Fetch(object) :\r\n\tdef __init__(self) :\r\n\t\tself.config = configparser.RawConfigParser()\r\n\t\tself.config.read('./config.cfg')\r\n\t\tif not os.path.exists('./Log'):\r\n\t\t\tos.makedirs('./Log')\r\n\t\tlogging.basicConfig(filename='./Log/'+datetime.datetime.today().strftime(\"%Y%m%d\")+'.log'\r\n\t\t\t, level=logging.INFO\r\n\t\t\t, format='%(asctime)s %(message)s'\r\n\t\t\t, datefmt='%Y/%m/%d %I:%M:%S %p')\r\n\r\n\tdef findpath(self,locatedir,filetype,filestring = \".\"):\r\n\t\tprint('findpath {0} Start'.format(filestring))\r\n\t\tlogging.info('findpath {0} Start'.format(filestring))\r\n\t\tTotalFiles = 0\r\n\t\tSuccessFiles = 0\r\n\t\tfilepath = None\t# avoid UnboundLocalError when no file matches\r\n\t\tfor root, _, files in os.walk('{}'.format(locatedir)): \r\n\t\t\tfor onefile in files:\r\n\t\t\t\ttry :\r\n\t\t\t\t\tfilecompile = re.compile('{}'.format(filestring))\r\n\t\t\t\t\tfindlength = len(filecompile.findall(onefile))\r\n\t\t\t\t\tif onefile.lower().endswith('.{}'.format(filetype)) and findlength > 0:\r\n\t\t\t\t\t\tTotalFiles+= 1\r\n\t\t\t\t\t\tfilepath = os.path.join(root, onefile)\r\n\t\t\t\t\t\tSuccessFiles+= 1\r\n\t\t\t\texcept Exception as inst:\r\n\t\t\t\t\tprint(\"find filepath fail\")\r\n\t\t\t\t\tprint(inst)\r\n\t\tprint(\"Total Number of Files: \"+ str(TotalFiles) +\"; Success Files: \" +str(SuccessFiles))\r\n\t\tlogging.info(\"Total Number of Files: \"+ str(TotalFiles) +\"; Success Files: \" +str(SuccessFiles))\r\n\r\n\t\treturn filepath\r\n\r\n\tdef getdataframe(self,filepath,sheetname) :\r\n\t\tprint('getdataframe {0} Start'.format(sheetname))\r\n\t\tlogging.info('getdataframe {0} Start'.format(sheetname))\r\n\r\n\t\texcel_reader = pd.ExcelFile(filepath)\r\n\t\tsheet_names = excel_reader.sheet_names\r\n\t\tfilecompile = re.compile('{}'.format(sheetname))\r\n\t\ttargetsheet = []\r\n\t\tfor i in range(len(sheet_names)) :\r\n\t\t\tfindlength = len(filecompile.findall(sheet_names[i].strip().upper()))\r\n\t\t\tif findlength > 0 :\r\n\t\t\t\ttargetsheet.append(i)\r\n\t\tdf = pd.read_excel(filepath,targetsheet[0],header = 0)\r\n\t\tnewcol_name = 
['No','meterid','MeterType','is_calculation','device_calculation','Spec','factory','building','Plant1','Plant2','share',\r\n\t\t\t\t\t\t'consum_type','area','floor','group','line','pd_line_meter','device','line_area','calculation_desc']\r\n\t\tdf.columns = newcol_name\r\n\r\n\t\treturn df\r\n","sub_path":"fetchexcel.py","file_name":"fetchexcel.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"403205739","text":"from sklearn import svm\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn import metrics\nimport numpy as np\nfrom sklearn.model_selection import cross_val_score\n\n\ndef train(train_filename, test_filename):\n train_file = open(train_filename, 'r')\n output_file = open(test_filename + '_svm_output', 'w')\n test_file = open(test_filename, 'r')\n\n train_utt, train_label = [], []\n test_utt, test_label = [], []\n\n for line in train_file:\n utt, label = line.strip().split('\\t')\n train_utt.append(utt)\n train_label.append(label)\n\n for line in test_file:\n utt, label = line.strip().split('\\t')\n test_utt.append(utt)\n test_label.append(label)\n\n vectorizer = CountVectorizer(ngram_range=(1, 2))\n train_features = vectorizer.fit_transform([utt for utt in train_utt])\n test_features = vectorizer.transform([utt for utt in test_utt])\n\n svmc = svm.SVC(kernel='linear', C=1, probability=True)\n\n scores = cross_val_score(svmc, train_features, train_label, cv=3)\n print(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\n svmc.fit(train_features, [str(labels) for labels in train_label])\n\n predictions = svmc.predict(test_features)\n probabilities = svmc.predict_proba(test_features)\n\n # for pred, prob in zip(predictions, probabilities):\n # pred_idx = 0 if pred == -1 else 1\n # # print(prob[pred_idx]), pred, prob\n # if (prob[pred_idx] >= 0.98):\n # if(pred_idx == 0):\n # output_file.write('-1' + '\\n')\n # else:\n # output_file.write('1' + '\\n')\n # else:\n # output_file.write('0' + '\\n')\n\n test_acc = 0.0\n total_correct = 0.0\n\n for idx, pred in enumerate(predictions):\n if (pred == test_label[idx]):\n total_correct += 1\n else:\n print(test_utt[idx], pred, test_label[idx])\n output_file.write(str(pred) + '\\n')\n\n print(total_correct / len(test_label))\n train_file.close()\n test_file.close()\n output_file.close()\n\n # fpr, tpr, thresholds = metrics.roc_curve(np.asarray(test_label), predictions, pos_label=1)\n # print(\"Multinomial naive bayes AUC: {0}\".format(metrics.auc(fpr, tpr)))\n\n\nroot_data_dir = 'path_to_root_dir'\ntrain(root_data_dir + 'path_to_trainset', root_data_dir + 'path_to_testset')\n","sub_path":"classifiers/svm_classifier.py","file_name":"svm_classifier.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"58453936","text":"class FixedSizeHeap:\n def __init__(self, max_size):\n self.heap = [0]\n self.size = 0\n self.max_size = max_size\n \n def is_empty(self):\n return self.size == 0 \n \n def perc_up(self, i):\n while i//2 > 0:\n parent = i // 2\n if self.heap[i] < self.heap[parent]:\n temp = self.heap[parent]\n self.heap[parent] = self.heap[i]\n self.heap[i] = temp\n i = parent\n \n def perc_down(self, i):\n while (i*2) <= self.size:\n min_child = self.min_child(i)\n if self.heap[i] > self.heap[min_child]:\n temp = self.heap[i]\n self.heap[i] = self.heap[min_child]\n self.heap[min_child] = temp\n i = min_child\n\n def 
min_child(self, i):\n        if i * 2 + 1 > self.size:\n            return i * 2\n        else:\n            if self.heap[i*2] < self.heap[i*2+1]:\n                return i*2\n            else:\n                return i*2 + 1\n    \n    def insert(self, k):\n        self.heap.append(k)\n        self.size += 1\n        self.perc_up(self.size) \n        if self.size > self.max_size:\n            self.del_max()\n    \n    \n    def find_min(self):\n        return self.heap[1]\n    \n    def del_min(self):\n        return_value = self.heap[1]\n        self.heap[1] = self.heap[self.size]\n        self.heap.pop()\n        self.size -= 1\n        self.perc_down(1)\n        return return_value\n\n    def del_max(self):\n        # max() plus list.remove() performs a linear scan, so this is O(n)\n        max_value = max(self.heap[1:])\n        self.heap.remove(max_value)\n        self.size -= 1\n        return max_value\n    \n    \nif __name__ =='__main__':\n    heap = FixedSizeHeap(5)\n    heap.insert(6)\n    heap.insert(4)\n    heap.insert(8)\n    heap.insert(2)\n    heap.insert(7)\n    print(heap.heap)\n    heap.insert(5)\n    print(heap.heap)\n    heap.del_min()\n    print(heap.heap)\n    heap.insert(9)\n    print(heap.heap)\n    heap.insert(8)\n    print(heap.heap)\n    \n    \n    \n'''\n1. The big-O notation of the algorithm is O(n) because I use the remove method, which scans the list linearly.\n2. If the problem is removing the smallest number instead, change it to self.heap.remove(min(self.heap)), which will remove the minimum value.\n'''","sub_path":"2014 summer/lab#9.py","file_name":"lab#9.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"489864637","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# COPYRIGHT (C) NEC CORPORATION 2016\n\nimport logging\nimport six\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom horizon import forms\nfrom horizon import messages\nfrom openstack_dashboard.api import base\n\nfrom nec_portal.api.cloudify import Cloudify as cloudify_api\n\n\nLOG = logging.getLogger(__name__)\n\nTOSCA_TEMPLATES_LIST_URL = 'project/tosca_templates/index.html'\nTOSCA_DEPLOYMENTS_LIST_URL = 'project/tosca_deployments/index.html'\n\n\nclass BuildParameterFieldMixin(object):\n \"\"\"Methods in this class are copying some implementation from other panel.\n Because this panel has to work without other panels.\n So, basically we don't have to change methods in this class\n in order to reflect original changes easily.\n \"\"\"\n def _build_parameter_fields(self, parameters):\n \"\"\"Create input field object at form.\n :param parameters: (dict)\n \"\"\"\n self.fields['deployment_id'] = forms.CharField(\n label=_('Deployment Name'),\n max_length=255,\n required=True,\n help_text=_('Input deployment name.'))\n\n for key, value in parameters.iteritems():\n field = None\n\n field_key = self.param_prefix + key\n # Set common field properties\n field_args = {\n 'initial': value.get('default', None),\n 'label': key,\n 'help_text': value.get('description', None),\n }\n param_type = value.get('type', None)\n\n # Create a field object\n if param_type == 'integer':\n field = forms.IntegerField(**field_args)\n\n elif param_type == 'boolean':\n field = forms.BooleanField(**field_args)\n\n else:\n field = forms.CharField(**field_args)\n\n if field:\n self.fields[field_key] = field\n\n\nclass CreateForm(forms.SelfHandlingForm, BuildParameterFieldMixin):\n param_prefix = '__param_'\n url_param_prefix = '__url_param_'\n\n def __init__(self, *args, **kwargs):\n super(CreateForm, self).__init__(*args, **kwargs)\n\n initial = kwargs['initial']\n\n # Create url kwargs fields.\n url_kwargs = initial.pop('url_kwargs')\n self._build_url_kwargs_fields(url_kwargs)\n\n # Create parameter fields.\n parameters = initial.pop('parameters')\n self._build_parameter_fields(parameters)\n\n def _build_url_kwargs_fields(self, url_kwargs):\n \"\"\"Create hidden field object at form.\"\"\"\n for key, value in url_kwargs.items():\n field = forms.CharField(widget=forms.HiddenInput())\n field.initial = value\n self.fields[self.url_param_prefix + key] = field\n\n def _get_url_kwargs_fields(self, data):\n prefix_length = len(self.url_param_prefix)\n return {\n k[prefix_length:]: v for k, v in six.iteritems(data)\n if k.startswith(self.url_param_prefix)\n }\n\n def _get_params_list(self, data):\n \"\"\"Get input parameter values on html submit form\n :param data: Handle data\n \"\"\"\n prefix_length = len(self.param_prefix)\n return {\n k[prefix_length:]: v for (k, v) in six.iteritems(data)\n if k.startswith(self.param_prefix)\n }\n\n def handle(self, request, data):\n url_kwargs_list = self._get_url_kwargs_fields(data)\n params_list = self._get_params_list(data)\n for key, value in url_kwargs_list.items():\n if key != 'template_id':\n params_list[key] = value\n\n try:\n cloudify_api(\n url_for_orchestration=(\n base.url_for(self.request, 'orchestration')\n ),\n token_id=self.request.user.token.id\n ).create_deployment(\n url_kwargs_list['template_id'],\n data.get('deployment_id'),\n params_list\n )\n except Exception as e:\n LOG.error(e)\n messages.error(\n request,\n _('An error has occurred while 
processing your request.'))\n            return False\n\n        messages.success(request, _('Succeeded to register a request.'))\n\n        return True\n","sub_path":"nec_portal/dashboards/project/tosca_deployments/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"447272112","text":"#!/usr/bin/python\r\n\r\nimport sys\r\nfrom time import time\r\nimport logging\r\nsys.path.append(\"../DatasetProcessing/\")\r\nfrom vectorize_split_dataset import preprocess\r\n\r\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')\r\n\r\n##Divide Dataset into Test vs Train\r\nfeatures_train, features_test, labels_train, labels_test = preprocess()\r\n\r\n#########################################################\r\nfrom sklearn.naive_bayes import GaussianNB\r\nclf = GaussianNB()\r\nt0 = time()\r\nclf.fit(features_train, labels_train)\r\npred = clf.predict(features_test)\r\nprint(\"training time:\", round(time()-t0, 3), \"s\")\r\nprint(clf.score(features_test, labels_test))\r\n\r\n##Printing Metrics for Training and Testing\r\nprint(\"------------------------------------------------------\")\r\nprint(\"No. of Testing Features:\"+str(len(features_test)))\r\nprint(\"No. of Testing Features Label:\"+str(len(labels_test)))\r\nprint(\"No. of Training Features:\"+str(len(features_train)))\r\nprint(\"No. of Training Features Label:\"+str(len(labels_train)))\r\nprint(\"No. of Predicted Features:\"+str(len(pred)))\r\nprint(\"------------------------------------------------------\")\r\n\r\n### Saving model METHOD 2###\r\n# from sklearn.externals import joblib\r\n# model_file = \"./createdModel/naiveBayesModel.pkl\"\r\n# joblib.dump(clf, model_file)\r\n# print(\"classifier saved!!!!!!\")\r\n# clf1 = joblib.load(model_file)\r\n# print(\"classifier loaded!!!!!\")\r\n# pred = clf.predict(features_test)\r\n# print(clf.score(features_test, labels_test))\r\n\r\n##Calculating Classifier Performance Metrics\r\nfrom sklearn.metrics import classification_report\r\ny_true = labels_test\r\ny_pred = pred\r\nlabels = ['0','1']\r\ntarget_names = ['class 0', 'class 1']\r\nprint(classification_report(y_true, y_pred, target_names=target_names, labels=labels))\r\nprint(\"------------------------------------------------------\")\r\n\r\nfrom vectorize_split_dataset import preprocessLine\r\narrayTest = []\r\n\r\ninputPath=\"../ValidationData/validate_start.txt\"\r\noutputPath=\"../ValidationData/validate_completed.txt\"\r\nprint(\"Labelling Data\")\r\n##Opening Text file\r\noutput_file = open(outputPath, \"w\")\r\nfp = open(inputPath, 'r')\r\nline = fp.readline()\r\nwhile line:\r\n    # process the current line first, then advance; the previous version\r\n    # read the next line immediately and skipped the first line of the file\r\n    pLine = line.strip()\r\n    if(pLine != \"\"):\r\n        del arrayTest[:]\r\n        arrayTest.append(pLine)\r\n        features_tobelabelled = preprocessLine(arrayTest)\r\n        producedLabel = clf.predict(features_tobelabelled)[0]\r\n        #print(producedLabel)\r\n        print(producedLabel+\"\\t\"+pLine)\r\n        output_file.write(producedLabel+\"\\t\"+pLine+\"\\n\")\r\n        #text_file.write(\"$$$$$\\n\")\r\n    line = fp.readline()\r\n#end loop\r\nfp.close()\r\noutput_file.close()\r\n\r\n\r\n","sub_path":"RCAAnalysis1/Algorithms/NB1.py","file_name":"NB1.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"98419020","text":"# encoding: utf-8\nimport pytest\n\nfrom wellcomeml.ml.bert_vectorizer import BertVectorizer\n\nEMBEDDING_TYPES = [\n    \"mean_second_to_last\",\n    \"mean_last\",\n    
\"sum_last\",\n \"mean_last_four\",\n \"pooler\"\n]\n\n\n@pytest.fixture\ndef vec(scope='module'):\n vectorizer = BertVectorizer()\n\n vectorizer.fit()\n return vectorizer\n\n\n@pytest.mark.bert\ndef test_fit_transform_works(vec):\n X = [\"This is a sentence\"]\n\n assert vec.fit_transform(X).shape == (1, 768)\n\n\n@pytest.mark.bert\ndef test_embed_two_sentences(vec):\n X = [\n \"This is a sentence\",\n \"This is another one\"\n ]\n\n for embedding in EMBEDDING_TYPES:\n vec.sentence_embedding = embedding\n X_embed = vec.transform(X, verbose=False)\n assert X_embed.shape == (2, 768)\n\n\n@pytest.mark.bert\ndef test_embed_long_sentence(vec):\n X = [\"This is a sentence\"*500]\n\n for embedding in EMBEDDING_TYPES:\n vec.sentence_embedding = embedding\n X_embed = vec.transform(X, verbose=False)\n assert X_embed.shape == (1, 768)\n\n\n@pytest.mark.bert\ndef test_embed_scibert():\n X = [\"This is a sentence\"]\n vec = BertVectorizer(pretrained='scibert')\n vec.fit()\n\n for embedding in EMBEDDING_TYPES:\n vec.sentence_embedding = embedding\n X_embed = vec.transform(X, verbose=False)\n assert X_embed.shape == (1, 768)\n\n\n@pytest.mark.skip(\"Reason: Build killed or stalls. Issue #200\")\ndef test_save_and_load(tmpdir):\n tmpfile = tmpdir.join('test.npy')\n\n X = [\"This is a sentence\"]\n for pretrained in ['bert', 'scibert']:\n for embedding in EMBEDDING_TYPES:\n vec = BertVectorizer(\n pretrained=pretrained,\n sentence_embedding=embedding\n )\n X_embed = vec.fit_transform(X, verbose=False)\n\n vec.save_transformed(str(tmpfile), X_embed)\n\n X_loaded = vec.load_transformed(str(tmpfile))\n\n assert (X_loaded != X_embed).sum() == 0\n","sub_path":"tests/test_bert_vectorizer.py","file_name":"test_bert_vectorizer.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"136397912","text":"from buildbot.config import BuilderConfig\nfrom buildbot.plugins import steps, util\nfrom maxscale import workers\nfrom maxscale.config import constants\nfrom .support import common\n\n\ndef createBuildfactory():\n factory = util.BuildFactory()\n factory.addSteps(common.cloneRepository())\n factory.addStep(steps.ShellCommand(\n name=util.Interpolate(\"Register in the Docker Registry %(prop:dockerRegistryUkrl)s\"),\n command=[\"docker\", \"login\", util.Property(\"dockerRegistryUrl\"),\n \"--username\", constants.DOCKER_REGISTRY_USER_NAME,\n \"--password\", util.Secret(\"dockerRegistryPassword\")\n ],\n haltOnFailure=True\n ))\n factory.addSteps(common.downloadAndRunScript(\n name=util.Interpolate(\"Build docker image for %(prop:target)s\"),\n scriptName=\"build_maxscale_docker_image.py\",\n args=[\n \"--product\", util.Property(\"mdbciProductName\"),\n \"--product-version\", util.Property(\"target\"),\n \"--name\", util.Property(\"dockerProductName\"),\n \"--tag\", util.Property(\"target\"),\n \"--registry\", util.Property(\"dockerRegistryUrl\")\n ],\n workdir=util.Interpolate(\"%(prop:builddir)s/build/maxscale/\"),\n ))\n return factory\n\n\nBUILDERS = [\n BuilderConfig(\n name=\"build_docker_image\",\n workernames=workers.workerNames(),\n factory=createBuildfactory(),\n nextWorker=common.assignWorker,\n tags=[\"BUILD\"],\n collapseRequests=False\n )\n]\n","sub_path":"master/maxscale/builders/build_docker_image.py","file_name":"build_docker_image.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"162447685","text":"# wap to 
remove duplicate elements from a list\n\na = [10,30,40,60,70,30]\nb = []\n\nfor i in a :\n    if i not in b:\n        b.append(i)\nprint(a)\nprint(b)\n","sub_path":"com/org/comp/pgm48b.py","file_name":"pgm48b.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"526492297","text":"# -*- coding: utf-8 -*-\n\"\"\" Module with all necessary functions for the recipes Tab.\nThis includes all functions for the Lists, DB and Buttons/Dropdowns.\n\"\"\"\n\nfrom collections import Counter\nfrom typing import List, Tuple\n\nfrom src import maker\n\nfrom src.display_controller import DP_CONTROLLER\nfrom src.database_commander import DB_COMMANDER\nfrom src.error_handler import logerror\nfrom src.models import Ingredient\nfrom src.config_manager import shared\n\n\ndef fill_recipe_box_with_ingredients(w):\n    \"\"\" Assigns all ingredients to the Comboboxes in the recipe tab \"\"\"\n    comboboxes_recipe = DP_CONTROLLER.get_comboboxes_recipes(w)\n    ingredient_list = [x.name for x in DB_COMMANDER.get_all_ingredients(get_hand=False)]\n    DP_CONTROLLER.fill_multiple_combobox(comboboxes_recipe, ingredient_list, clear_first=True)\n\n\ndef __check_enter_constraints(recipe_name: str, newrecipe: bool) -> Tuple[int, bool]:\n    \"\"\"Checks if either the recipe already exists (new recipe) or if one is selected (update)\n    Returns cocktail, got_error\"\"\"\n    cocktail = DB_COMMANDER.get_cocktail(recipe_name)\n    if cocktail is not None and newrecipe:\n        DP_CONTROLLER.say_name_already_exists()\n        return cocktail.id, True\n    if cocktail is None:\n        return 0, False\n    return cocktail.id, False\n\n\ndef __validate_extract_ingredients(ingredient_names: List[str], ingredient_volumes: List[int]) -> Tuple[List[str], List[int], bool]:\n    \"\"\"Gives a list for names and volumes of ingredients.\n    If a corresponding value is missing, informs the user.\n    Returns [names], [volumes], is_valid\"\"\"\n    names, volumes = [], []\n    for name, volume in zip(ingredient_names, ingredient_volumes):\n        if (name == \"\" and volume != \"\") or (name != \"\" and volume == \"\"):\n            DP_CONTROLLER.say_some_value_missing()\n            return [], [], False\n        if name != \"\":\n            names.append(name)\n            volumes.append(volume)\n    if len(names) == 0:\n        DP_CONTROLLER.say_recipe_at_least_one_ingredient()\n        return [], [], False\n    counter_names = Counter(names)\n    double_names = [x[0] for x in counter_names.items() if x[1] > 1]\n    if len(double_names) != 0:\n        DP_CONTROLLER.say_ingredient_double_usage(double_names[0])\n        return [], [], False\n    try:\n        volumes = [int(x) for x in volumes]\n    except ValueError:\n        DP_CONTROLLER.say_needs_to_be_int()\n        return [], [], False\n    return names, volumes, True\n\n\ndef __enter_or_update_recipe(recipe_id, recipe_name, recipe_volume, recipe_alcohollevel, enabled, ingredient_data: List[Ingredient], comment):\n    \"\"\"Logic to insert/update data into DB\"\"\"\n    if recipe_id:\n        DB_COMMANDER.delete_recipe_ingredient_data(recipe_id)\n        DB_COMMANDER.set_recipe(recipe_id, recipe_name, recipe_alcohollevel, recipe_volume, comment, enabled)\n    else:\n        DB_COMMANDER.insert_new_recipe(recipe_name, recipe_alcohollevel, recipe_volume, comment, enabled)\n    cocktail = DB_COMMANDER.get_cocktail(recipe_name)\n    for ingredient in ingredient_data:\n        is_alcoholic = int(ingredient.alcohol > 0)\n        DB_COMMANDER.insert_recipe_data(cocktail.id, ingredient.id, ingredient.amount,\n                                        is_alcoholic, ingredient.recipe_hand)\n    return cocktail\n\n\n@logerror\ndef enter_recipe(w, newrecipe: bool):\n    \"\"\" Enters or updates 
the recipe into the db\"\"\"\n recipe_input = DP_CONTROLLER.get_recipe_field_data(w)\n recipe_name, selected_name, ingredient_names, ingredient_volumes, enabled, comment = recipe_input\n if not recipe_name:\n DP_CONTROLLER.say_enter_cocktailname()\n return\n if not newrecipe and not selected_name:\n DP_CONTROLLER.say_no_recipe_selected()\n return\n names, volumes, valid = __validate_extract_ingredients(ingredient_names, ingredient_volumes)\n if not valid:\n return\n\n recipe_id, error_message = __check_enter_constraints(recipe_name, newrecipe)\n if error_message:\n return\n\n recipe_volume = sum(volumes)\n ingredient_data = []\n recipe_volume_concentration = 0\n\n # first build the ingredient objects for machine add\n for ingredient_name, ingredient_volume in zip(names, volumes):\n ingredient = DB_COMMANDER.get_ingredient(ingredient_name)\n ingredient.amount = ingredient_volume\n ingredient.recipe_hand = False\n recipe_volume_concentration += ingredient.alcohol * ingredient_volume\n ingredient_data.append(ingredient)\n\n # build also the handadd data into an ingredient\n for ing in shared.handaddlist:\n ingredient = DB_COMMANDER.get_ingredient(ing.id)\n ingredient.amount = ing.amount\n ingredient.recipe_hand = True\n recipe_volume += ing.amount\n recipe_volume_concentration += ingredient.alcohol * ing.amount\n ingredient_data.append(ingredient)\n recipe_alcohollevel = int(recipe_volume_concentration / recipe_volume)\n\n cocktail = __enter_or_update_recipe(\n recipe_id, recipe_name, recipe_volume, recipe_alcohollevel, enabled, ingredient_data, comment\n )\n\n # remove the old name\n DP_CONTROLLER.remove_recipe_from_list_widgets(w, selected_name)\n DP_CONTROLLER.fill_list_widget_recipes(w, [recipe_name])\n DP_CONTROLLER.clear_recipe_data_maker(w, select_other_item=False)\n if enabled:\n maker.evaluate_recipe_maker_view(w, [cocktail])\n DP_CONTROLLER.clear_recipe_data_recipes(w, False)\n\n if newrecipe:\n DP_CONTROLLER.say_recipe_added(recipe_name)\n else:\n DP_CONTROLLER.say_recipe_updated(selected_name, recipe_name)\n\n\ndef load_recipe_view_names(w):\n \"\"\" Updates the ListWidget in the recipe Tab. \"\"\"\n cocktails = DB_COMMANDER.get_all_cocktails()\n recipe_list = [x.name for x in cocktails]\n DP_CONTROLLER.refill_recipes_list_widget(w, recipe_list)\n\n\n@logerror\ndef load_selected_recipe_data(w):\n \"\"\" Loads all Data from the recipe DB into the according Fields in the recipe tab. 
\"\"\"\n _, recipe_name, *_ = DP_CONTROLLER.get_recipe_field_data(w)\n if not recipe_name:\n return\n\n DP_CONTROLLER.clear_recipe_data_recipes(w, True)\n cocktail = DB_COMMANDER.get_cocktail(recipe_name)\n DP_CONTROLLER.set_recipe_data(w, cocktail)\n\n\n@logerror\ndef delete_recipe(w):\n \"\"\" Deletes the selected recipe, requires the Password \"\"\"\n if not DP_CONTROLLER.check_recipe_password(w):\n DP_CONTROLLER.say_wrong_password()\n return\n _, recipe_name, *_ = DP_CONTROLLER.get_recipe_field_data(w)\n if not recipe_name:\n DP_CONTROLLER.say_no_recipe_selected()\n return\n\n DB_COMMANDER.delete_recipe(recipe_name)\n DP_CONTROLLER.remove_recipe_from_list_widgets(w, recipe_name)\n DP_CONTROLLER.clear_recipe_data_recipes(w, False)\n DP_CONTROLLER.clear_recipe_data_maker(w)\n DP_CONTROLLER.say_recipe_deleted(recipe_name)\n\n\n@logerror\ndef enableall_recipes(w):\n \"\"\"Set all recipes to enabled \"\"\"\n disabled_cocktails = DB_COMMANDER.get_all_cocktails(get_enabled=False)\n DB_COMMANDER.set_all_recipes_enabled()\n maker.evaluate_recipe_maker_view(w, disabled_cocktails)\n DP_CONTROLLER.clear_recipe_data_recipes(w, True)\n DP_CONTROLLER.say_all_recipes_enabled()\n","sub_path":"src/recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":7176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"586322623","text":"from django import forms\nfrom django.forms import ModelForm, extras\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom crime_app import models\nfrom django.forms.models import inlineformset_factory\nfrom datetime import datetime\n\nclass LoginForm(AuthenticationForm):\n\n\n\n\tusername = forms.CharField(widget=forms.EmailInput(attrs={\n\t\t\t\t\t\t\t'class': 'form-control',\n\t\t\t\t\t\t\t'autofocus': '',\n\t\t\t\t\t\t\t'required': '',\n\t\t\t\t\t\t\t'placeholder': 'Email'\n\t\t\t\t\t\t\t}))\n\tpassword = forms.CharField(widget=forms.PasswordInput(attrs={\n\t\t\t\t\t\t\t'class': 'form-control',\n\t\t\t\t\t\t\t'placeholder': 'Password'\n\t\t\t\t\t\t\t}))\n\n\nclass DistrictForm(ModelForm):\n\n\tclass Meta:\n\t\tmodel = models.District\n\t\tfields = '__all__'\n\t\twidgets = {\n\t\t\t'name': forms.TextInput(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t\t'placeholder': 'District Name',\n\t\t\t\t})\n\t\t}\n\n\nclass PersonForm(ModelForm):\n\n\tbdate = forms.DateField(widget=extras.SelectDateWidget(years=range(datetime.now().year,1900,-1)))\n\n\tdef __init__(self, *args, **kw):\n\t\tsuper(ModelForm, self).__init__(*args, **kw)\n\t\tself.fields['lname'].label = \"Last Name\"\n\t\tself.fields['fname'].label = \"First Name\"\n\t\tself.fields['mname'].label = \"Middle Name\"\n\t\tself.fields['district'].label = \"Address\"\n\t\tself.fields['bdate'].label = \"Birthday\"\n\t\tself.fields.keyOrder = [\n \t'fname',\n \t'lname',\n \t'mname',\n\t\t\t'district',\n\t\t\t'bdate',\n\t\t\t'sex' \t\n ]\n\n\tclass Meta:\n\t\tmodel = models.Person\n\t\tfields = '__all__'\n\t\twidgets = {\n\t\t\t'lname': forms.TextInput(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t\t'placeholder' : 'Last Name'\n\t\t\t}),\n\t\t\t'fname': forms.TextInput(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t\t'placeholder' : 'First Name'\n\t\t\t}),\n\t\t\t'mname': forms.TextInput(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t\t'placeholder' : 'Middle Name'\n\t\t\t}),\n\n\t\t\t'district':forms.Select(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t}),\n\n\t\t\t\"\"\"\n\t\t\t'bdate':forms.DateInput(attrs={\n\t\t\t\t'class': 
'form-control',\n\t\t\t}),\t\n\t\t\t\"\"\"\n\n\t\t\t'sex': forms.Select(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t}),\n\n\t\t}\n\nclass CrimeForm(ModelForm):\n\n\twhen = forms.DateField(widget=extras.SelectDateWidget(years=range(datetime.now().year,1900,-1)))\n\n\tdef __init__(self, district=None ,*args, **kw):\n\t\tsuper(ModelForm, self).__init__(*args, **kw)\n\t\tself.fields['suspect'].label = \"Suspect\"\n\n\t\tif district:\n\t\t\tself.fields['suspect'].queryset = models.Suspect.objects.filter(active=district)\n\t\n\tclass Meta:\n\t\tmodel = models.Crime\n\t\tfields = '__all__'\n\t\twidgets = {\n\t\t\t'agent' : forms.Select(attrs={\n\t\t\t\t'class' : 'form-control'\n\t\t\t\t}),\n\t\t\t'district': forms.Select(attrs={\n\t\t\t\t'class' : 'form-control'\n\t\t\t\t}),\n\t\t\t'name' : forms.TextInput(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t\t'placeholder' : 'Crime Name'\n\t\t\t}),\n\t\t\t'category' : forms.Select(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t}),\n\t\t}\n\n\n\nclass AgentForm(ModelForm):\n\tclass Meta:\n\t\tmodel = models.Agent\n\t\tfields = ['post']\n\t\twidgets = {\n\t\t\t'post' : forms.Select(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t})\n\t\t}\n\nclass SuspectForm(ModelForm):\n\tclass Meta:\n\t\tmodel = models.Suspect\n\t\tfields = ['active']\n\t\twidgets = {\n\t\t\t'post' : forms.Select(attrs={\n\t\t\t\t\t'class' : 'form-control',\n\t\t\t\t})\n\t\t}\n\nclass UserForm(ModelForm):\n\n\tdef __init__(self, *args, **kw):\n\t\tsuper(ModelForm, self).__init__(*args, **kw)\n\t\tself.fields['email'].label = \"Email\"\n\t\tself.fields['password'].label = \"Password\"\n\t\t\n\tclass Meta:\n\t\tmodel = models.User\n\t\tfields = ['email','password']\n\t\twidgets = {\n\t\t\t'email' : forms.EmailInput(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t\t'placeholder': 'Email',\n\t\t\t}),\n\t\t\t'password' : forms.PasswordInput(attrs={\n\t\t\t\t'class': 'form-control',\n\t\t\t\t'placeholder': 'Password',\n\t\t\t}),\n\t\t}\n\nAgentInLineFormset = inlineformset_factory(models.Person, models.Agent, form=AgentForm)\nUserInLineFormset = inlineformset_factory(models.Agent, models.User, form=UserForm)\n\n","sub_path":"cs165/crime_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"645035469","text":"#!/usr/bin/env python\n# coding: utf-8\n# updated!!!!\n\n'''\nFor more information and details about the algorithm, please refer to PhD thesis of Wojciech Stach\nLEARNING AND AGGREGATION OF FUZZY COGNITIVE MAPS – AN\nEVOLUTIONARY APPROACH\nby\nWojciech Stach\n\n'''\nimport numpy as np\nimport copy\nimport tqdm.auto as tq\nimport matplotlib.pylab as plt\nimport matplotlib\n\n#matplotlib.use(\"TkAgg\") # nice feature, it will plot and update fitness function during learning process !!!! 
do NOT use in the jupyter notebook !!!\n\nclass rcga:\n    '''\n    RCGA algorithm for creating an FCM based on sample values,\n    nConcepts - number of concepts (nodes), concepts: initial concept values,\n    Pmutation: probability of mutation (default 0.5), Precombination: probability of crossover (0.9),\n    population_size (default 100), max_generations: max number of steps (def 100000),\n    numberofsteps - number of simulation steps, should be the same as in the historical data,\n    maxfitness - fitness value after which learning process can be stopped \n    '''\n\n    def __init__(self, concepts, Pmutation=None, Precombination=None, population_size=None,\n                 max_generations=None, historicaldata=None, fcm=None,\n                 numberofsteps=None, tournamentP=None, tournamentK=None, lbd=None,maxfitness=None):\n\n        # GENERAL PARAMS\n        # types of mutations are randomly chosen according to the authors of the article W.Stach et al. 2005\n        self.mutation_methods = ['random', 'nonuniform', 'Muhlenbein']\n        # types of selection are randomly chosen according to the authors of the article W.Stach et al. 2005\n        self.selection_methods = ['rulette', 'tournament']\n        # probability of a cell mutating\n        self.prob_mutation = 0.5 if Pmutation is None else Pmutation\n        self.prob_recombination = 0.9 if Precombination is None else Precombination\n        self.tournamentP = 1 if tournamentP is None else tournamentP\n        self.tournamentK = 5 if tournamentK is None else tournamentK  # or 10....\n        self.lbd = 1 if lbd is None else lbd # this is the operator of the sigmoid function, in a lot of papers it's set to 1 (elpiniki), Stach suggested 5\n\n        # GENERATION PROPERTIES\n        # size of the population, number of chromosomes in each population\n        self.population_size = 100 if population_size is None else population_size\n        if self.population_size % 2 != 0:\n            raise ValueError('Population size must be an EVEN number')\n        # max number of generations (honor the constructor argument when given)\n        self.max_generations = 100000 if max_generations is None else max_generations\n        self.current_gen = 0\n        self.generations = np.zeros((self.population_size, len(concepts[0]), len(concepts[0]) - 1))\n        self.nConcepts = len(concepts[0])\n\n        # HISTORICAL DATA\n        # historical data obtained from fcm simulations or observations (in the format columns - concepts, rows - simulation steps)\n        if historicaldata is None and fcm is None:\n            raise ValueError('Cannot run the learning process without previous FCM architecture or historical data!!!')\n        self.data = historicaldata\n        # fcm which we are optimizing\n        self.fcm = fcm\n\n        # FITNESS FUNCTION\n        self.generation_fitness = np.zeros((1, self.population_size))\n        self.maxfitness = 0.999 if maxfitness is None else maxfitness\n        self.concepts_for_testing = concepts\n        # number of steps we have to run the simulation in order to calculate the fitness function (in Stach paper - 1 step)\n        self.numberofsteps = 2 if numberofsteps is None else numberofsteps  # suggested 1\n        # termination conditions\n        self.termination = False\n\n    def initialize(self):\n        # initialize 1st population\n        self.generations = np.random.uniform(low=-1, high=1,\n                                             size=(self.population_size, self.nConcepts, self.nConcepts - 1))\n\n\n\n    # -------------------- FITNESS OF THE GENERATION --------------------------------------\n\n    def simulateFCM(self, concepts, weights, nsteps):\n        '''\n        we have to simulate the fcm with the current weights in order to calculate the fitness function\n        concepts should be given as a np.array((1,nConcepts))\n        :param concepts: concept vector\n        :param weights: weight array\n        :param nsteps: number of time steps for the FCM 
simulation\n        :return: concepts values after nsteps\n        '''\n\n\n        # VERY IMPORTANT\n        # weights as np.array((nConcepts,nConcepts-1)) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n        assert weights.shape == (self.nConcepts, self.nConcepts - 1), 'wrong encoding'\n\n\n        for j in range(1, nsteps):\n            newvalues = np.zeros((concepts.shape[0]))\n            for i in range(concepts.shape[0]):\n                idx = list(range(concepts.shape[0]))\n                idx.remove(i)\n                newvalues[i] = round(1 / (1 + np.exp(-(concepts[i] + concepts[idx] @ weights[i]))), 8)\n\n\n            concepts = newvalues\n        return concepts\n\n    def calculate_fitness(self, weights):\n        '''\n        calculate fitness for each of the chromosomes\n        :param weights: generated weight array, then tested\n        :return: fitness of the chromosome (how well this weight matrix did)\n        '''\n        # difference\n        alpha = 1 / ((self.numberofsteps - 1) * self.nConcepts * self.data.shape[0]) \n        # we are counting the L1 error\n        # let's say we have both historical data and fcm, so we can simply\n        # simulate with new weights and calculate the difference to obtain the fitness function\n        error = 0\n        for row, testcase in zip(self.data,self.concepts_for_testing):\n            error += np.sum(\n                np.abs(np.subtract(row, self.simulateFCM(testcase, weights, self.numberofsteps))))\n        return 1 / (100 * alpha*error + 1)\n\n    # -------------------- CROSSOVER --------------------------------------\n\n    def crossover(self):\n        '''\n        crossover - swapping the values between the chromosomes in the generation e.g. 0:15 weights from weights1 are swapped with\n        weights 15:: in weights2\n        :return: crossed-over pairs\n        '''\n        crossover_pairs = self.generations\n        a = list(np.random.choice([False, True], p=[1 - self.prob_recombination, self.prob_recombination],\n                                  size=self.population_size).astype(int) * range(self.population_size))\n        a = list(filter(lambda x: x != 0, a))\n        # we are applying one point crossover and mixing 1st with 2nd, 3rd with 4th and so on...\n        for i in range(0, len(a) - 1, 2):  # pair up the selected chromosomes, every even idx\n            # 1 take two crossover pairs (use the selected indices, not the loop position)\n            chromA = crossover_pairs[a[i]]\n            chromB = crossover_pairs[a[i + 1]]\n            # 2 flatten them (np.reshape of a slice returns a view, so edits propagate)\n            chromA = np.reshape(chromA, (self.nConcepts * (self.nConcepts - 1)))\n            chromB = np.reshape(chromB, (self.nConcepts * (self.nConcepts - 1)))\n            # 3 randomly choose the 'crossing point'\n            point = np.random.choice(range(self.nConcepts * (self.nConcepts - 1)))\n            # 4 swap the tails after the crossing point; copy one side first so\n            #   the two children do not end up identical\n            tail = chromA[point:].copy()\n            chromA[point:] = chromB[point:]\n            chromB[point:] = tail\n            # 5 reshape to (nconcepts,nconcepts)\n            chromA = np.reshape(chromA, (self.nConcepts, self.nConcepts - 1))\n            chromB = np.reshape(chromB, (self.nConcepts, self.nConcepts - 1))\n        # after crossover, crossover_pairs are the latest generation\n\n        self.generations = crossover_pairs\n\n    # -------------------- MUTATION --------------------------------------\n    def mutation(self):\n        '''\n        randomly chooses one of the implemented mutation techniques and applies it to the weight matrix\n        both random and nonuniform mutation use techniques described in Genetic learning of fuzzy cognitive maps\n        Wojciech Stach, Lukasz Kurgan∗, Witold Pedrycz, Marek Reformat\n        :return:\n        '''\n        mut = np.random.choice(['random','nonuniform'])\n        if mut =='random':\n            self.randommutation()\n        elif mut =='nonuniform':\n            self.numutation()\n\n\n    def randommutation(self):\n        '''\n        randomly chooses one of the implemented mutation techniques and applies it to the weight matrix\n        both random and nonuniform mutation use techniques described in Genetic learning of fuzzy cognitive maps\n        Wojciech Stach, Lukasz Kurgan∗, Witold Pedrycz, 
Marek Reformat\n        :return:\n        '''\n        # applying mutation\n        # choosing x % indexes for mutation\n        a = list(np.random.choice([False, True], p=[1 - self.prob_mutation, self.prob_mutation], size=self.population_size).astype(int) * range(self.population_size))\n        a = list(filter(lambda x: x != 0, a))\n        for i in a:\n            # mutation happens with probability prob_mutation\n\n            # random method\n            j = np.random.choice(range(self.nConcepts), size=1)\n            k = np.random.choice(range(self.nConcepts - 1), size=1)\n\n            self.generations[i, j,k] = np.random.uniform(-1,1)\n\n    def numutation(self):\n        '''\n        randomly chooses one of the implemented mutation techniques and applies it to the weight matrix\n        both random and nonuniform mutation use techniques described in Genetic learning of fuzzy cognitive maps\n        Wojciech Stach, Lukasz Kurgan∗, Witold Pedrycz, Marek Reformat\n        :return:\n        '''\n        # choosing p % of chromosomes in the generation\n        a = list(np.random.choice([False, True], p=[1 - self.prob_mutation, self.prob_mutation],\n                                  size=self.population_size).astype(int) * range(self.population_size))\n        a = list(filter(lambda x: x != 0, a))\n        # randomly choose max 3 elements in the chromosome and change their vals\n        d = round((self.max_generations-self.current_gen)/(self.max_generations/2))\n        for i in a:\n            # randomly choosing d% of the elements to mutate, it decreases with the n of generations\n\n            for change in range(d):\n                j = np.random.choice(range(self.nConcepts), size=1)\n                k = np.random.choice(range(self.nConcepts - 1), size=1)\n                self.generations[i, j, k] = np.random.uniform(-1, 1)\n\n\n    # -------------------- SELECTION OF THE BEST CANDIDATES FOR THE NEXT GENERATION --------------------------------------\n\n    def selection(self):\n        '''\n        selecting the candidates from the last generation to the new generation\n        as the paper suggests we randomly choose the way to choose genes for crossover\n        ref: Genetic learning of fuzzy cognitive maps\n        Wojciech Stach, Lukasz Kurgan∗, Witold Pedrycz, Marek Reformat\n        calls one of the selection methods rulette or tournament\n        '''\n\n\n        cross = np.random.choice(['rulette', 'tournament'])\n        if cross == 'rulette':\n            crossover_pairs = self.rulette()\n        elif cross == 'tournament':\n            crossover_pairs = self.tournament()\n\n    def rulette(self):\n        '''\n        choosing candidates for crossover with probability according to the fitness function of each chromosome\n        more information https://en.wikipedia.org/wiki/Selection_(genetic_algorithm)\n        :return:\n        '''\n\n        selection = np.zeros((self.population_size, self.nConcepts, self.nConcepts - 1))\n        # initial probability list\n        p = self.generation_fitness[-2] / np.sum(self.generation_fitness[-2])\n        for i in range(self.population_size):\n            # choice with probability, choosing index of chromosome\n            selection[i] = self.generations[np.random.choice(list(range(self.population_size)), p=list(\n                p))]  # 'last' population is still an array of zeros\n        # selected chromosomes pass to next generation\n        self.generations = selection\n\n    def tournament(self):\n        '''\n        we choose randomly k chromosomes from the generation, then we would choose the best one with probability p,\n        the 2nd best with p*(1-p), 3rd best with p*((1-p)^2) and so on\n        more information https://en.wikipedia.org/wiki/Selection_(genetic_algorithm)\n        :return:\n        '''\n        # if p == 1, we would always choose the 'fittest one' from the k candidates\n        selection = np.zeros((self.population_size, self.nConcepts, self.nConcepts - 1))\n\n        for j in range(self.population_size):\n            # choose k random chromosomes or rather their indexes\n            candidates = 
np.random.choice(list(range(self.population_size)), size=self.tournamentK)\n            # choosing candidate (only the deterministic p == 1 case is implemented here)\n            if self.tournamentP == 1:\n                # get fitness of each candidate\n                chosen = (0, 0)  # index,fitness\n                for index in candidates:\n                    if self.generation_fitness[-2, index] > chosen[1]:\n                        chosen = (index, self.generation_fitness[-2, index])\n                # choosing crossovers to create new gen\n                selection[j] = self.generations[chosen[0]]\n        \n        self.generations = selection\n\n    # -------------------- check termination --------------------------------------\n\n    def check_termination(self):\n        '''\n        checking for termination conditions\n        1 if max n of generations was reached\n        2 the fitness function is good enough, i.e. it reached the maxfitness threshold; then the best gene of the generation is chosen\n        :return:\n        '''\n\n        if self.current_gen <2:\n            return\n        elif (self.current_gen >= self.max_generations) or (np.any(self.generation_fitness[-2] >= self.maxfitness)):\n            self.termination = True\n\n    # -------------------- expands dimensions --------------------------------------\n\n    def expand_dims(self):\n        '''\n        making space for one more generation\n\n        :return:\n        '''\n        self.generation_fitness = np.append(self.generation_fitness, np.zeros((1, self.population_size)), axis=0)\n\n    # -------------------- RUNNING THE OPTIMIZATION PROCESS --------------------------------------\n    \n    \n    def run(self):\n        '''\n        running the learning process for you, just wait and enjoy :)\n        :return:\n        '''\n        # run the optimization process\n        # if we start from 1st step, randomly initialize first generation\n        self.initialize()\n        self.current_gen += 1\n        # calculate fitness for 1st gen\n        # deepcopy the result to avoid aliasing issues when evaluating fitness\n        for i in range(self.population_size):\n            chromosome = copy.deepcopy(self.calculate_fitness(self.generations[i]))\n            self.generation_fitness[0, i] = chromosome\n\n        # update termination condition\n        self.check_termination()\n\n        # plotting fitness\n        # interactive mode\n        plt.ion()\n\n        fig = plt.figure()\n        ax = fig.add_subplot(111)\n        line1, = ax.plot(list(range(self.current_gen)), np.max(self.generation_fitness[0]))\n        fig.canvas.draw()\n        plt.show(block=False)\n        # plt.show()\n        # if it is not true\n        while not (self.termination):\n\n            # NEW GENERATION\n            self.current_gen += 1\n            # print(self.current_gen)\n            if self.current_gen % 100 == 0:\n                print(f'We are at {self.current_gen}/{self.max_generations}')\n                print(f'max fitness function so far is {np.max(self.generation_fitness[-2])}')\n                line1.set_xdata(list(range(self.current_gen-2)))\n                line1.set_ydata(np.max(self.generation_fitness[:-1],axis=1))\n                # re-drawing the figure\n                ax.relim()\n                ax.autoscale_view(True, True, True)\n                fig.canvas.draw()\n                plt.pause(0.02)\n\n                # to flush the GUI events\n                # fig.canvas.flush_events()\n            # print(f'sample weights {self.generations[-1,30]}')\n\n            # 1. expanding dims for new generation\n            self.expand_dims()\n\n            # 2. crossover with probability pCross\n            self.crossover()\n            # 3. mutate with probability pMutate\n            self.mutation()\n\n            # 4. calculate fitness\n            for i in range(self.population_size):\n                chromosome = self.calculate_fitness(copy.deepcopy(self.generations[i]))\n                self.generation_fitness[-2, i] = chromosome\n\n            # 5. selection process - > new generation is being created\n            self.selection()\n\n            # 6. 
update termination condition\n            self.check_termination()\n\n        # return the fittest candidate(s) of the last completed generation;\n        # row [-1] is the empty row just appended by expand_dims, so the\n        # latest computed fitness values live in row [-2]\n        return self.generations[np.where(self.generation_fitness[-2] == np.max(self.generation_fitness[-2]))]\n\ndef simulateFCM(concepts, weights, nsteps):\n    '''\n    simulates the fcm in order to create historical data\n    :param concepts: initial values of concepts (can be multiple initial vectors)\n    :param weights: weight matrix\n    :param nsteps: n of timesteps\n    :return: historical data which has to be fed to the algorithm\n    '''\n    # concepts should be given as a np.array((1,nConcepts))\n    # weights as np.array((nConcepts,nConcepts-1)) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n    for j in range(1, nsteps):\n        newvalues = np.zeros((concepts.shape[0]))\n        for i in range(concepts.shape[0]):\n            idx = list(range(concepts.shape[0]))\n            idx.remove(i)\n            newvalues[i] = round(1 / (1 + np.exp(-(concepts[i] + concepts[idx] @ weights[i]))), 8)\n        \n        concepts = newvalues\n    return concepts\n\ndef reshapeW(W,mode):\n    '''\n\n    :param W: weights\n    # mode \"in\" - reshape to n,n-1\n    # mode \"out\" - reshape to n,n\n    :return reshaped weight matrix\n    '''\n\n\n    if mode == \"in\": \n        out = np.zeros((W.shape[0],W.shape[1]-1))\n        for i in range(W.shape[0]):\n            a = W[:,i].tolist()\n            a.pop(i)\n            out[i] = a \n        return out\n    if mode == \"out\":\n        out = np.zeros((W.shape[0],W.shape[1]+1))\n        for i in range(W.shape[0]):\n            a = W[i].tolist() \n            a.insert(i,0.0)\n            out[:,i] = a\n        return out\n    \n","sub_path":"fcmpy/ML/genetic/rcga.py","file_name":"rcga.py","file_ext":"py","file_size_in_byte":18232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"147184965","text":"import os\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport six.moves.urllib as urllib\nimport tarfile\n\nfrom utils.app_utils import WebcamVideoStream, HLSVideoStream\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\n\nCWD_PATH = os.path.dirname(os.path.realpath(__file__))\n\n# Path to frozen detection graph. 
This is the actual model that is used for the object detection.\n# download from https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md\nMODEL_NAME = 'ssd_mobilenet_v2_coco_2018_03_29'\nMODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017' #fast\n#MODEL_NAME = 'faster_rcnn_resnet101_kitti_2018_01_28'\n#MODEL_NAME = 'faster_rcnn_resnet101_coco_11_06_2017' #medium speed\n#MODEL_NAME = 'faster_rcnn_nas_coco_2018_01_28'\n#MODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'\n\nMODEL_FILE = MODEL_NAME + '.tar.gz'\nPATH_TO_MODELS =os.path.join(CWD_PATH, \"model\")\nPATH_TO_CKPT = os.path.join(PATH_TO_MODELS, MODEL_NAME, 'frozen_inference_graph.pb')\nPATH_TO_LABELS = os.path.join(CWD_PATH, 'object_detection', 'data', 'mscoco_label_map.pbtxt')\nPATH_TO_TGZ = os.path.join(PATH_TO_MODELS, MODEL_FILE)\nNUM_CLASSES = 90\n\n\n# Set up camera constants\nIM_WIDTH = 480\nIM_HEIGHT = 360\n\nDOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\nfileAlreadyExists = os.path.isfile(PATH_TO_CKPT)\nif not fileAlreadyExists:\n if not os.path.exists(PATH_TO_MODELS):\n os.makedirs(PATH_TO_MODELS)\n download_url =DOWNLOAD_BASE + MODEL_FILE\n print('Downloading frozen inference graph: '+download_url)\n opener = urllib.request.URLopener()\n opener.retrieve(download_url, PATH_TO_TGZ)\n tar_file = tarfile.open(PATH_TO_TGZ)\n tar_file.extractall(path= PATH_TO_MODELS)\n\n\nNUM_CLASSES = 90\n\n# Loading label map\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,\n use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n\ndef detect_objects(image_np, sess, detection_graph):\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n\n # Each box represents a part of the image where a particular object was detected.\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n\n # Actual detection.\n (boxes, scores, classes, num_detections) = sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n sboxes = np.squeeze(boxes)\n sclasses = np.squeeze(classes).astype(np.int32)\n sscores = np.squeeze(scores)\n\n for i in range(int(num_detections[0])):\n if sclasses[i] in category_index.keys():\n class_name = category_index[sclasses[i]]['name']\n else:\n class_name = 'N/A'\n display_str = str(class_name)\n #print(display_str)\n ymin, xmin, ymax, xmax = sboxes[i]\n x = int(((xmin+xmax)/2)*image_np.shape[1])\n y = int(((ymin+ymax)/2)*image_np.shape[0])\n\n\n # Draw a circle at center of object\n cv2.circle(image_np, (x, y), 5, (75, 13, 180))\n\n\n # Visualization of the results of a detection.\n #vis_util.visualize_boxes_and_labels_on_image_array(\n # image_np,\n # sboxes,\n # sclasses,\n # sscores,\n # category_index,\n # use_normalized_coordinates=True,\n # max_boxes_to_draw=int(num_detections[0]),\n # line_thickness=8)\n return image_np\n\n\nif __name__ == 
'__main__':\n    # Load a (frozen) Tensorflow model into memory.\n    print(\"Loading frozen tensorflow model.\")\n    detection_graph = tf.Graph()\n    with detection_graph.as_default():\n        od_graph_def = tf.GraphDef()\n        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n            serialized_graph = fid.read()\n            od_graph_def.ParseFromString(serialized_graph)\n            tf.import_graph_def(od_graph_def, name='')\n\n    sess = tf.Session(graph=detection_graph)\n\n    print('Reading from webcam.')\n    video_capture = WebcamVideoStream(src=0,\n                                      width=IM_WIDTH,\n                                      height=IM_HEIGHT).start()\n\n    while True:\n        frame = video_capture.read()\n        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n        frame_detected = detect_objects(frame_rgb, sess, detection_graph)\n\n        output_rgb = cv2.cvtColor(frame_detected, cv2.COLOR_RGB2BGR)\n        cv2.imshow('Video', output_rgb)\n\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            break\n\n    sess.close()\n    video_capture.stop()\n    cv2.destroyAllWindows()\n","sub_path":"src/tutorial/live-object-tf/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"631818746","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Lex\"\n# Date: 2017/11/19\n\nimport time\n\nclass Date:\n    def __init__(self,year,month,day):\n        self.year = year\n        self.month = month\n        self.day = day\n\n    @staticmethod # effectively extends the class with extra functionality\n    def now(): # use the Date.now() form to create an instance; it uses the current time\n        t = time.localtime() # get the current time in structured (struct_time) form\n        obj = Date(t.tm_year,t.tm_mon,t.tm_mday) # create a new instance and return it\n        print('from now')\n        return obj\n\n    @staticmethod\n    def tomorrow(): # use the Date.tomorrow() form to create an instance; it uses tomorrow's time\n        t = time.localtime(time.time()+86400)\n        obj = Date(t.tm_year,t.tm_mon,t.tm_mday)\n        return obj\n\n# d1 = Date.now()\n# print(d1.year,d1.month,d1.day)\n# d2 = Date.tomorrow()\n# print(d2.year,d2.month,d2.day)\n# d1.now() #TypeError: now() takes 0 positional arguments but 1 was given\n\nd1 = Date(2012,12,12)\nd1.now()\n# d_n1 = Date.now()\n# d_n2 = d1.now()\n# print(d_n1.year,d_n1.month,d_n1.day)\n# print(d_n2.year,d_n2.month,d_n2.day)\n# Date.now()\n","sub_path":"Day28/staticmethod.py","file_name":"staticmethod.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"31428026","text":"import numpy as np\nimport pandas as pd\n\nfrom helpers.utils import compute_returns\n\nDATA_FILE = '../data_examples/btc_price_2017-09-13T03:45:28+00:00.csv'\nDATA = pd.read_csv(DATA_FILE, sep=',', parse_dates=True, index_col=0)\n\nnp.set_printoptions(threshold=np.nan)\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\npd.options.display.float_format = '{:.4f}'.format\n\n\ndef run():\n    d = pd.DataFrame(DATA[['timestamp', 'last']])\n    d = d.head(10000)\n    print(d)\n    e = pd.DataFrame(d)\n    print(len(e['last'].resample('5Min').ohlc().replace(np.nan, 0)))\n\n    exit(1)\n\n    # NOTE: everything below exit(1) is unreachable scratch code;\n    # num_average_ticks and sharpe_ratio are not defined in this script.\n    d['returns'] = compute_returns(d['last'])\n    print(d['returns'].head())\n\n    print(d['returns'].rolling(window=2, center=False).mean().head())\n\n    print(d['returns'])\n\n    sr_column = 'sharpe_ratio_{}'.format(num_average_ticks)\n    # reversing with [::-1] makes this a forward-looking rolling apply instead of the usual backward-looking one.\n    d[sr_column] = pd.rolling_apply(d['returns'][::-1],\n                                    window=num_average_ticks,\n                                    func=sharpe_ratio,\n                                    center=False).fillna(0)[::-1]\n\n    print(d.tail(100))\n\n    labels = ['SELL', 'HOLD', 'BUY']\n    d['signals'] = pd.qcut(d[sr_column], q=[0, 0.05, 0.95, 1], labels=[0, 1, 2])\n\n    print(d.head(100))\n    
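# Aside (not from the original script): with q=[0, 0.05, 0.95, 1] and labels=[0, 1, 2],\n    # pd.qcut buckets the bottom 5% of rolling Sharpe ratios as 0 (SELL), the middle 90%\n    # as 1 (HOLD) and the top 5% as 2 (BUY), e.g.:\n    # pd.qcut(pd.Series(range(100)), [0, 0.05, 0.95, 1], labels=[0, 1, 2])\n    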
print(d['signals'].head(100))\n import matplotlib.pyplot as plt\n d['signals'].astype(float).plot()\n plt.show()\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"data/generate_buy_sell_labels.py","file_name":"generate_buy_sell_labels.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"61748277","text":"import docker\nimport json\nimport io\nimport os\nimport pandas as pd\nimport shutil\nimport threading\n\nfrom apis.models import FailureProneFile, FixingCommit, MetricsFile, Repository, Task\n\n\nclass BackendMetrics:\n FAILURE_PRONE_FILES_FILENAME = 'failure_prone_files.json'\n\n def __init__(self, repo_id: str, language: str, label: str = 'ALL'):\n\n if language not in ('ansible', 'tosca'):\n raise ValueError(f'Language {language} not supported.')\n\n self.repository = Repository.objects.get(pk=repo_id)\n self.language = language\n\n if label.upper() not in ('CONFIGURATION_DATA', 'CONDITIONAL', 'DEPENDENCY', 'DOCUMENTATION', 'IDEMPOTENCY',\n 'SECURITY', 'SERVICE', 'SYNTAX'):\n raise ValueError('Label not supported')\n else:\n self.label = label.upper()\n\n def extract(self):\n task = Task(state=Task.ACCEPTED, name=Task.EXTRACT_METRICS, repository=self.repository)\n task.save()\n\n labeled_files = []\n commits = []\n for commit in FixingCommit.objects.filter(repository=self.repository):\n labels = [label_dict['label'] for label_dict in commit.labels]\n\n if self.label in labels:\n commits.append(commit.sha)\n\n for file in FailureProneFile.objects.filter(fixing_commit__in=commits):\n labeled_files.append({\n 'filepath': file.filepath,\n 'commit': file.commit,\n 'fixing_commit': file.fixing_commit.sha\n })\n\n if not labeled_files:\n return task.id, Task.COMPLETED\n\n try:\n path_to_task = os.path.join('/tmp', 'radondp_tasks', str(task.id))\n path_to_failure_prone_files = os.path.join(path_to_task, self.FAILURE_PRONE_FILES_FILENAME)\n os.makedirs(path_to_task)\n\n with open(path_to_failure_prone_files, 'w') as f:\n json.dump(labeled_files, f)\n\n except Exception as e:\n print(e)\n return task.id, Task.ERROR\n\n thread_name = f'{self.repository.full_name.replace(\"/\", \"_\")}_metrics_thread'\n metrics_thread = threading.Thread(target=self.run_task, name=thread_name, args=(task,))\n metrics_thread.start()\n\n return task.id, task.state\n\n def run_task(self, task: Task):\n\n path_to_task = os.path.join('/tmp', 'radondp_tasks', str(task.id))\n volumes = {\n path_to_task: {\n 'bind': '/app',\n 'mode': 'rw'\n }\n }\n\n task.state = Task.RUNNING\n task.save()\n\n command = 'repo-miner extract-metrics {0} {1} {2} product release . 
'.format(self.repository.url,\n self.FAILURE_PRONE_FILES_FILENAME,\n self.language)\n docker_client = docker.from_env()\n container_name = f'{self.repository.full_name}-metrics-extractor'\n container_name = container_name.replace('/', '_')\n container = docker_client.containers.run(image='radonconsortium/repo-miner:0.9.1',\n name=container_name,\n command=command,\n detach=True,\n volumes=volumes)\n\n result = container.wait()\n container.remove()\n\n # For debug\n print(self.repository.full_name, result)\n\n task.state = Task.ERROR # Temporary, in case next steps fail\n task.save()\n\n if result['StatusCode'] == 0:\n path_to_csv = os.path.join(path_to_task, 'metrics.csv')\n data = pd.read_csv(path_to_csv)\n data['labels'] = self.label\n\n res = io.StringIO()\n data.to_csv(res, index=False)\n obj, created = MetricsFile.objects.get_or_create(repository=self.repository, language=self.language,\n defaults=dict(file=res.getvalue()))\n\n if not created and obj:\n obj.file = res.getvalue()\n obj.save()\n\n task.state = Task.COMPLETED\n task.save()\n\n try:\n shutil.rmtree(path_to_task)\n except Exception as e:\n print(e)\n","sub_path":"backend/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"638928508","text":"import csv\nimport xml.dom.minidom\nfrom sentimentAnalyzer import SentimentAnalyzer\n\nclass SentimentEvaluation(object):\n\n\tdef __init__(self, logger):\n\t\tself.logger = logger\n\t\tself.analyzerType = None\n\n\tdef execute(self, analyzer):\n\t\tself.analyzerType = analyzer.__class__.__name__\n\t\tself.logger.info('Init Evaluation Module for: ' + self.analyzerType)\n\t\tdatabase = self.getTwitterDatabase()\n\t\tanalyzerResults = self.analyzeDatabaseTopics(analyzer, database)\n\t\tevaluationResults = self.evaluate(database, analyzerResults)\n\t\tself.writeResults(evaluationResults)\n\n\tdef getTwitterDatabase(self):\n\t\tdatabase = {}\n\t\tfilename = 'data/mapping.xml'\n\t\tself.logger.info('Reading file: ' + filename)\n\n\t\tDOMTree = xml.dom.minidom.parse(filename)\n\t\ttopics = DOMTree.getElementsByTagName(\"topic\")\n\n\t\tfor topic in topics:\n\t\t\ttopicNameNode = topic.getElementsByTagName('name')[0]\n\t\t\ttopicName = str(topicNameNode.firstChild.data)\n\n\t\t\ttopicSentimentNode = topic.getElementsByTagName('sentiment')[0]\n\t\t\ttopicSentiment = str(topicSentimentNode.firstChild.data)\n\t\t\ttopicTuple = (topicName, topicSentiment)\n\n\t\t\tsampling = topic.getElementsByTagName('sampling')[0]\n\t\t\tsamples = sampling.getElementsByTagName('sample')\n\n\t\t\tmessages = {}\n\t\t\tfor sample in samples:\n\t\t\t\tsampleMessage = sample.firstChild.data\n\t\t\t\tsampleAttributesKeys = sample.attributes.keys()\n\t\t\t\tsampleSentiment = sample.attributes['sentiment'].value\n\t\t\t\tmessages[sampleMessage] = sampleSentiment\n\n\t\t\tdatabase[topicTuple] = messages\n\n\t\treturn database\n\t\n\tdef analyzeDatabaseTopics(self, analyzer, database):\n\t\tresults = {}\n\t\tif database:\n\t\t\tfor row in database:\n\t\t\t\ttopic, topicSentiment = row\n\t\t\t\ttopicSampling = database[row].keys()\n\t\t\t\tresults[topic] = analyzer.analyzeTopic(topic, topicSampling)\n\t\treturn results\n\t\n\tdef evaluate(self, correctMap, predictedMap):\n\t\ttotal = len(correctMap)\n\t\tbasicMeasures = self.getBasicEvaluationMeasures(correctMap, predictedMap)\n\t\tprecisionRecallMeasures = self.getPrecisionRecallMeasuresPerClass(basicMeasures)\n\t\tf1Measures = 
self.getF1MeasurePerClass(precisionRecallMeasures)\n\t\tmacroF1Measure = self.getMacroF1Measure(f1Measures)\n\t\tcoverageMeasure = self.getCoverageMeasure(basicMeasures, total)\n\t\taccuracyMeasure = self.getAccuracyMeasure(basicMeasures, total)\n\n\t\tmeasuresMap = {}\n\t\tmeasuresMap['basicMeasures'] = basicMeasures\n\t\tmeasuresMap['precisionRecallMeasures'] = precisionRecallMeasures\n\t\tmeasuresMap['macroF1'] = macroF1Measure\n\t\tmeasuresMap['coverage'] = coverageMeasure\n\t\tmeasuresMap['accuracy'] = accuracyMeasure\n\t\treturn measuresMap\n\n\tdef getBasicEvaluationMeasures(self, correctMap, predictedMap):\n\t\ta, b, c, d, e, f, g, h, i = 0, 0, 0, 0, 0, 0, 0, 0, 0\n\t\tfor topic, topicSentiment in correctMap:\n\t\t\tpredictedSentiment = predictedMap[topic]\n\t\t\tif topicSentiment == SentimentAnalyzer.SENTIMENT_POSITIVE:\n\t\t\t\tif predictedSentiment == SentimentAnalyzer.SENTIMENT_POSITIVE:\n\t\t\t\t\ta += 1\n\t\t\t\telif predictedSentiment == SentimentAnalyzer.SENTIMENT_NEUTRAL:\n\t\t\t\t\tb += 1\n\t\t\t\telif predictedSentiment == SentimentAnalyzer.SENTIMENT_NEGATIVE:\n\t\t\t\t\tc += 1\n\t\t\telif topicSentiment == SentimentAnalyzer.SENTIMENT_NEUTRAL:\n\t\t\t\tif predictedSentiment == SentimentAnalyzer.SENTIMENT_POSITIVE:\n\t\t\t\t\td += 1\n\t\t\t\telif predictedSentiment == SentimentAnalyzer.SENTIMENT_NEUTRAL:\n\t\t\t\t\te += 1\n\t\t\t\telif predictedSentiment == SentimentAnalyzer.SENTIMENT_NEGATIVE:\n\t\t\t\t\tf += 1\n\t\t\telif topicSentiment == SentimentAnalyzer.SENTIMENT_NEGATIVE:\n\t\t\t\tif predictedSentiment == SentimentAnalyzer.SENTIMENT_POSITIVE:\n\t\t\t\t\tg += 1\n\t\t\t\telif predictedSentiment == SentimentAnalyzer.SENTIMENT_NEUTRAL:\n\t\t\t\t\th += 1\n\t\t\t\telif predictedSentiment == SentimentAnalyzer.SENTIMENT_NEGATIVE:\n\t\t\t\t\ti += 1\n\n\t\treturn {'a':a, 'b':b, 'c':c, 'd':d, 'e':e, 'f':f, 'g':g, 'h':h, 'i':i}\n\n\tdef getPrecisionRecallMeasuresPerClass(self, bms):\n\t\tprecisionPositive = bms['a']\n\t\tprecisionPositiveExpression = (bms['a'] + bms['d'] + bms['g'])\n\t\tif precisionPositiveExpression:\n\t\t\tprecisionPositive /= float(precisionPositiveExpression)\n\n\t\tprecisionNeutral = bms['e']\n\t\tprecisionNeutralExpression = (bms['b'] + bms['e'] + bms['h'])\n\t\tif precisionNeutralExpression:\n\t\t\tprecisionNeutral /= float(precisionNeutralExpression)\n\n\t\tprecisionNegative = bms['i']\n\t\tprecisionNegativeExpression = (bms['c'] + bms['f'] + bms['i'])\n\t\tif precisionNegativeExpression:\n\t\t\tprecisionNegative /= float(precisionNegativeExpression)\n\t\t\n\t\trecallPositive = bms['a']\n\t\trecallPositiveExpression = (bms['a'] + bms['b'] + bms['c'])\n\t\tif recallPositiveExpression:\n\t\t\trecallPositive /= float(recallPositiveExpression)\n\n\t\trecallNeutral = bms['e']\n\t\trecallNeutralExpression = (bms['d'] + bms['e'] + bms['f'])\n\t\tif recallNeutralExpression:\n\t\t\trecallNeutral /= float(recallNeutralExpression)\n\n\t\trecallNegative = bms['i']\n\t\trecallNegativeExpression = (bms['g'] + bms['h'] + bms['i'])\n\t\tif recallNegativeExpression:\n\t\t\trecallNegative /= float(recallNegativeExpression)\n\t\t\n\t\treturn {\n\t\t\t'p': {'p':precisionPositive, 'n':precisionNegative, 'u':precisionNeutral},\n\t\t\t'r': {'p':recallPositive, 'n':recallNegative, 'u':recallNeutral}\n\t\t}\n\n\tdef getF1MeasurePerClass(self, prms):\n\t\tf1Positive = 0\n\t\tf1PositiveFirstExpression = (2 * prms['p']['p'] * prms['r']['p'])\n\t\tf1PositiveSecondExpression = (prms['p']['p'] + prms['r']['p'])\n\t\tif f1PositiveFirstExpression and 
f1PositiveSecondExpression:\n\t\t\tf1Positive = f1PositiveFirstExpression / float(f1PositiveSecondExpression)\n\n\t\tf1Negative = 0\n\t\tf1NegativeFirstExpression = (2 * prms['p']['n'] * prms['r']['n'])\n\t\tf1NegativeSecondExpression = (prms['p']['n'] + prms['r']['n'])\n\t\tif f1NegativeFirstExpression and f1NegativeSecondExpression:\n\t\t\tf1Negative = f1NegativeFirstExpression / float(f1NegativeSecondExpression)\n\n\t\tf1Neutral = 0\n\t\tf1NeutralFirstExpression = (2 * prms['p']['u'] * prms['r']['u'])\n\t\tf1NeutralSecondExpression = (prms['p']['u'] + prms['r']['u'])\n\t\tif f1NeutralFirstExpression and f1NeutralSecondExpression:\n\t\t\tf1Neutral = f1NeutralFirstExpression / float(f1NeutralSecondExpression)\n\n\t\treturn {'F1Positive':f1Positive, 'F1Negative':f1Negative, 'F1Neutral':f1Neutral}\n\n\tdef getMacroF1Measure(self, f1ms):\n\t\tf1sum = 0\n\t\tfor f1 in f1ms:\n\t\t\tf1sum += f1ms[f1]\n\t\treturn f1sum / float(3)\n\n\tdef getAccuracyMeasure(self, bms, total):\n\t\treturn (bms['a']+bms['e']+bms['i']) / float(total)\n\n\tdef getCoverageMeasure(self, bms, total):\n\t\treturn (total - (bms['b'] + bms['e'] + bms['h'])) / float(total)\n\t\n\tdef writeResults(self, measuresMap):\n\t\toutputFile = 'evaluation/' + self.analyzerType + '-measures.csv'\n\t\twith open(outputFile, 'w+') as csvfile:\n\t\t\theaders = ['measure', 'value']\n\t\t\twriter = csv.DictWriter(csvfile, delimiter=';', lineterminator='\\n', fieldnames=headers)\n\t\t\tfor measure in measuresMap:\n\t\t\t\twriter.writerow({'measure': measure, 'value': measuresMap[measure]})\n","sub_path":"sentimentEvaluation.py","file_name":"sentimentEvaluation.py","file_ext":"py","file_size_in_byte":6720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"178453801","text":"from django import forms\nfrom .models import Patient, Report\n\nclass PatientForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Patient\n\t\tfields = [\n\t\t\t\"Patient_Name\",\n\t\t\t\"Patient_Blood_Group\",\n\t\t\t\"Patient_Birth_Date\",\n\t\t\t\"Patient_Phone_Number\",\n\t\t]\n\n\nclass ReportForm(forms.ModelForm):\n\tPatient_Notes = forms.CharField(required=False)\n\tclass Meta:\n\t\tmodel = Report\n\t\tfields = [\n\t\t\t\"patient\",\n\t\t\t\"Patient_Age\",\n\t\t\t\"Patient_Weight\",\n\t\t\t\"Patient_Length\",\n\t\t\t\"HC\",\n\t\t\t\"S_and_S\",\n\t\t\t\"Patient_Diagnosis\",\n\t\t\t\"Patient_Treatment\",\n\t\t\t\"Patient_Notes\",\n\t\t]","sub_path":"data/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"454842582","text":"from flask import Flask\r\nfrom flask import jsonify\r\nfrom flask import request\r\nfrom datetime import datetime\r\nfrom dateutil.relativedelta import relativedelta\r\nfrom flask_pymongo import PyMongo\r\nfrom dbHelper import dbHelper\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['MONGO_DBNAME'] = 'university'\r\napp.config['MONGO_URI'] = 'mongodb://localhost:27017/university'\r\n\r\nmongo = PyMongo(app)\r\nDB = dbHelper(mongo)\r\n\r\nPARAMS = ['first_name', 'last_name', 'pin', 'sex']\r\n\r\n@app.route(\"/\")\r\n@app.route('/students')\r\ndef get_all_students():\r\n students = DB.get_all_students()\r\n return success_response(None, students)\r\n \r\n\r\n@app.route('/students/<int:age>', methods=['GET'])\r\n@app.route('/students/<last_name>', methods=['GET'])\r\ndef get_students_by_filter(age=None, last_name=None):\r\n datetime_filter_from = None\r\n datetime_filter_to = None\r\n\r\n if age:\r\n 
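# note: an age of N is interpreted as birthdates inside the one-year window ending N years before today, computed below\r\n 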
try:\r\n max_birth_year = datetime.now().date() - relativedelta(years=age)\r\n datetime_filter_to = datetime.combine(max_birth_year, datetime.min.time())\r\n datetime_filter_from = datetime_filter_to - relativedelta(years=1)\r\n except Exception:\r\n return unsuccess_response('Age parameter is incorrect')\r\n\r\n filtered_students = DB.get_students_by_birthdate_range_or_lastname(datetime_filter_from, datetime_filter_to, last_name)\r\n return success_response(None, filtered_students)\r\n\r\n\r\n@app.route('/student', methods=['POST'])\r\ndef add_student():\r\n students = mongo.db.students\r\n content = request.json\r\n\r\n student = get_values_or_none_by_param_names(PARAMS, content)\r\n student['birthdate'] = get_date_from_content('birthdate', content, '%m/%d/%Y')\r\n\r\n if student['first_name'] is None or student['last_name'] is None or student['pin'] is None:\r\n return unsuccess_response('First Name, Last Name and Personal identity number are required')\r\n\r\n student_id = students.insert(student)\r\n new_student_by_id = DB.get_one_by_id(student_id)\r\n return success_response('Created new student', new_student_by_id)\r\n\r\n\r\n@app.route('/student', methods=['PUT'])\r\ndef edit_student():\r\n\r\n students = mongo.db.students\r\n content = request.json\r\n\r\n student = get_values_or_none_by_param_names(PARAMS, content)\r\n student['birthdate'] = get_date_from_content('birthdate', content, '%m/%d/%Y')\r\n\r\n target_student = students.find_one({\"pin\": student['pin']})\r\n if target_student is None:\r\n return unsuccess_response('No student with this pin exists')\r\n\r\n # remove empty items from dictionary\r\n filtered = {k: v for k, v in student.items() if v is not None}\r\n student.clear()\r\n student.update(filtered)\r\n\r\n students.update({\"pin\": student['pin']}, {\"$set\": student})\r\n updated_student = DB.get_one_by_pin(student['pin'])\r\n return success_response('Student data changed', updated_student)\r\n\r\n\r\n@app.route('/student', methods=['DELETE'])\r\ndef delete_student():\r\n students = mongo.db.students\r\n student = {}\r\n student['pin'] = request.json['pin'] if 'pin' in request.json else None\r\n target_student = students.find_one({\"pin\": student['pin']})\r\n if target_student is None:\r\n return unsuccess_response('No student with this pin exists')\r\n\r\n students.remove({\"pin\": student['pin']})\r\n\r\n target_student = students.find_one({\"pin\": student['pin']})\r\n if target_student is None:\r\n return success_response('Successfully removed')\r\n else:\r\n return unsuccess_response('Student was not removed')\r\n\r\n\r\n\r\ndef success_response(msg=None, \r\n result=None):\r\n return json_response(msg, result, True)\r\n\r\ndef unsuccess_response(msg=None, \r\n result=None): \r\n return json_response(msg, result, False)\r\n\r\ndef json_response(msg=None, \r\n result=None, \r\n status=False):\r\n return jsonify({'success': status, 'message': msg, 'result': result})\r\n\r\ndef get_values_or_none_by_param_names(params, \r\n content):\r\n \"\"\"Get values for parameters from content object\r\n args:\r\n params - list of parameter names\r\n content - object, which contains request.json data\r\n return:\r\n dictionary of parameters\r\n \"\"\" \r\n student = {}\r\n for param in params: \r\n student[param] = content[param] if param in content else None\r\n return student\r\n\r\ndef get_date_from_content(param_name, \r\n content, \r\n date_format):\r\n \"\"\"Get datetime object from content \r\n args:\r\n param_name - parameter name, which we want to convert to a 
datetime object\r\n content - object, which contains request.json data\r\n date_format - date format describing how to convert the string param to a datetime object\r\n return:\r\n datetime value\r\n \"\"\" \r\n return datetime.strptime(content[param_name], date_format) if param_name in content else None\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","sub_path":"students_rest_api.py","file_name":"students_rest_api.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"246980603","text":"# Perform insert-and-update (upsert) operations\r\nimport pymysql\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nclass MySQL:\r\n def __init__(self, host, port, user, pwd, db):\r\n self.host = host\r\n self.port = int(port)\r\n self.user = user\r\n self.pwd = pwd\r\n self.db = db\r\n if not self.db:\r\n raise NameError(\"Database information is not configured\")\r\n try:\r\n self.conn = pymysql.connect(\r\n host=self.host,\r\n port=self.port,\r\n user=self.user,\r\n passwd=self.pwd,\r\n db=self.db\r\n )\r\n except Exception as e:\r\n print(e)\r\n raise AttributeError(\"Failed to connect to the database\")\r\n\r\n self.cur = self.conn.cursor()\r\n if not self.cur:\r\n raise NameError(\"Failed to obtain a cursor\")\r\n\r\n def execQuery(self, query):\r\n # for SELECT statements, prefer pandas.read_sql()\r\n self.cur.execute(query)\r\n resList = self.cur.fetchall()\r\n # the caller must close the connection once all queries are done\r\n return resList\r\n\r\n def execNonQuery(self, sql):\r\n self.cur.execute(sql)\r\n self.conn.commit()\r\n\r\n def ex(self):\r\n self.conn.close()\r\n\r\n\r\ndef save_to_sql(conn, data, table, ifclose=True, chunksize=8000):\r\n '''\r\n Params:\r\n conn:\r\n - instance of MySQL class or MSSQL class\r\n data:\r\n - DataFrame\r\n --Note: the column names of data must match the table's columns (case-insensitive)\r\n table:\r\n - str, name of the target table\r\n '''\r\n conn.execNonQuery('USE %s' % conn.db)\r\n\r\n # get the column names\r\n data = data.reset_index(drop=True)\r\n data_col_name = [col.lower() for col in data.columns.values]\r\n rows = len(data)\r\n tab_col = pd.read_sql('DESCRIBE %s' % table, conn.conn, index_col='Field')\r\n tab_col.index = tab_col.index.str.lower()\r\n tab_col_type = tab_col.Type.to_dict()\r\n chunk_num = int(np.ceil(rows / chunksize))\r\n for cn in range(chunk_num):\r\n string = []\r\n for dcn in data_col_name:\r\n tp = tab_col_type[dcn]\r\n if tp.lower()[:8] == 'datetime':\r\n temp_str = r\"str_to_date('%s','%%Y-%%m-%%d %%H:%%i:%%s')\"\r\n elif tp.lower()[:4] == 'date':\r\n temp_str = r\"str_to_date('%s','%%Y-%%m-%%d')\"\r\n else:\r\n temp_str = r\"'%s'\"\r\n if dcn == data_col_name[0]:\r\n temp_str = '(' + temp_str\r\n elif dcn == data_col_name[-1]:\r\n temp_str = temp_str + ')'\r\n string.append(temp_str)\r\n\r\n chunk_data = data.iloc[cn * chunksize:(cn + 1) * chunksize] if cn < chunk_num - 1 else data.iloc[\r\n cn * chunksize:]\r\n\r\n temp_sql = ','.join(string * len(chunk_data)) % tuple(chunk_data.values.ravel())\r\n\r\n temp_sql = temp_sql.replace('nan', 'null')\r\n temp_sql = temp_sql.replace('\\'null\\'', 'null')\r\n insert_col = ','.join(['`%s`' % col for col in data_col_name])\r\n update_col = ','.join(['`%s`=Values(`%s`)' % (col, col) for col in data_col_name])\r\n sql = 'INSERT INTO %s.%s (%s) VALUES ' % (conn.db, table, insert_col) + \\\r\n temp_sql + ' ON DUPLICATE KEY UPDATE ' + update_col + ';'\r\n\r\n sql = sql.encode('utf-8')\r\n conn.execNonQuery(sql)\r\n if ifclose:\r\n 
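# ex() closes the shared pymysql connection; callers that reuse conn for several tables should pass ifclose=False\r\n 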
conn.ex()\r\n\r\n","sub_path":"QH_update/检验部分/insert_update.py","file_name":"insert_update.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"504733105","text":"# -*- coding:utf-8 -*-\nimport os\nimport time\nimport datetime\n\nfrom flask import Flask, g\nfrom models._base import db\n\n\ndef config_str_to_obj(cfg):\n if isinstance(cfg, basestring):\n module = __import__('config', fromlist=[cfg])\n return getattr(module, cfg)\n return cfg\n\n\ndef create_app(config=None):\n app = Flask(\n __name__,\n template_folder='templates',\n )\n\n if 'RUN_SETTINGS' in os.environ:\n app.config.from_envvar('RUN_SETTINGS')\n\n config = config_str_to_obj(config)\n app.config.from_object(config)\n app.config.from_envvar('APP_CONFIG', silent=True)\n\n app.static_folder = app.config.get('STATIC_FOLDER')\n app.config.update({'SITE_TIME': datetime.datetime.utcnow()})\n\n register_hook(app)\n register_jinja(app)\n register_routes(app)\n register_leancloud(app)\n register_database(app)\n\n return app\n\n\ndef register_hook(app):\n from .utils.user import get_current_user\n\n @app.before_request\n def load_current_user():\n g.user = get_current_user()\n if g.user:\n g._before_request_time = time.time()\n\n\ndef register_routes(app):\n from .controllers import front, account, manager, category, article\n\n app.register_blueprint(front.bp, url_prefix='')\n app.register_blueprint(account.bp, url_prefix='/account')\n app.register_blueprint(manager.bp, url_prefix='/manager')\n app.register_blueprint(category.bp, url_prefix='/manager/category')\n app.register_blueprint(article.bp, url_prefix='/article')\n return app\n\n\ndef register_jinja(app):\n return app\n\n\ndef register_database(app):\n db.init_app(app)\n db.app = app\n return app\n\n\ndef register_leancloud(app):\n import leancloud\n leancloud.init(app.config['LEANCLOUND_APPID'], app.config['LEANCLOUND_MASTER'])","sub_path":"runyu/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"647323670","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\nfrom . 
import models, serializers\n\nfrom common.decorators import session_authorize, meta_data_response, catch_exception\nfrom common.response import MetaDataResponse\nfrom common.utils.model_utils import check_pk_existence\nfrom common.exceptions import NotAcceptableError\nfrom customer.models import Customer\n\nfrom activity.models import register_customer_state\nfrom activity.model_constants import AADHAAR_SUBMIT_STATE, AADHAAR_DETAIL_SUBMIT_STATE\n\n\nclass AadhaarCreate(APIView):\n\n @catch_exception\n @meta_data_response()\n @session_authorize('customer_id')\n def post(self, request, auth_data):\n if auth_data.get('authorized'):\n serializer = serializers.AadhaarSerializer(data=request.data)\n if serializer.is_valid():\n serializer.validate_foreign_keys()\n serializer.save()\n register_customer_state(\n AADHAAR_SUBMIT_STATE, auth_data['customer_id'])\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response({}, status=status.HTTP_400_BAD_REQUEST)\n return Response({}, status.HTTP_401_UNAUTHORIZED)\n\n\nclass AadhaarDetail(APIView):\n\n @catch_exception\n @meta_data_response()\n @session_authorize()\n def get(self, request, auth_data, *args, **kwargs):\n if auth_data.get('authorized'):\n aadhaar_object = get_object_or_404(\n models.Aadhaar, customer_id=auth_data['customer_id'])\n serializer = serializers.AadhaarSerializer(aadhaar_object)\n return Response(serializer.data, status.HTTP_200_OK)\n return Response({}, status.HTTP_401_UNAUTHORIZED)\n\n @catch_exception\n @meta_data_response()\n @session_authorize()\n def put(self, request, auth_data, *args, **kwargs):\n if auth_data.get('authorized'):\n aadhaar_object = get_object_or_404(\n models.Aadhaar, customer_id=auth_data['customer_id'])\n serializers.AadhaarSerializer().validate_foreign_keys(request.data)\n aadhaar_object_updated = serializers.AadhaarSerializer().update(\n aadhaar_object, request.data)\n register_customer_state(\n AADHAAR_DETAIL_SUBMIT_STATE, aadhaar_object_updated.customer_id)\n return Response(serializers.AadhaarSerializer(aadhaar_object_updated).data, status.HTTP_200_OK)\n return Response({}, status=status.HTTP_401_UNAUTHORIZED)\n\n @catch_exception\n @meta_data_response()\n @session_authorize()\n def delete(self, request, auth_data, *args, **kwargs):\n if auth_data.get('authorized'):\n aadhaar_object = get_object_or_404(\n models.Aadhaar, customer_id=auth_data['customer_id'])\n aadhaar_object.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n return Response({}, status.HTTP_401_UNAUTHORIZED)\n","sub_path":"upwards/aadhaar/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"526368001","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n# _pearsonr: x and y must be of type np.array()\ndef _pearsonr(x, y):\n # items that x has rated\n x_index = np.nonzero(x)[0]\n y_index = np.nonzero(y)[0]\n\n mx = x[x_index].mean()\n my = y[y_index].mean()\n \n # intersection of the items rated by both x and y\n interseccion = np.intersect1d(x_index,y_index)\n\n xm = x[interseccion] - mx\n ym = y[interseccion] - my\n \n r_num = np.add.reduce(xm * ym)\n r_den = np.sqrt(np.sum(xm*xm) * np.sum(ym*ym))\n\n if(r_den != 0):\n r = r_num / r_den\n else:\n r = 0\n\n return r\n\ndef _similitud(u_a, v, u_a_ratings, m_ratings, user_test, u_a_similarity, type='coseno'): \n if(type == 'coseno'):\n 
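# cosine branch: similarities already computed for u_a are cached in u_a_similarity, so each neighbour is compared at most once\n 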
user_similarity = []\n # users who have rated item v\n for u in m_ratings[:,v].nonzero()[0]:\n # skip users that belong to the test set\n if u not in user_test:\n if(u_a_similarity[u] == 0):\n if(u != u_a):\n temp = m_ratings[u].toarray()[0]\n cosine = cosine_similarity([u_a_ratings], [temp])[0][0]\n user_similarity.append((u, cosine))\n u_a_similarity[u] = cosine\n else:\n user_similarity.append((u,0))\n else:\n user_similarity.append((u, u_a_similarity[u]))\n \n elif(type == 'pearson'):\n user_similarity = []\n # users who have rated item v\n for u in m_ratings[:,v].nonzero()[0]:\n # skip users that belong to the test set\n if u not in user_test:\n if(u_a_similarity[u] == 0):\n if(u != u_a):\n pearson = _pearsonr(u_a_ratings, m_ratings[u].toarray()[0])\n user_similarity.append((u, pearson))\n u_a_similarity[u] = pearson\n else:\n user_similarity.append((u,0))\n else:\n user_similarity.append((u, u_a_similarity[u]))\n \n return user_similarity\n\ndef user_mean_std(m_ratings, n_users):\n u_mean = np.zeros(n_users)\n u_std = np.zeros(n_users)\n for u in range(0, n_users):\n #aux = matrix[u].sum()/matrix[u].nnz # average rating of user u\n aux = m_ratings[u][m_ratings[u].nonzero()] # array of user u's ratings\n u_mean[u] = np.mean(aux)\n u_std[u] = np.std(aux)\n return u_mean, u_std\n\ndef one_user_mean_std(u_ratings):\n u_mean = np.mean(u_ratings)\n u_std = np.std(u_ratings)\n return u_mean, u_std\n\ndef k_neighborhood(user_similarity, k):\n # list of users sorted in descending order of similarity to u_a\n user_similarity.sort(key=lambda tup: tup[1], reverse=True)\n\n u_neighbor = []\n i = 0\n for u in user_similarity:\n u_neighbor.append(u)\n i += 1\n if(i == k):\n break\n\n return u_neighbor\n\n# Builds 5 folds of the form [test] [train] using a uniform distribution;\n# each list contains the indices of the test and train users\ndef k_fold_(matrix, n_users):\n # build an array of users sorted by number of ratings: users = (num_ratings, user_index)\n users = []\n for u in range (0, n_users):\n users.append((matrix[u].nnz, u))\n users.sort()\n users = users[::-1]\n\n fold_1 = []\n fold_2 = []\n fold_3 = []\n fold_4 = []\n fold_5 = []\n \n k = 5\n # distribute users across the folds uniformly\n for i, (n_ratings, u) in enumerate(users):\n if(i%k == 0):\n fold_1.append(u)\n elif(i%k == 1):\n fold_2.append(u)\n elif(i%k == 2):\n fold_3.append(u)\n elif(i%k == 3):\n fold_4.append(u)\n elif(i%k == 4):\n fold_5.append(u)\n # collect the folds in a list so they can be iterated easily\n folds = [fold_1, fold_2, fold_3, fold_4, fold_5]\n\n # build [test, train] pairs, using each fold as the test set exactly once\n k_folds = []\n\n for k, fold in enumerate(folds):\n test = fold\n # every fold except the k-th goes into the training set\n train = []\n for i in range(0, len(folds)):\n if (i != k):\n train = train + folds[i]\n k_folds.append([test,train])\n\n return 
k_folds","sub_path":"code/funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"279502937","text":"import rclpy\nfrom rclpy.node import Node\n\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\nimport json\nimport os\n\nclass MinimalSubscriber(Node):\n\n def __init__(self):\n # Load json config\n #absolute_path = os.path.dirname(os.path.abspath(__file__))\n #file_path = os.path.join(absolute_path, 'config.json')\n file_path = '/home/valle/dev_ws/src/sid_pubsub/sid_pubsub/config.json'\n f = open(file_path)\n config = json.load(f)\n f.close()\n super().__init__('minimal_subscriber')\n self.subscription = self.create_subscription(\n Twist,\n config['robot_command_topic'],\n self.listener_callback,\n 10)\n self.subscription # prevent unused variable warning\n\n def listener_callback(self, msg):\n self.get_logger().info('I heard: \"%s\"' % msg)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n minimal_subscriber = MinimalSubscriber()\n\n rclpy.spin(minimal_subscriber)\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n minimal_subscriber.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()","sub_path":"subscriber_member_function.py","file_name":"subscriber_member_function.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"605824162","text":"# def cumprimentar(nome: str) -> str:\n# return f'Olá, {nome}'\n\n# print(cumprimentar('Liniker'))\n\n\ndef cabecalho(texto: str, alinhamento: bool = True) -> str:\n if alinhamento:\n return f\"{texto.title()}\\n {'-' * len(texto)}\"\n else:\n return f\" {texto.title()} \".center(50, \"#\")\n\n\nprint(cabecalho('Geek University'))\n\nprint(cabecalho('Geek University', alinhamento=False))\n\nprint(cabecalho('liniker', alinhamento='oliveira')) # in Python a non-empty string is treated as True\n","sub_path":"GeekUniversity/aula04.py","file_name":"aula04.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"456161605","text":"from django.db.models import Q\n\nfrom profiles.models import Profile, InviteLead\n\n\nclass InviteHandler:\n\n @staticmethod\n def check_and_process_invite_lead(profile_uuid: str):\n \"\"\"\n Check if a profile was created on the basis of a lead and mark it.\n\n If a newly registered profile was created using a mail/mobile registered in your\n invite leads, then the referrer user should be notified/gifted.\n \"\"\"\n profile = Profile.objects.get(profile_uuid=profile_uuid)\n assert profile is not None\n\n invite_lead = InviteLead.objects.filter(\n (Q(invited_type=InviteLead.InviteMedium.EMAIL) & Q(invited_contact=profile.user.email)) |\n (Q(invited_type=InviteLead.InviteMedium.MOBILE) & Q(invited_contact=profile.mobile_number))\n ).first()\n\n if invite_lead:\n invite_lead.has_registered = True\n invite_lead.save()\n","sub_path":"profiles/controllers/invite_handler.py","file_name":"invite_handler.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"646067965","text":"\"\"\"\n.. warning::\n This module is deprecated. 
Use :mod:`feincms3.renderer.RegionRenderer`\n instead.\n\"\"\"\n\nimport warnings\nfrom collections import deque\nfrom functools import wraps\n\nfrom content_editor.contents import contents_for_item\nfrom django.core.cache import cache\nfrom django.utils.functional import SimpleLazyObject\nfrom django.utils.html import mark_safe\n\n\n__all__ = (\"Regions\", \"matches\", \"cached_render\")\n\n\nwarnings.warn(\n \"feincms3.regions is deprecated. Switch to feincms3.renderer.RegionRenderer now.\",\n DeprecationWarning,\n stacklevel=2,\n)\n\n\ndef cached_render(fn):\n \"\"\"\n Decorator for ``Regions.render`` methods implementing caching behavior\n \"\"\"\n\n @wraps(fn)\n def render(self, region, context=None):\n key = self.cache_key(region) if self.cache_key else None\n if key:\n result = cache.get(key)\n if result is not None:\n return result\n result = fn(self, region, context)\n if key:\n cache.set(key, result, timeout=self.timeout)\n return result\n\n return render\n\n\nclass Regions:\n \"\"\"\n ``Regions`` uses ``content_editor.contents.Contents`` and the\n ``feincms3.renderer.TemplatePluginRenderer`` to convert a list of plugins\n into a rendered representation, most often a HTML fragment.\n\n This class may also be instantiated directly but using the factory methods\n (starting with ``from_``) below is probably more comfortable.\n \"\"\"\n\n @classmethod\n def from_contents(cls, contents, *, renderer, **kwargs):\n \"\"\"\n Create and return a regions instance using the bare minimum of a\n contents instance and a renderer. Additional keyword arguments are\n forwarded to the regions constructor.\n \"\"\"\n return cls(contents=contents, renderer=renderer, **kwargs)\n\n @classmethod\n def from_item(cls, item, *, renderer, inherit_from=None, timeout=None, **kwargs):\n \"\"\"\n Create and return a regions instance for an item (for example a page,\n an article or anything else managed by django-content-editor).\n\n The item's plugins are determined by what is registered with the\n renderer. The plugin instances themselves are loaded lazily, and\n loading every time can be avoided completely by specifying a\n ``timeout``.\n \"\"\"\n if timeout is not None and \"cache_key\" not in kwargs:\n key = f\"{item._meta.label_lower}-{item.pk}\"\n kwargs[\"cache_key\"] = lambda region: f\"{key}-{region}\"\n return cls.from_contents(\n SimpleLazyObject(\n lambda: contents_for_item(\n item, renderer.plugins(), inherit_from=inherit_from\n )\n ),\n renderer=renderer,\n timeout=timeout,\n **kwargs,\n )\n\n def __init__(self, *, contents, renderer, cache_key=None, timeout=None):\n self.contents = contents\n self.renderer = renderer\n self.cache_key = cache_key\n self.timeout = timeout\n self.handlers = {\n key[7:]: getattr(self, key)\n for key in dir(self)\n if key.startswith(\"handle_\")\n }\n\n @property\n def regions(self):\n return self.contents.regions\n\n @cached_render\n def render(self, region, context=None):\n \"\"\"\n Main function for rendering.\n\n Starts the generator and assembles all fragments into a safe HTML\n string.\n \"\"\"\n return mark_safe(\"\".join(self.generate(self.contents[region], context)))\n\n def generate(self, items, context):\n \"\"\"\n Inspects all items in the region for a ``subregion`` attribute and\n passes control to the subregions' respective rendering handler, named\n ``handle_``. If ``subregion`` is not set or is falsy\n ``handle_default`` is invoked instead. 
This method raises a\n ``KeyError`` exception if no matching handler exists.\n\n You probably want to call this method when overriding ``render``.\n \"\"\"\n items = deque(items)\n while items:\n subregion = getattr(items[0], \"subregion\", None) or \"default\"\n yield from self.handlers[subregion](items, context)\n\n def handle_default(self, items, context):\n \"\"\"\n Renders items from the queue using the renderer instance as long as the\n items either have no ``subregion`` attribute or whose ``subregion``\n attribute is an empty string.\n \"\"\"\n while True:\n yield self.renderer.render_plugin(items.popleft(), context)\n if not items or not matches(items[0], subregions={None, \"\"}):\n break\n\n\ndef matches(item, *, plugins=None, subregions=None):\n \"\"\"\n Checks whether the item matches zero or more constraints.\n\n ``plugins`` should be a tuple of plugin classes or ``None`` if the type\n shouldn't be checked.\n\n ``subregions`` should be set of allowed ``subregion`` attribute values or\n ``None`` if the ``subregion`` attribute shouldn't be checked at all.\n Include ``None`` in the set if you want ``matches`` to succeed also when\n encountering an item without a ``subregion`` attribute.\n \"\"\"\n if plugins is not None and not isinstance(item, plugins):\n return False\n if subregions is not None and getattr(item, \"subregion\", None) not in subregions:\n return False\n return True\n","sub_path":"feincms3/regions.py","file_name":"regions.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"349799004","text":"import argparse\nimport scipy\nimport os\nimport numpy as np\nimport json\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms\nfrom scipy import ndimage\nfrom tqdm import tqdm\nfrom math import ceil\nfrom glob import glob\nfrom PIL import Image\nimport dataloaders\nimport models\nfrom utils.helpers import colorize_mask\nimport time\n\nimport pycuda.autoinit\nimport numpy as np\nimport pycuda.driver as cuda\nimport tensorrt as trt\n##################################################\nclass HostDeviceMem(object):\n def __init__(self, host_mem, device_mem):\n \"\"\"Within this context, host_mom means the cpu memory and device means the GPU memory\n \"\"\"\n self.host = host_mem\n self.device = device_mem\n\n def __str__(self):\n return \"Host:\\n\" + str(self.host) + \"\\nDevice:\\n\" + str(self.device)\n\n def __repr__(self):\n return self.__str__()\n\n\ndef do_inference(context, bindings, inputs, outputs, stream, batch_size=1):\n # Transfer data from CPU to the GPU.\n [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]\n # Run inference.\n context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)\n # Transfer predictions back from the GPU.\n [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]\n # Synchronize the stream\n stream.synchronize()\n # Return only the host outputs.\n return [out.host for out in outputs]\n\ndef postprocess_the_outputs(h_outputs, shape_of_output):\n h_outputs = h_outputs.reshape(*shape_of_output)\n return h_outputs\n\ndef allocate_buffers(engine):\n inputs = []\n outputs = []\n bindings = []\n stream = cuda.Stream()\n for binding in engine:\n size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size\n dtype = trt.nptype(engine.get_binding_dtype(binding))\n print(binding)\n print(size)\n print(dtype)\n # Allocate 
host and device buffers\n host_mem = cuda.pagelocked_empty(size, dtype)\n device_mem = cuda.mem_alloc(host_mem.nbytes)\n # Append the device buffer to device bindings.\n bindings.append(int(device_mem))\n # Append to the appropriate list.\n if engine.binding_is_input(binding):\n inputs.append(HostDeviceMem(host_mem, device_mem))\n else:\n outputs.append(HostDeviceMem(host_mem, device_mem))\n return inputs, outputs, bindings, stream\n\n\ndef to_numpy(tensor):\n return tensor.detach().cpu().numpy().astype(np.float32) if tensor.requires_grad else tensor.cpu().numpy().astype(np.float32)\n######################################################\n\ndef pad_image(img, target_size):\n rows_to_pad = max(target_size[0] - img.shape[2], 0)\n cols_to_pad = max(target_size[1] - img.shape[3], 0)\n padded_img = F.pad(img, (0, cols_to_pad, 0, rows_to_pad), \"constant\", 0)\n return padded_img\n\ndef sliding_predict(model, image, num_classes, flip=True):\n image_size = image.shape\n tile_size = (int(image_size[2]//2.5), int(image_size[3]//2.5))\n overlap = 1/3\n\n stride = ceil(tile_size[0] * (1 - overlap))\n \n num_rows = int(ceil((image_size[2] - tile_size[0]) / stride) + 1)\n num_cols = int(ceil((image_size[3] - tile_size[1]) / stride) + 1)\n total_predictions = np.zeros((num_classes, image_size[2], image_size[3]))\n count_predictions = np.zeros((image_size[2], image_size[3]))\n tile_counter = 0\n\n for row in range(num_rows):\n for col in range(num_cols):\n x_min, y_min = int(col * stride), int(row * stride)\n x_max = min(x_min + tile_size[1], image_size[3])\n y_max = min(y_min + tile_size[0], image_size[2])\n\n img = image[:, :, y_min:y_max, x_min:x_max]\n padded_img = pad_image(img, tile_size)\n tile_counter += 1\n padded_prediction = model(padded_img)\n if flip:\n fliped_img = padded_img.flip(-1)\n fliped_predictions = model(padded_img.flip(-1))\n padded_prediction = 0.5 * (fliped_predictions.flip(-1) + padded_prediction)\n predictions = padded_prediction[:, :, :img.shape[2], :img.shape[3]]\n count_predictions[y_min:y_max, x_min:x_max] += 1\n total_predictions[:, y_min:y_max, x_min:x_max] += predictions.data.cpu().numpy().squeeze(0)\n\n total_predictions /= count_predictions\n return total_predictions\n\n\ndef multi_scale_predict(model, image, scales, num_classes, device, flip=False):\n input_size = (image.size(2), image.size(3))\n upsample = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)\n total_predictions = np.zeros((num_classes, image.size(2), image.size(3)))\n\n image = image.data.data.cpu().numpy()\n for scale in scales:\n scaled_img = ndimage.zoom(image, (1.0, 1.0, float(scale), float(scale)), order=1, prefilter=False)\n scaled_img = torch.from_numpy(scaled_img).to(device)\n scaled_prediction = upsample(model(scaled_img).cpu())\n\n if flip:\n fliped_img = scaled_img.flip(-1).to(device)\n fliped_predictions = upsample(model(fliped_img).cpu())\n scaled_prediction = 0.5 * (fliped_predictions.flip(-1) + scaled_prediction)\n total_predictions += scaled_prediction.data.cpu().numpy().squeeze(0)\n\n total_predictions /= len(scales)\n return total_predictions\n\n\ndef save_images(image, GT, mask, output_path, image_file, palette, original_size):\n\t# Saves the image, the model output and the results after the post processing\n zero_pad = 256 * 3 - len(palette)\n for i in range(zero_pad):\n palette.append(0)\n\n w, h = image.size\n\n if original_size:\n w, h =original_size\n\n image_file = os.path.basename(image_file).split('.')[0]\n colorized_mask = colorize_mask(mask, palette)\n GT = 
GT.convert('P')\n GT.putpalette(palette)\n\n\n if image.size != original_size:\n image = image.resize(size=original_size, resample=Image.BILINEAR)\n if colorized_mask.size != original_size:\n colorized_mask = colorized_mask.resize(size=original_size, resample=Image.NEAREST)\n if GT.size != original_size:\n GT = GT.resize(size=original_size, resample=Image.NEAREST)\n\n blend = Image.blend(image, colorized_mask.convert('RGB'), 0.5)\n\n colorized_mask.save(os.path.join(output_path, image_file+'.png'))\n output_im = Image.new('RGB', (w*4, h))\n output_im.paste(image, (0,0))\n output_im.paste(GT, (w,0))\n output_im.paste(colorized_mask, (w*2,0))\n output_im.paste(blend, (w*3,0))\n output_im.save(os.path.join(output_path, image_file+'_colorized.png'))\n # mask_img = Image.fromarray(mask, 'L')\n # mask_img.save(os.path.join(output_path, image_file+'.png'))\n\ndef my_pixel_accuracy(output, target):\n output = np.asarray(output)\n target = np.asarray(target)\n pixel_labeled = np.sum(target >= 0)\n pixel_correct = np.sum((output == target) * (target >= 0))\n assert pixel_correct <= pixel_labeled, \"Correct area should be smaller than Labeled\"\n return pixel_correct, pixel_labeled\ndef class_pixel_accuracy(output, target, cls):\n output = np.asarray(output)\n target = np.asarray(target)\n pixel_labeled = np.sum(target == cls)\n pixel_correct = np.sum((output == target) * (target == cls))\n assert pixel_correct <= pixel_labeled, \"Correct area should be smaller than Labeled\"\n pix_acc = (pixel_correct/pixel_labeled) if pixel_labeled!=0 else 1\n return pixel_correct, pixel_labeled, pix_acc\ndef inter_over_union(output, target, num_class):\n output = np.asarray(output) + 1\n target = np.asarray(target) + 1\n output = output * (target > 0)\n\n intersection = output * (output == target)\n area_inter, _ = np.histogram(intersection, bins=num_class, range=(1, num_class))\n area_pred, _ = np.histogram(output, bins=num_class, range=(1, num_class))\n area_lab, _ = np.histogram(target, bins=num_class, range=(1, num_class))\n area_union = area_pred + area_lab - area_inter\n assert (area_inter <= area_union).all(), \"Intersection area should be smaller than Union area\"\n iou = np.divide(area_inter,area_union,out=np.ones(area_inter.shape,dtype=float),where=area_union!=0)\n return area_inter, area_union, iou\n\n\ndef main():\n args = parse_arguments()\n config = json.load(open(args.config))\n\n # Dataset used for training the model\n dataset_type = config['train_loader']['type']\n loader = getattr(dataloaders, config['train_loader']['type'])(**config['train_loader']['args'])\n to_tensor = transforms.ToTensor()\n #normalize = transforms.Normalize(loader.MEAN, loader.STD)\n num_classes = loader.dataset.num_classes\n palette = loader.dataset.palette\n base_size = loader.dataset.base_size\n\n print(config['arch']['type'])\n # Model\n model = getattr(models, config['arch']['type'])(num_classes, **config['arch']['args'])\n available_gpus = list(range(torch.cuda.device_count()))\n device = torch.device('cuda:0' if len(available_gpus) > 0 else 'cpu')\n\n checkpoint = torch.load(args.model)\n if isinstance(checkpoint, dict) and 'state_dict' in checkpoint.keys():\n checkpoint = checkpoint['state_dict']\n if 'module' in list(checkpoint.keys())[0] and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n model.load_state_dict(checkpoint)\n model.to(device)\n model.eval()\n\n if not os.path.exists('outputs'):\n os.makedirs('outputs')\n\n image_files = sorted(glob(os.path.join(args.images, 
f'*.{args.extension}')))\n\n # with open(\"fp16_model.engine\", 'rb') as f, trt.Runtime(trt.Logger(trt.Logger.WARNING)) as runtime, torch.no_grad():\n\n # engine = runtime.deserialize_cuda_engine(f.read())\n # inputs, outputs, bindings, stream = allocate_buffers(engine) # input, output: host # bindings\n # shape_of_output = (1, num_classes, 128, 128)\n\n # with engine.create_execution_context() as context:\n\n # tbar = tqdm(image_files, ncols=100)\n # total_image=0\n # total_pixel_correct=0\n # total_pixel_labeled=0\n # cls_total_IOU = np.zeros(num_classes)\n # cls_total_pix_acc = np.zeros(num_classes)\n # cls_total_pix_correct=np.zeros(num_classes)\n # cls_total_pix_labeled=np.zeros(num_classes)\n # Total_Inference_Time=0\n\n # for img_file in tbar:\n # total_image += 1 \n # image = Image.open(img_file).convert('RGB')\n # original_size=image.size\n\n # image_name = os.path.basename(img_file)\n # target=Image.open(\"/home/ubuntu/TM2/mask/\"+image_name)\n\n # if base_size:\n # image = image.resize(size=(base_size, base_size), resample=Image.BILINEAR)\n # target = target.resize(size=(base_size, base_size), resample=Image.NEAREST)\n \n # ticks = time.time()\n # input = to_tensor(image).unsqueeze(0)\n # trt_input_image = to_numpy(input)\n # inputs[0].host = trt_input_image.reshape(-1) \n # trt_outputs = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)\n # trt_feat = postprocess_the_outputs(trt_outputs[0], shape_of_output)\n # trt_prediction = F.interpolate(torch.from_numpy(trt_feat), size=(512,512), mode='bilinear', align_corners=True) \n # trt_prediction = trt_prediction.squeeze(0).cpu().numpy()\n # trt_prediction = F.softmax(torch.from_numpy(trt_prediction), dim=0).argmax(0).cpu().numpy()\n\n # Total_Inference_Time += time.time()-ticks\n\n # ####################################\n # _,_,iou = inter_over_union(trt_prediction, target, num_classes)\n # cls_total_IOU = cls_total_IOU + iou\n\n # pixel_correct, pixel_labeled=my_pixel_accuracy(trt_prediction,target)\n # total_pixel_correct+=pixel_correct\n # total_pixel_labeled+=pixel_labeled\n # for i in range(num_classes):\n # cls_pix_correct, cls_pix_labeled, acc=class_pixel_accuracy(trt_prediction,target,i)\n # cls_total_pix_correct[i]+=cls_pix_correct\n # cls_total_pix_labeled[i]+=cls_pix_labeled\n # cls_total_pix_acc[i]+=acc\n\n # save_images(image, target, trt_prediction, args.output, img_file, palette, original_size)\n\n # print(\"time used: {}\".format(Total_Inference_Time))\n # print(\"pix acc: {}\".format(total_pixel_correct/total_pixel_labeled))\n # print(\"class pix acc: {}\".format(cls_total_pix_correct/cls_total_pix_labeled))\n # print(\"avg class IOU: {}\".format(cls_total_IOU/total_image))\n # print(\"avg class pix_acc: {}\".format(cls_total_pix_acc/total_image))\n \n with torch.no_grad():\n tbar = tqdm(image_files, ncols=100)\n total_image=0\n total_pixel_correct=0\n total_pixel_labeled=0\n cls_total_IOU = np.zeros(num_classes)\n cls_total_pix_acc = np.zeros(num_classes)\n cls_total_pix_correct=np.zeros(num_classes)\n cls_total_pix_labeled=np.zeros(num_classes)\n Total_Inference_Time=0\n\n for img_file in tbar:\n total_image += 1 \n image = Image.open(img_file).convert('RGB')\n original_size=image.size\n\n image_name = os.path.basename(img_file)\n target=Image.open(\"/home/ubuntu/TM2/mask/\"+image_name)\n\n if base_size:\n image = image.resize(size=(base_size, base_size), resample=Image.BILINEAR)\n target = target.resize(size=(base_size, base_size), resample=Image.NEAREST)\n\n #input = 
normalize(to_tensor(image)).unsqueeze(0)\n ticks = time.time()\n input = to_tensor(image).unsqueeze(0)\n\n if args.mode == 'multiscale':\n # NOTE: 'scales' was referenced but never defined in the original; a typical multi-scale set is assumed here\n scales = [0.75, 1.0, 1.25, 1.5]\n prediction = multi_scale_predict(model, input, scales, num_classes, device)\n elif args.mode == 'sliding':\n prediction = sliding_predict(model, input, num_classes)\n else:\n prediction = model(input.to(device))\n if config['arch']['type'][:2] == 'IC':\n prediction = prediction[0]\n elif config['arch']['type'][-3:] == 'OCR':\n prediction = prediction[0]\n elif 'Nearest' in config['arch']['type']:\n prediction = prediction[0]\n elif 'Inference' in config['arch']['type']:\n prediction = F.interpolate(prediction, size=(512,512), mode='bilinear', align_corners=True) \n elif config['arch']['type'][:3] == 'Enc':\n prediction = prediction[0]\n elif config['arch']['type'][:5] == 'DANet':\n prediction = prediction[0]\n prediction = prediction.squeeze(0).cpu().numpy()\n\n prediction = F.softmax(torch.from_numpy(prediction), dim=0).argmax(0).cpu().numpy()\n\n Total_Inference_Time += time.time()-ticks\n\n _,_,iou = inter_over_union(prediction, target, num_classes)\n cls_total_IOU = cls_total_IOU + iou\n\n\n\n pixel_correct, pixel_labeled = my_pixel_accuracy(prediction, target)\n total_pixel_correct += pixel_correct\n total_pixel_labeled += pixel_labeled\n for i in range(num_classes):\n cls_pix_correct, cls_pix_labeled, acc = class_pixel_accuracy(prediction, target, i)\n cls_total_pix_correct[i] += cls_pix_correct\n cls_total_pix_labeled[i] += cls_pix_labeled\n cls_total_pix_acc[i] += acc\n\n save_images(image, target, prediction, args.output, img_file, palette, original_size)\n\n print(\"time used: {}\".format(Total_Inference_Time))\n print(\"pix acc: {}\".format(total_pixel_correct/total_pixel_labeled))\n print(\"class pix acc: {}\".format(cls_total_pix_correct/cls_total_pix_labeled))\n print(\"avg class IOU: {}\".format(cls_total_IOU/total_image))\n print(\"avg class pix_acc: {}\".format(cls_total_pix_acc/total_image))\n\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description='Inference')\n parser.add_argument('-c', '--config', default='VOC', type=str,\n help='The config used to train the model')\n parser.add_argument('-mo', '--mode', default='normal', type=str,\n help='Mode used for prediction: either [multiscale, sliding]')\n parser.add_argument('-m', '--model', default='model_weights.pth', type=str,\n help='Path to the .pth model checkpoint to be used in the prediction')\n parser.add_argument('-i', '--images', default=None, type=str,\n help='Path to the images to be segmented')\n parser.add_argument('-o', '--output', default='outputs', type=str, \n help='Output Path')\n parser.add_argument('-e', '--extension', default='png', type=str,\n help='The extension of the images to be segmented')\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n main()\n","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":17368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"423143205","text":"# coding: utf-8\nimport datetime as dt\n\nimport pytest\nimport mock\nimport freezegun\nimport testfixtures\n\nimport mailtank\nfrom rsstank import app, send_feeds\nfrom rsstank.models import db, AccessKey, Feed, FeedItem\nfrom . 
import TestCase, fixtures\n\n\ndef get_first_send_interval_as_datetimes(utc_now=None):\n utc_start_time, utc_end_time = \\\n app.config['RSSTANK_DEFAULT_FIRST_SEND_INTERVAL']\n if not utc_now:\n utc_now = dt.datetime.utcnow()\n utc_today = utc_now.date()\n utc_interval_start = dt.datetime.combine(utc_today, utc_start_time)\n utc_interval_end = dt.datetime.combine(utc_today, utc_end_time)\n return utc_interval_start, utc_interval_end\n\n\nclass TestSendFeeds(TestCase):\n \"\"\"Tests for the internals of ./manage.py send_feeds.\"\"\"\n\n def setup_method(self, method):\n TestCase.setup_method(self, method)\n self.access_key = AccessKey(content='123', is_enabled=True,\n namespace='test')\n self.disabled_access_key = AccessKey(content='456', is_enabled=False,\n namespace='test2')\n db.session.add(self.access_key)\n db.session.add(self.disabled_access_key)\n db.session.commit()\n\n def test_feed_item_to_context_entry(self):\n feed_item = fixtures.create_feed_item(seed=1)\n\n expected_context_entry = {\n 'category': u'Category 1',\n 'link': u'http://66.ru/1.rss',\n 'description': u'Description 1',\n 'title': u'Title 1',\n 'author': u'Author 1',\n 'guid': u'1',\n 'pub_date': '2013-11-20 12:00:00',\n 'comments': u'http://66.ru/comments/1/',\n 'enclosure': {\n 'length': '',\n 'type': '',\n 'url': '',\n },\n 'source': {\n 'content': '',\n 'url': '',\n },\n }\n assert feed_item.to_context_entry() == expected_context_entry\n\n feed_item.enclosure_url = 'http://66.ru/logo.png'\n feed_item.enclosure_type = 'image/png'\n expected_context_entry['enclosure'] = {\n 'url': feed_item.enclosure_url,\n 'type': feed_item.enclosure_type,\n 'length': '',\n }\n assert feed_item.to_context_entry() == expected_context_entry\n\n feed_item.source_url = 'http://www.nytimes.com/2013/09/12/opinion/putin.html'\n expected_context_entry['source'] = {\n 'url': feed_item.source_url,\n 'content': '',\n }\n assert feed_item.to_context_entry() == expected_context_entry\n\n def test_feed_is_it_time_to_send_1(self):\n \"\"\"Tests `Feed.is_it_time_to_send` for a feed that has never been sent.\"\"\"\n feed = fixtures.create_feed('http://example.com/example.rss', self.access_key)\n assert not feed.last_sent_at\n\n utc_interval_start, utc_interval_end = get_first_send_interval_as_datetimes()\n # midpoint of today's interval during which feeds may be sent for the first time\n utc_interval_median = \\\n utc_interval_start + (utc_interval_end - utc_interval_start) / 2\n\n with freezegun.freeze_time(utc_interval_median):\n assert feed.is_it_time_to_send()\n\n with freezegun.freeze_time(utc_interval_end + dt.timedelta(hours=1)):\n assert not feed.is_it_time_to_send()\n\n def test_feed_is_it_time_to_send_2(self):\n \"\"\"Tests `Feed.is_it_time_to_send` for a feed that has been sent before.\"\"\"\n feed = fixtures.create_feed('http://example.com/example.rss', self.access_key)\n feed.last_sent_at = dt.datetime.utcnow().replace(microsecond=0)\n feed.sending_interval = 60 * 60 * 24\n\n with freezegun.freeze_time(feed.last_sent_at + dt.timedelta(hours=23)):\n assert not feed.is_it_time_to_send()\n\n with freezegun.freeze_time(feed.last_sent_at + dt.timedelta(hours=23, seconds=59)):\n assert not feed.is_it_time_to_send()\n\n with freezegun.freeze_time(feed.last_sent_at + dt.timedelta(hours=24)):\n assert feed.is_it_time_to_send()\n\n def test_feed_are_there_items_to_send(self):\n feed = fixtures.create_feed('http://example.com/example.rss', self.access_key)\n feed.access_key = self.access_key\n feed.last_sent_at = dt.datetime.utcnow() - dt.timedelta(days=3)\n 
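# with last_sent_at three days in the past, any item created since then should count as pending\n 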
db.session.add(feed)\n\n assert not feed.are_there_items_to_send()\n\n for i in range(10):\n feed_item = fixtures.create_feed_item(i)\n feed_item.created_at = dt.datetime.utcnow() - dt.timedelta(days=i)\n feed.items.append(feed_item)\n db.session.commit()\n\n assert feed.are_there_items_to_send()\n\n feed.last_sent_at = dt.datetime.utcnow() + dt.timedelta(days=1)\n assert not feed.are_there_items_to_send()\n\n def test_send_feed_boundary_cases(self):\n feed = fixtures.create_feed('http://example.com/example-1.rss', self.access_key)\n feed.access_key = self.access_key\n db.session.add(feed)\n db.session.commit()\n\n # Притворяемся глупыми и зовёт send_feed с фидом, у которого нет\n # новых элементов. Ожидаем споткнуться о проверку:\n with pytest.raises(AssertionError):\n send_feeds.send_feed(feed)\n\n # Добавляем в него элементов\n for i in range(1, 3):\n feed_item = fixtures.create_feed_item(i)\n feed_item.created_at = dt.datetime.utcnow() + dt.timedelta(days=i)\n feed.items.append(feed_item)\n db.session.commit()\n\n class MailtankErrorStub(mailtank.MailtankError):\n def __init__(self, code, message):\n self.code = code\n self.message = message\n\n # Делаем вид, что Mailtank API вернул 503\n mailtank_error_stub = MailtankErrorStub(503, 'Whoops')\n with mock.patch('mailtank.Mailtank.create_mailing',\n autospec=True, side_effect=mailtank_error_stub):\n with testfixtures.LogCapture() as l:\n send_feeds.send_feed(feed)\n\n # Проверяем, что происшествие отражено в логах\n log_record = l.records[-1]\n assert log_record.levelname == 'WARNING'\n log_message = log_record.getMessage()\n assert repr(mailtank_error_stub) in log_message\n assert repr(feed) in log_message\n\n def test_unique_items_in_mailing(self):\n feed = fixtures.create_feed('http://example.com/example-1.rss', self.access_key)\n feed.sending_interval = 60 * 60 * 24\n feed.access_key = self.access_key\n\n # Создаем два элемента с разными `pub_date` и одинаковыми `guid`\n item1 = fixtures.create_feed_item(1)\n item2 = fixtures.create_feed_item(1)\n item3 = fixtures.create_feed_item(3)\n\n item2.pub_date = dt.datetime(2013, 11, 21, 12, 00, 00) - dt.timedelta(days=2)\n feed.items.extend([item2, item1, item3])\n db.session.add(feed)\n db.session.commit()\n\n # Проверяем, что в рассылке не будет дублирующихся элементов фида\n with mock.patch('mailtank.Mailtank.create_mailing',\n autospec=True) as create_mailing_mock:\n send_feeds.send_feed(feed)\n\n call, args = create_mailing_mock.call_args\n context = args['context']\n assert len(context['items']) == 2\n assert context['items'][1]['pub_date'] == \\\n item1.pub_date.strftime('%Y-%m-%d %H:%M:%S')\n\n def test_context_contains_channel_data(self):\n feed = fixtures.create_feed('http://example.com/example-1.rss', self.access_key)\n item = fixtures.create_feed_item(1)\n feed.items.append(item)\n db.session.add(feed)\n db.session.commit()\n\n with mock.patch('mailtank.Mailtank.create_mailing',\n autospec=True) as create_mailing_mock:\n send_feeds.send_feed(feed)\n\n call, args = create_mailing_mock.call_args\n context = args['context']\n assert context['channel']['link'] == feed.channel_link\n assert context['channel']['description'] == feed.channel_description\n assert context['channel']['title'] == feed.channel_title\n assert context['channel']['image_url'] == feed.channel_image_url\n\n def test_main(self):\n # Создаём фид номер раз\n feed_1 = fixtures.create_feed('http://example.com/example-1.rss', self.access_key)\n feed_1.sending_interval = 60 * 60 * 24\n feed_1.access_key = 
self.access_key\n db.session.add(feed_1)\n\n # Добавляем в него элементы датированные от \"сегодня минус 9 дней\" до\n # \"вчера\"\n for i in range(1, 10):\n feed_item = fixtures.create_feed_item(i)\n feed_item.created_at = dt.datetime.utcnow() - dt.timedelta(days=i)\n feed_1.items.append(feed_item)\n db.session.commit()\n\n # Создаём фид номер два\n feed_2 = fixtures.create_feed('http://example.com/example-2.rss', self.access_key)\n feed_2.sending_interval = 60 * 60 * 24\n feed_2.access_key = self.access_key\n db.session.add(feed_2)\n\n # Добавляем в него элементы датированные от \"сегодня минус 3 дня\" до\n # \"вчера\"\n for i in range(1, 3):\n feed_item = fixtures.create_feed_item(i)\n feed_item.created_at = dt.datetime.utcnow() - dt.timedelta(days=i)\n feed_2.items.append(feed_item)\n db.session.commit()\n\n # Случай номер 1\n # ==============\n # Заявляем, что в последний раз посылали первый фид три дня назад\n feed_1.last_sent_at = dt.datetime.utcnow() - dt.timedelta(days=4)\n\n # Замораживаем время где-нибудь в будущем, но точно вне интервала,\n # допускающего посылку вида впервые\n _, utc_interval_end = get_first_send_interval_as_datetimes(\n utc_now=dt.datetime.utcnow() + dt.timedelta(days=1))\n freezed_utc_now = utc_interval_end + dt.timedelta(seconds=1)\n with freezegun.freeze_time(freezed_utc_now):\n with mock.patch('mailtank.Mailtank.create_mailing',\n autospec=True) as create_mailing_mock:\n send_feeds.main()\n\n # Проверяем, что create_mailing позвался однажды\n assert create_mailing_mock.call_count == 1\n\n # С верным контекстом и целью\n _, kwargs = create_mailing_mock.call_args\n\n context = kwargs['context']\n assert len(context['items']) == 4\n\n target = kwargs['target']\n target_tags = target['tags']\n assert len(target_tags) == 1\n assert target_tags[0] == feed_1.tag\n\n # Проверяем, что вызов команды обновил `last_sent_at` фида\n assert feed_1.last_sent_at == freezed_utc_now\n\n # Случай номер 2\n # ==============\n # Запускаем команду в это же время во второй раз. 
Ничего не должно произойти\n with mock.patch('mailtank.Mailtank.create_mailing',\n autospec=True) as create_mailing_mock:\n with freezegun.freeze_time(freezed_utc_now):\n send_feeds.main()\n assert not create_mailing_mock.called\n\n # Случай номер 3\n # ==============\n # (*) Добавляем новые элементы в первый фид, датируя их будущим\n for i in range(10, 15):\n feed_item = fixtures.create_feed_item(i)\n feed_item.created_at = feed_1.last_sent_at + dt.timedelta(hours=i)\n feed_1.items.append(feed_item)\n db.session.commit()\n\n # И посылаем рассылку гарантированно по истечению `sending_interval` первого\n # фида, притом так, чтобы текущее время попало в интервал, допускающий\n # посылку фидов впервые (что должно вызвать посылку второго фида)\n freezed_utc_now = feed_1.last_sent_at + dt.timedelta(\n days=1, seconds=feed_1.sending_interval)\n _, freezed_utc_now = get_first_send_interval_as_datetimes(\n utc_now=freezed_utc_now)\n\n with freezegun.freeze_time(freezed_utc_now):\n with mock.patch('mailtank.Mailtank.create_mailing',\n autospec=True) as create_mailing_mock:\n send_feeds.main()\n\n # Проверяем, что создались _две_ рассылки (для первого фида\n # это вторая рассылка, для второго -- первая)\n assert create_mailing_mock.call_count == 2\n\n contexts_by_target_tag = {}\n for _, kwargs in create_mailing_mock.call_args_list:\n tag = kwargs['target']['tags'][0]\n context = kwargs['context']\n contexts_by_target_tag[tag] = context\n\n # На _два_ различных тега\n assert set(contexts_by_target_tag.keys()) == {feed_1.tag, feed_2.tag}\n # Проверяем, что из первого фида послались элементы,\n # добавленные после последней его посылки (см. (*)), ...\n len(contexts_by_target_tag[feed_1.tag]['items']) == 4\n # ...а второй фид послался впервые, захватив все два своих элемента.\n len(contexts_by_target_tag[feed_2.tag]['items']) == 2\n\n assert feed_1.last_sent_at == freezed_utc_now\n assert feed_2.last_sent_at == freezed_utc_now\n","sub_path":"tests/test_send_feeds.py","file_name":"test_send_feeds.py","file_ext":"py","file_size_in_byte":14402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"184992404","text":"from django.contrib import admin\nfrom .models import (\n Post\n)\n# Register your models here.\n\n\nclass PostAdmin(admin.ModelAdmin):\n search_fields = ['title_es', 'title_en']\n list_display = [ 'short_title_es', 'short_description_es', 'datePublished', 'id','draft']\n # list_display = ['name', 'experience_es']\n\n\n fieldsets = [\n ('Post general Info ', {'fields': ['draft',\n 'datePublished',\n 'thumb_img']}),\n ('Post Content', {'fields': [\n ('title_es',\n 'title_en'),\n ('description_es',\n 'description_en'),\n 'content_es',\n 'content_en',\n 'tags_es',\n 'tags_en']}),\n ]\n\nadmin.site.register(Post,PostAdmin)\n","sub_path":"gaia/gaia/blog/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"581924005","text":"import sys\n\nif(len(sys.argv) < 2):\n print(\"Please enter an input file.\")\n sys.exit()\n\nname = \"\"\nvalues = []\n\nwith open(sys.argv[1], \"r\") as f:\n content = f.readlines()\ncontent = [x.strip() for x in content]\n\nif(len(content) < 2):\n print(\"File is in incorrect format.\")\n print(\"Must be at least 2 lines:\\nOne for the name,\")\n print(\"And one for at least one enumeration.\")\n sys.exit()\n\nname = content[0]\nvalues = content[1:]\n\nwith open(name + \".h\", \"w\") as f:\n 
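# the file body below is built with \",\\n\".join so there is no trailing comma to strip;\n    # the original f.seek(-2, 1) trick raises io.UnsupportedOperation on Python 3 text-mode files\n    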
f.write(\"extern const char* {}_NAMES[];\\n\".format(name.upper()))\n    f.write(\"typedef enum {\\n\")\n    f.write(\",\\n\".join(\"    {}_{}\".format(name.upper(), value) for value in values))\n    f.write(\"\\n{} {};\".format(\"}\", name.upper()))\n\nwith open(name + \".c\", \"w\") as f:\n    f.write(\"const char* {}_NAMES[] = {{\\n\".format(name.upper()))\n    f.write(\",\\n\".join(\"    \\\"{}\\\"\".format(value) for value in values))\n    f.write(\"\\n};\")\n","sub_path":"enum.py","file_name":"enum.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"331078411","text":"#!/usr/bin/env python\n\nimport os\nimport pexpect\nfrom data_processing import DataProcess\nimport re\n\n\nclass StoreData(DataProcess):\n    \"\"\"\n    Loads the data files from the local file system to the Hadoop distributed file system\n    \"\"\"\n\n    def __init__(self, input_path, output_path, backup_path):\n        super(StoreData, self).__init__(input_path, output_path, backup_path)\n\n    def store_data(self):\n        \"\"\"\n        stores the data from the local file system to the hadoop file system.\n        :return:\n        \"\"\"\n        hdfs_cmd = \"hdfs dfs\"\n        self.logger.info(\"Starting to load the data from {} to hdfs {}\".format(self.input_path, self.output_path))\n        files = self.check_path(self.input_path)\n        if files:\n            status = self.create_backup_path()\n            assert status, \"Error: could not create backup folder\"\n            for file in files:\n                self.logger.info(\"------------file = {}\".format(file))\n                season = re.search(r'.*_(.*)\\..*', file).group(1)\n                hdfs_path = os.path.join(self.output_path, season)\n                mkdir = '{} -mkdir -p {}'.format(hdfs_cmd, hdfs_path)\n                file_path = os.path.join(self.input_path, file)\n                put_cmd = '{} -put {} {}'.format(hdfs_cmd, file_path, hdfs_path)\n                pexpect.run(mkdir)\n                self.wait_timeout(2)\n                self.logger.info(\"Copying {} to hdfs {}\".format(file_path, hdfs_path))\n                pexpect.run(put_cmd)\n                self.logger.info(\"Copied....\")\n                status = self.take_backup(file_path)\n                if not status:\n                    self.logger.info(\"Error: could not backup the file {}\".format(file_path))\n                self.wait_timeout(6)\n        else:\n            self.logger.info(\"There are no files to copy to hdfs. 
Checking after 10secs\")\n            self.wait_timeout(10)\n\n\nclass TestStoreData(object):\n    \"\"\"\n    unit tests for StoreData class\n    \"\"\"\n\n    @classmethod\n    def setup_class(cls):\n        cls.input_path = '/home/hduser/hadoop/app/Bigdata/transfer_out'\n        cls.backup_path = '/home/hduser/hadoop/app/Bigdata/stored_backup'\n        cls.output_path = '/user/hduser/app/data'\n        cls.sd = StoreData(cls.input_path, cls.output_path, cls.backup_path)\n\n    def test_stored(self):\n        \"\"\"\n        unit test to verify the functionality of the store process\n        :return:\n        \"\"\"\n        self.sd.store_data()\n        files = self.sd.check_path(self.sd.backup_path)\n        assert files, \"The copying of files to hdfs failed\"\n\n","sub_path":"data_analysis/stored.py","file_name":"stored.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"468605637","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 04 09:46:58 2019\r\n\r\n@author: admin\r\n\"\"\"\r\n# the PyPI package is pyspellchecker; it is imported as spellchecker and\r\n# exposes the SpellChecker class (the module itself is not callable)\r\nfrom spellchecker import SpellChecker\r\n\r\nspell = SpellChecker()\r\nwords = [\"come\", \"bads\", \"recive\", \"geverment\"]\r\nfor word in words:\r\n    print(f'{word}: {spell.correction(word)}')\r\n    print(f'{word}: {spell.candidates(word)}')\r\n","sub_path":"ttt.py","file_name":"ttt.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"351909738","text":"# reports the room's state for the temperature entered (in degrees Celsius)\n\ndef oda_sicakligi(girilen):\n    if girilen < 22:\n        print(\"Soğuk Odalar..!\")\n    elif (girilen >= 22) and (girilen <= 25):\n        print(\"Oda ideal Sicaklikta!!\")\n    else:\n        print(\"Oda Cok Sicak!!!\")\n\noda_derece = input(\"Oda sicakligini giriniz:\")\noda_derece = float(oda_derece)\noda_sicakligi(oda_derece)\n","sub_path":"11-oda_sicakligi.py","file_name":"11-oda_sicakligi.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"309627122","text":"\"\"\" transforms.py (By: Charley Zhang, July 2020)\nDefine transform objects that allow for deterministic transforms.\n\nConfig Dict Specification:\n\n\"\"\"\n\nimport random\nimport numbers\nimport collections\nfrom PIL import Image, ImageFilter\nimport numpy as np\n\nimport torch\nimport torchvision.transforms as TF\nimport torchvision.transforms.functional as TFF\n\nfrom lib.utils import images\n\n\nclass GeneralTransform:\n    \"\"\" Transform object capable of handling images and masks.\n    Makes some assumptions for simplicity:\n      1 - .transform() takes PILs and .reverse() takes tensors\n      2 - same spatial and img_type transforms are done for X and Y\n    \"\"\"\n    SUPPORTED_TRANSFORMS = set(['resize', 'togray', 'hflip', 'vflip',\n        'bcsh.jitter', 'gamma', 'totensor', 'topil', 'normmeanstd',\n        'gaussianblur', 'crop', 'rtss.affine'])\n    LABEL_TRANSFORMS = set(['resize', 'hflip', 'vflip', 'totensor', 'topil',\n        'crop', 'rtss.affine'])\n    INTERPOLATIONS = { 0: Image.NEAREST, 2: Image.BILINEAR, 3: Image.BICUBIC }\n\n    def __init__(self, transform_list):\n        assert isinstance(transform_list, collections.Sequence)\n        self.transforms_cfg = transform_list\n        self.transforms_det = None # deterministic transforms to be populated\n\n    def transform(self, im, label=False, shake=False, token=False):\n        \"\"\" Transform an image. 
\n Parameters\n img - PIL object to transform\n shake - if True, shakes up random values for transforms\n token - used in reverse() to reverse the transformations\n Returns\n [transformed_img, reverse_token]\n \"\"\"\n assert isPIL(im), f\"Got type {type(im)}.\"\n if shake or not self.transforms_det:\n self._shake()\n \n tokens = []\n for tname, settings in self.transforms_det:\n if label and tname not in GeneralTransform.LABEL_TRANSFORMS:\n continue\n im, tok = self._transform_im(\n im, tname, settings, reverse=False, label=label\n )\n if tok:\n tokens.append(tok)\n if token:\n return im, tokens\n return im\n\n def reverse(self, im, tokens, only_unnorm=True):\n # assert isinstance(im, torch.Tensor), f\"Got type {type(im)}.\"\n if only_unnorm:\n t_names = list(zip(*tokens))[0]\n if 'normmeanstd' in t_names:\n settings = tokens[t_names.index('normmeanstd')][1]\n im, _ = self._transform_im(\n im, 'normmeanstd', settings, reverse=True)\n if 'topil' in t_names:\n im, _ = self._transform_im(im, 'topil', True, reverse=True)\n return im\n for tname, settings in reversed(tokens):\n im = self._transform_im(im, tname, settings, reverse=True)\n return im\n\n def _transform_im(self, im, tname, settings, reverse=False, label=False):\n imsize = images.get_dimensions(im)[-2:]\n if tname == 'resize':\n out_size = settings[:2]\n it = 0 if label else 3 # 3 = bicubic, 2 = bilinear\n return resize(im, out_size, interpolation=it), ['resize', [*imsize]]\n elif tname == 'crop': # settings = h, w, ty, tx (h & w: ratio or abs)\n if 0 <= sum(settings) <= 2:\n h = int(min(imsize[0] * settings[0], imsize[0]))\n w = int(min(imsize[1] * settings[1], imsize[1]))\n else:\n h = int(min(settings[0], imsize[0]))\n w = int(min(settings[1], imsize[1]))\n ty = int(imsize[0] * settings[2])\n tx = int(imsize[1] * settings[3])\n assert 0 <= ty + h <= imsize[0] and 0 <= tx + w <= imsize[1]\n return crop(im, ty, tx, h, w), None\n elif tname == 'hflip':\n return hflip(im), ['hflip', True]\n elif tname == 'vflip':\n return vflip(im), ['vflip', True]\n elif tname == 'gamma':\n assert 0 <= settings, f\"Invalid gamma settings: {settings}\"\n return gamma(im, settings), ['gamma', 1/settings]\n elif tname == 'gaussianblur':\n return gaussian_blur(im, settings), None\n elif tname == 'bcsh.jitter':\n assert len(settings) == 4, f\"Invalid jitter settings: {settings}\"\n im = brightness(im, settings[0])\n im = contrast(im, settings[1])\n im = saturation(im, settings[2])\n im = hue(im, settings[3])\n rev = [1/v for v in settings[:3]] + [-settings[3]]\n return im, ['bcsh.jitter', rev]\n elif tname == 'rtss.affine':\n angle, translate, scale, shear = settings\n assert -1 <= translate[0] <= 1 and -1 <= translate[1] <= 1\n translate = int(translate[0]*imsize[0]), int(translate[1] * imsize[1])\n rev = [-angle, [-t for t in translate], 1/scale, [-s for s in shear]]\n return affine(im, int(angle), translate, scale, shear), \\\n ['rtss.affine', rev]\n elif tname == 'togray':\n return togray(im, out_channels=1), None\n elif tname == 'totensor':\n return totensor(im), ['topil', True]\n elif tname == 'topil':\n return topil(im), ['totensor', True]\n elif tname == 'normmeanstd':\n assert len(settings) == 2, f\"means, stds not lists: {settings}\"\n im = normalize(im, *settings, reverse=reverse)\n return im, ['normmeanstd', settings]\n else:\n raise ValueError(f\"Transform name '{tname}' is not support.\")\n\n def _shake(self):\n \"\"\" Serves 2 main functions:\n (1) Sets self.transforms_det w/new random transforms based on cfg probs\n (2) Weeds out 
non-factors so that all entries are executed\n \"\"\"\n transforms_det = []\n for tname, settings in self.transforms_cfg:\n assert tname in GeneralTransform.SUPPORTED_TRANSFORMS\n if not settings:\n continue\n if tname in ('togray', 'totensor', 'topil'):\n transforms_det.append([tname, settings])\n elif tname == 'resize':\n if isinstance(settings, numbers.Number):\n transforms_det.append([tname, [settings, settings]])\n else:\n assert len(settings) == 2, f\"Given: {tname}, {settings}\"\n transforms_det.append([tname, settings])\n elif tname == 'crop':\n h_w_ty_tx = self._get_cropvals(settings)\n transforms_det.append([tname, h_w_ty_tx])\n elif tname == 'normmeanstd':\n assert len(settings) == 2, f\"Given: {tname}, {settings}\"\n transforms_det.append([tname, settings])\n elif tname == 'hflip' or tname == 'vflip':\n assert 0 <= settings <= 1.\n if random.uniform(0., 1.) <= settings:\n transforms_det.append([tname, True]) \n elif tname == 'gamma':\n g = _uniform_sample(settings, center=1, bounds=(0,float('inf')))\n transforms_det.append([tname, g])\n elif tname == 'gaussianblur':\n if isinstance(settings, numbers.Number):\n settings = (settings, settings)\n assert len(settings) == 2 and settings[0] <= settings[1]\n rad = round(_uniform_sample(settings, bounds=(0,float('inf'))))\n transforms_det.append([tname, rad])\n elif tname == 'bcsh.jitter':\n assert len(settings) == 4, f\"Given: {tname}, {settings}\"\n jittervals = self._get_jittervals(*settings)\n transforms_det.append([tname, jittervals])\n elif tname == 'rtss.affine':\n affinevals = self._get_affinevals(*settings)\n transforms_det.append([tname, affinevals]) \n self.transforms_det = transforms_det\n\n def _get_cropvals(self, settings):\n if isinstance(settings, numbers.Number):\n settings = [settings, settings] # either (ratioh, ratiow) or (h, w)\n assert len(settings) == 2, f\"Settings ({settings}) is not len 2.\"\n\n hwratios = []\n for v in settings:\n if isinstance(v, numbers.Number):\n assert 0 < v <= 1 and 0 < v <= 1\n hwratios.append(v)\n elif isinstance(v, collections.Sequence):\n assert len(v) == 2, f\"Invalid crop h, w settings: {settings}.\"\n hwratios.append(_uniform_sample(v))\n else:\n raise ValueError(f\"Gave invalid settings {settings} for crop.\")\n ty = random.uniform(0, 1. - hwratios[0])\n tx = random.uniform(0, 1. 
- hwratios[1])\n assert ty + hwratios[0] <= 1 and tx + hwratios[1] <= 1\n return hwratios[0], hwratios[1], ty, tx\n\n def _get_jittervals(self, brightness, contrast, saturation, hue):\n jittervals = []\n for v in (brightness, contrast, saturation):\n jittervals.append(_uniform_sample(\n v, center=1, bounds=(.001, float('inf')))\n )\n jittervals.append(_uniform_sample(hue, center=0, bounds=(-0.5, 0.5)))\n return jittervals\n\n def _get_affinevals(self, rot, translate, scale, shear):\n \"\"\"\n Parameters\n rot (num or seq) - num >= 0\n translate (num or seq) - \n \"\"\"\n if isinstance(rot, numbers.Number):\n assert rot >= 0\n rot = (-rot, rot)\n assert len(rot) == 2\n degree = _uniform_sample(rot, bounds=(-180, 180))\n\n translations = []\n if isinstance(translate, numbers.Number):\n translate = (translate, translate) # translate for x, y\n for trans in translate:\n if isinstance(trans, numbers.Number):\n trans = (-trans, trans)\n assert 2 == sum([-1 <= t <= 1 for t in trans])\n translations.append(_uniform_sample(trans, bounds=(-1,1)))\n \n if isinstance(scale, numbers.Number):\n if not scale:\n scale = 1\n assert scale > 0\n else:\n assert len(scale) == 2\n scale = _uniform_sample(scale, bounds=(0,float('inf')))\n \n shears = []\n if isinstance(shear, numbers.Number):\n shear = (shear, shear)\n for s in shear:\n if isinstance(s, numbers.Number):\n s = (-s, s)\n assert len(s) == 2\n shears.append(_uniform_sample(s, bounds=(-180,180)))\n\n return [degree, translations, scale, shears]\n\n\n### Tranform Helpers\n\ndef _uniform_sample(val, center=1, bounds=(0, float('inf'))):\n \"\"\" Sample from uniform distributions with parameters given.\n Parameters\n val (list or number) - list indicates range, val represents offset\n center (number) - only used if val is number\n bounds (list: 2 numbers) - indicates min & max bound to sample from\n \"\"\"\n if not val: # is None, 0, or an empty sequence\n return center\n elif isinstance(val, collections.Sequence):\n assert len(val) == 2, f\"Value seq not valid ({v}).\"\n assert val[0] <= val[1], f\"Req: v0 <= v1. 
Got: ({val[0]}, {val[1]}).\"\n return random.uniform(max(bounds[0], val[0]), min(bounds[1], val[1]))\n else:\n assert isinstance(val, numbers.Number), f\"Val ({val}) has invalid type.\"\n return random.uniform(\n max(bounds[0], center - val), min(bounds[1], center + val)\n ) \n\n\n### ======================================================================== ###\n### * ### * ### * ### * Main Raster ImageTransforms * ### * ### * ### * ###\n### ======================================================================== ###\n\ndef isPIL(obj):\n return True if 'PIL' in str(obj.__class__) else False\n\ndef resize(pil, out_size, interpolation=2):\n assert isPIL(pil), f\"Got type {type(pil)}.\"\n return TFF.resize(pil, out_size, interpolation=interpolation)\n\ndef crop(pil, tl_y, tl_x, h, w):\n assert isPIL(pil), f\"Got type {type(pil)}.\"\n return TFF.crop(pil, tl_y, tl_x, h, w)\n\ndef vflip(im):\n assert isPIL(im) or isinstance(im, torch.Tensor), f\"Got type {type(im)}.\"\n return TFF.vflip(im) # returns PIL\n\ndef hflip(im):\n assert isPIL(im) or isinstance(im, torch.Tensor), f\"Got type {type(im)}.\"\n return TFF.hflip(im) # returns PIL\n\ndef affine(pil, angle, translate, scale, shear):\n assert isPIL(pil), f\"Got type {type(pil)}.\"\n return TFF.affine(pil, angle, translate, scale, shear)\n\ndef rotate(pil, angle):\n assert isPIL(pil), f\"Got type {type(pil)}.\"\n return TFF.rotate(pil, angle)\n\ndef affine(im, angle, translate, scale, shear):\n assert isPIL(im), f\"Got type {type(im)}.\"\n assert -180 <= angle <= 180, f\"Invalid angle: {angle}\"\n return TFF.affine(im, angle, translate, scale, shear)\n\ndef totensor(im):\n assert isPIL(im) or isinstance(np.ndarray), f\"Got type {type(im)}.\"\n return TFF.to_tensor(im)\n\ndef topil(im):\n if isinstance(im, torch.Tensor):\n if im.shape[0] == 1 or im.shape[0] == 3:\n # im = im.permute(1,2,0) # im is still float32\n pass\n elif isinstance(im, np.ndarray):\n if im.shape[0] == 1 or im.shape[0] == 3:\n im = np.moveaxis(im, 0, -1)\n if np.max(im) > 1:\n im = im.astype(np.uint8)\n else:\n print(f\"WARNING: np image is full of floats.\")\n else:\n raise ValueError(f\"Expects np.array or tensor, got {type(im)}.\")\n # mode = 'gray' if im.shape[-1] == 1 else 'RGB'\n pil = TFF.to_pil_image(im, mode=None)\n return pil\n\ndef togray(pil, out_channels):\n assert isPIL(pil), f\"Expected PIL, got type {type(pil)}.\"\n return TFF.to_grayscale(pil, num_output_channels=out_channels)\n\ndef normalize(tens, mean, std, reverse=False):\n assert isinstance(tens, torch.Tensor), f\"Got type {type(tens)}.\"\n if reverse:\n meant = torch.tensor(mean)\n stdt = torch.tensor(std)\n C = tens.shape[0]\n assert meant.shape[0] == stdt.shape[0] == C\n return tens * stdt.view(C, 1, 1) + meant.view(C, 1, 1)\n return TFF.normalize(tens, mean, std)\n\ndef gaussian_blur(pil, radius):\n assert isPIL(pil), f\"Expected PIL, got type {type(pil)}.\"\n return pil.filter(ImageFilter.GaussianBlur(radius=radius))\n\n## Color Jitter\ndef brightness(im, factor):\n if factor == 1:\n return im\n assert isPIL(im) or isinstance(torch.Tensor), f\"Got type {type(im)}.\"\n return TFF.adjust_brightness(im, factor)\n\ndef contrast(im, factor):\n if factor == 1:\n return im\n assert isPIL(im) or isinstance(torch.Tensor), f\"Got type {type(im)}.\"\n return TFF.adjust_contrast(im, factor)\n\ndef hue(im, factor):\n if factor == 0:\n return im\n assert isPIL(im) or isinstance(torch.Tensor), f\"Got type {type(im)}.\"\n return TFF.adjust_hue(im, factor)\n\ndef saturation(im, factor):\n if factor == 1:\n return 
im\n assert isPIL(im) or isinstance(torch.Tensor), f\"Got type {type(im)}.\"\n return TFF.adjust_saturation(im, factor)\n\ndef gamma(pil, gamma, gain=1):\n if gamma == 1 and gain == 1:\n return pil\n assert isPIL(pil), f\"Got type {type(im)}.\"\n return TFF.adjust_gamma(pil, gamma, gain=gain)\n\n\n\n### ======================================================================== ###\n### * ### * ### * ### * Main Coordinate Transforms * ### * ### * ### * ###\n### ======================================================================== ###\n\nclass CoordinateTransform(GeneralTransform):\n \"\"\" Transform object capable of handling images, masks, and coordinates.\n Makes some assumptions for simplicity:\n 1 - .transform() takes PILs and .reverse() takes tensors\n 2 - same spatial and img_type transforms are done for X and Y\n \"\"\"\n COORD_TRANSFORMS = set(['resize', 'hflip', 'vflip', 'crop', 'rtss.affine'])\n\n def __init__(self, transform_list):\n super(CoordinateTransform, self).__init__(transform_list)\n\n def transform_coord(self, im, shake=False, token=False):\n pass\n\n\ndef resize_coord():\n pass\n\ndef crop_coord():\n pass\n\ndef flip_coord():\n pass\n\ndef affine_coord():\n pass\n","sub_path":"src/lib/data/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":16193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"549295226","text":"import socket\nimport cv2\nimport numpy\n\naddress=('10.177.59.38',8002)\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.connect(address)\ndef recvall(sock,count):\n buf=b''\n while count:\n newbuf=sock.recv(count)\n if not newbuf: return None\n buf+= newbuf\n count-= len(newbuf)\n return buf\n\nwhile 1:\n length=recvall(s,16)\n stringData=recvall(s,int(length))\n data=numpy.fromstring(stringData,dtype='uint8')\n decimg=cv2.imdecode(data,1)\n cv2.imshow('client',decimg)\n if cv2.waitKey(10)==27:\n break\ns.close()\ncv2.destroyAllWindows()\n","sub_path":"client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"122988989","text":"# 6.* Реализовать структуру данных «Товары».\n# Она должна представлять собой список кортежей.\n# Каждый кортеж хранит информацию об отдельном товаре.\n# В кортеже должно быть два элемента — номер товара и словарь с параметрами\n# (характеристиками товара: название, цена, количество, единица измерения).\n# Структуру нужно сформировать программно, т.е. 
запрашивать все данные у пользователя.\n# Пример готовой структуры:\n# [\n# (1, {“название”: “компьютер”, “цена”: 20000, “количество”: 5, “eд”: “шт.”}),\n# (2, {“название”: “принтер”, “цена”: 6000, “количество”: 2, “eд”: “шт.”}),\n# (3, {“название��: “сканер”, “цена”: 2000, “количество”: 7, “eд”: “шт.”})\n# ]\n# Необходимо собрать аналитику о товарах.\n# Реализовать словарь, в котором каждый ключ — характеристика товара,\n# например название, а значение — список значений-характеристик,\n# например список названий товаров.\n# Пример:\n# {\n# “название”: [“компьютер”, “принтер”, “сканер”],\n# “цена”: [20000, 6000, 2000],\n# “количество”: [5, 2, 7],\n# “ед”: [“шт.”]\n# }\n\nlist_num = int(input(\"Введите количество позиций: \"))\n# создание массива\ng_list = []\n\n# цикл добавления товаров\ni = 1\nwhile i <= list_num:\n # ввод данных\n g_name = input(\"Введите название: \")\n g_price = input(\"Введите цену: \")\n g_count = input(\"Введите количество: \")\n g_type = input(\"Введите ед. измерения: \")\n # запись в словарь\n add_arr = {\"Название\": g_name, \"Цена\": g_price, \"Количество\": g_count, \"Ед\": g_type}\n # создание кортежа\n g_cort = (i, add_arr)\n # добавление в массив\n g_list.append(g_cort)\n i += 1\nprint(g_list)\n\n# аналитика по товарам\n\nout_name = set()\nout_price = set()\nout_count = set()\nout_type = set()\n\na = 0\nwhile a < len(g_list):\n out_name.add(g_list[a][1][\"Название\"])\n out_price.add(g_list[a][1][\"Цена\"])\n out_count.add(g_list[a][1][\"Количество\"])\n out_type.add(g_list[a][1][\"Ед\"])\n a += 1\n\nstats = {\n \"Название\": list(out_name),\n \"Цена\": list(out_price),\n \"Количество\": list(out_count),\n \"Ед\": list(out_type)\n}\n\nprint(stats)\n","sub_path":"lesson2/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"510193761","text":"#!/python\nimport sys,os,copy\nfrom datetime import *\nimport json,re\nimport csv\nfrom decimal import Decimal\nimport progressbar\n\ndef add2date(idate,years=0,months=0,days=0):\n day = idate.day - 1 + days\n year = idate.year + years\n month = idate.month + months\n dyear,month = divmod(month,12)\n if month == 0:\n dyear -= 1\n month = 12\n rdate = datetime(year + dyear,month,1).date() + timedelta(days = day)\n return rdate\n\ndef loan_short(ibeg,iend):\n return (((iend - ibeg).days <= 365) or ((iend - ibeg).days == 366 and iend.day == ibeg.day))\n\ndef str2date(idate,fmt=None):\n idate = idate.strip()\n if not fmt is None:\n idate = datetime.strptime(idate,fmt)\n else:\n if idate == '':\n return None\n if idate.lower() in ['today','now']:\n return datetime.today().date()\n psplit = idate.find(' ')\n if psplit > 0:\n idate = idate[:psplit]\n psplit = idate.find('T')\n if psplit > 0:\n idate = idate[:psplit]\n try:\n year,month,day = map(int,idate.split('-'))\n except:\n try:\n year,month,day = map(int,idate.split('.'))\n except:\n try:\n year,month,day = map(int,idate.split('/'))\n except:\n return None\n if type(idate) is str:\n if year > 1700:\n idate = datetime(year,month,day).date()\n elif day < 100 and year < 100:\n idate = datetime(day + 2000,month,year).date()\n elif day > 1700:\n idate = datetime(day,month,year).date()\n return idate\n\ndef str2dec(idec):\n idec = idec.replace(\"'\",'').replace('\"','').replace(' ','')\n if idec == '' or idec.lower() == 'null':\n idec = 0\n else:\n if idec.find(',') >= 0:\n if idec.find('.') >= 0:\n idec = idec.replace(',','')\n else:\n if idec.count(',') > 1:\n 
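# two or more commas can only be thousands separators, so they are all dropped;\n                    # a single comma is treated as a decimal comma and swapped for a dot below\n                    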
idec = idec.replace(',','')\n else:\n idec = idec.replace(',','.')\n try:\n idec = Decimal(idec)\n except:\n idec = None\n return idec\n\ndef str2bool(ibool):\n ibool = ibool.replace(\"'\",'').replace('\"','').replace(' ','')\n ibool = ibool.lower()\n return (ibool in ['true','yes','1'])\n\ndef config(fname):\n if os.path.exists(fname):\n with open(fname,'r',encoding=detect_by_bom(fname)) as f:\n try:\n cfg = json.load(f)\n except IOError:\n print (\"Не могу прочитать файл конфигурации: \",fname)\n return None\n else:\n print(\"Файл конфигурации не найден: \",fname)\n return None\n return cfg\n\n\ndef months(d2,d1):\n if d2 > d1:\n d1,d2 = d2,d1\n return (d1.year - d2.year) * 12 + d1.month - d2.month\n\ndef years(d2,d1):\n if d2 > d1:\n d1,d2 = d2,d1\n return d1.year - d2.year\n\ndef days(d2,d1,days30=False):\n if d2 > d1:\n d1,d2 = d2,d1\n from datetime import timedelta\n if days30:\n rez = 0\n for d in (d1 - timedelta(n) for n in range(int((d1 - d2).days + 1))):\n rez += 1\n if ((d - d2).days != 0) and (d.day == 1):\n rez = rez + 30 - int((d - timedelta(days=1)).day)\n return rez - 1\n else:\n return (d1 - d2).days\n \ndef prep_txt(txt):\n txt = txt.replace('\\r','')\n txt = txt.replace(' ',' ')\n txt = txt.replace(' ',' ')\n txt = txt.replace(' ',' ')\n txt = txt.replace(' ',' ')\n txt = txt.replace(' ',' ')\n txt = txt.replace('\\n ','\\n')\n txt = txt.replace(' \\n','\\n')\n txt = txt.replace('` ','`')\n txt = txt.replace(' `','`')\n return txt\n\ndef printb(*objects,sep=' ',end='\\n',file=sys.stdout):\n enc = file.encoding\n if enc == 'UTF-8':\n print(*objects,sep=sep,end=end,file=file)\n else:\n f = lambda obj: str(obj).encode(enc,errors='backslashreplace').decode(enc)\n print(*map(f,objects),sep=sep,end=end,file=file)\n\ndef install(package):\n \"\"\" Install packages \"\"\"\n x = __file__.split('.')\n if len(x) > 1:\n if x[len(x) - 1] != 'py': return\n import subprocess\n rez = subprocess.Popen([sys.executable, \"-m\", \"pip\", \"install\", package],stdout = subprocess.PIPE,universal_newlines = True)\n for line in rez.stdout:\n if line.strip().startswith('Requirement already satisfied'):\n pass\n else:\n sys.stdout.write(line)\n\ndef connect(params):\n \"\"\" Connect to database server \"\"\"\n conn = None\n prm = {'type':'','host':'','database':'','jdbc_class':'','jdbc_path':'','url':'','user':'','password':'','driver':''}\n for i in range(len(prm)):\n try:\n prm[list(prm.keys())[i]] = params[list(prm.keys())[i]]\n except:\n prm[list(prm.keys())[i]] = ''\n if prm['type'] == 'pgsql':\n conn = connect_pgsql(prm['host'],prm['database'],prm['user'],prm['password'])\n elif prm['type'] == 'jdbc':\n conn = connect_jdbc(prm['jdbc_class'],prm['url'],prm['user'],prm['password'],prm['jdbc_path'])\n elif prm['type'] == 'odbc':\n conn = connect_odbc(prm['driver'])\n return conn\n\ndef connect_pgsql(host,base,user,pasw):\n \"\"\" Connect to the PostgreSQL database server \"\"\"\n install('psycopg2')\n import psycopg2\n conn = None\n try:\n conn = psycopg2.connect(host=host,database=base,user=user,password=pasw)\n except (Exception,RuntimeError) as error:\n print('Unable to connect!\\n')\n print(error)\n sys.exit(1)\n return conn\n\ndef connect_jdbc(jdbc_class,url,user,pwd,jdbc_path):\n \"\"\" Connect to the JDBC database server \"\"\"\n install('jaydebeapi')\n import jaydebeapi as jdbc\n conn = None\n try:\n conn = jdbc.connect(jdbc_class,[url,user,pwd],jdbc_path)\n conn.jconn.setAutoCommit(True)\n except (Exception,RuntimeError) as error:\n print('Unable to connect!\\n')\n print(error)\n 
sys.exit(1)\n    return conn\n\ndef connect_odbc(drv):\n    \"\"\" Connect to the ODBC database server \"\"\"\n    install('pyodbc')\n    import pyodbc\n    conn = None\n    d = ''\n    driver = drv.split(';',1)\n    try:\n        d = re.search('{(.+?)}',driver[0]).group(1)\n    except AttributeError:\n        print('No driver info!')\n        sys.exit(1)\n    try:\n        for x in pyodbc.drivers():\n            if d in x:\n                drv = x\n                break\n        conn = pyodbc.connect('DRIVER={' + drv + '};' + driver[1],autocommit=True)\n    except (Exception,RuntimeError) as error:\n        print('Unable to connect!\\n')\n        print(error)\n        sys.exit(1)\n    return conn\n\ndef query(conn,xquery):\n    \"\"\" Query data from table \"\"\"\n    cur = conn.cursor()\n    try:\n        cur.execute(xquery)\n    except (Exception,RuntimeError) as error:\n        print(error)\n    return cur\n\ndef insert(conn,tab,fld,val):\n    \"\"\" Insert data into table \"\"\"\n    try:\n        cur = conn.cursor()\n        sql = \"INSERT INTO \" + tab + \"(\" + fld + \") VALUES(\" + val + \");\"\n        cur.execute(sql)\n        cur.close()\n    except (Exception,RuntimeError) as error:\n        print(error)\n\ndef txt2dict(fname,maps,dates,dateformat,decimals,bools,skip,quotechar,delimiter):\n    if fname == '':\n        return []\n    objects = []\n    if os.path.exists(fname):\n        with open(fname,'r',encoding=detect_by_bom(fname)) as f:\n            try:\n                _csv = csv.reader(f,delimiter=delimiter,quotechar=quotechar)\n            except IOError:\n                print (\"Ошибка чтения файла: \",fname)\n                sys.exit(1)\n            row_count = sum(1 for row in _csv)\n            f.seek(0)\n            cols = next(_csv,None)\n            # actually apply the cleanup (the original loop discarded its result)\n            cols = [prep_txt(c).strip() for c in cols]\n            with progressbar.ProgressBar(max_value=row_count) as bar:\n                for k,line in enumerate(_csv):\n                    obj = {}\n                    cod = ''\n                    for i,key in enumerate(cols):\n                        if key in maps.keys():\n                            cod = maps[key]\n                        else:\n                            continue\n                        val = prep_txt(line[i]).strip()\n                        if val in skip:\n                            continue\n                        if cod in dates:\n                            try:\n                                val = datetime.strptime(val,dateformat)\n                            except ValueError:\n                                val = str2date(val)\n                        elif cod in bools:\n                            val = str2bool(val)\n                        elif cod in decimals:\n                            val = str2dec(val)\n                        obj.update({cod:val})\n                    objects.append(obj)\n                    bar.update(k)\n    else:\n        print(\"Файл не найден: \",fname)\n        sys.exit(1)\n    return objects\n\ndef x_str(val,dateformat):\n    if type(val) is datetime or type(val) is date:\n        rez = val.strftime(dateformat)\n    elif not type(val) is str:\n        rez = str(val)\n    else:\n        rez = val\n    return rez\n\ndef cbs_fill(template,data,dateformat):\n    if isinstance(template,list):\n        for elem in template:\n            cbs_fill(elem,data,dateformat)\n    else:\n        if isinstance(template,dict):\n            for field in template:\n                if (isinstance(template[field],list) and len(template[field]) > 0 and (isinstance(template[field][0],list) or isinstance(template[field][0],dict))) or isinstance(template[field],dict):\n                    cbs_fill(template[field],data,dateformat)\n                else:\n                    if not (field in ['code','value','values','rows','rowId']):\n                        if field in data.keys():\n                            template[field] = x_str(data[field],dateformat) # was _str: the helper above is defined as x_str\n                    else:\n                        if field == 'code':\n                            if any(k in template.keys() for k in ('values','value','rowId','rows')):\n                                if template[field] in data.keys():\n                                    if 'value' in template.keys():\n                                        template['value'] = x_str(data[template[field]],dateformat)\n                                    elif 'rowId' in template.keys():\n                                        template['rowId'] = x_str(data[template[field]],dateformat)\n                            else:\n                                template[field] = data[field]\n\ndef cbs_group(obj,name,template,data,dateformat):\n    exist = False\n    prev = copy.deepcopy(template)\n    cbs_fill(template,data,dateformat)\n    if prev != template:\n        for x in obj['groups']:\n            if x['code'] == name:\n                x['rows'].append(template[0])\n                exist = True\n        if not exist:\n            
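# no group with this code exists yet, so create one seeded with the filled template rows\n            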
obj['groups'].append({'code':name,'rows':template})\n\ndef cbs_nullify_callback(key,container):\n if not (key in ['code','value','values','rows','rowId']):\n if container[key] == '' or container[key] == 'None' or container[key] is None:\n del container[key]\n else:\n if key == 'code':\n if any(k in container.keys() for k in ('values','value','rowId','rows')):\n if 'value' in container.keys() and (container['value'] == '' or container['value'] == 'None'):\n del container[key]\n del container['value']\n if 'rowId' in container.keys() and container['rowId'] == '':\n del container[key]\n del container['rowId']\n\ndef nullify(container,callback=None,delete=False):\n for key in list(container):\n if isinstance(container,list) and not isinstance(key,(list,dict)):\n continue\n if isinstance(key,(list,dict)):\n nullify(key,callback=callback,delete=delete)\n elif key in container.keys() and isinstance(container[key],(dict,list)):\n nullify(container[key],callback=callback,delete=delete)\n elif callback is None:\n if container[key] == '' or container[key] == 'None' or container[key] is None:\n if delete:\n container.pop(key,None)\n else:\n container[key] = None\n else:\n callback(key,container)\n\ndef clean_empty(container):\n if not isinstance(container,(dict,list)):\n return container\n if isinstance(container,list):\n return [v for v in (clean_empty(v) for v in container) if v]\n return {k: v for k,v in ((k,clean_empty(v)) for k,v in container.items()) if v}\n\ndef cbs_clear(container):\n nullify(container,callback=cbs_nullify_callback,delete=True)\n return clean_empty(container)\n\ndef detect_by_bom(path,default='utf-8'):\n import codecs\n with open(path, 'rb') as f:\n raw = f.read(4)\n for enc,boms in ('utf-8-sig',(codecs.BOM_UTF8,)),('utf-16',(codecs.BOM_UTF16_LE,codecs.BOM_UTF16_BE)),('utf-32',(codecs.BOM_UTF32_LE,codecs.BOM_UTF32_BE)):\n if any(raw.startswith(bom) for bom in boms):\n return enc\n return default\n\ndef str_meta_comp(s1,s2,lang='ru',type='names'):\n import jellyfish\n from metaphone import doublemetaphone\n met1 = doublemetaphone(trans_lit(s1,lang,type))\n met2 = doublemetaphone(trans_lit(s2,lang,type))\n m = 0\n for m1 in met1:\n if m1 == '': continue\n for m2 in met2:\n if m2 == '': continue\n m = max(jellyfish.jaro_distance(m1,m2),m)\n return m\n\ndef trans_lit(txt,lang='ru',type='names'):\n if lang == 'ru':\n if type == 'names':\n abc = 'абвгдеёзийклмнопрстуфыэ'\n trn = 'abvgdeeziiklmnoprstufye'\n txt = txt.lower().translate(''.maketrans(abc,trn))\n txt = txt.replace('ж','zh').replace('х','kh').replace('ц','ts').replace('ч','ch').replace('ш','sh').replace('щ','shch').replace('ю','iu').replace('я','ia').replace('ъ','ie').replace('ь','')\n return txt\n\ndef nps_1c(nps):\n if len(nps) > 4:\n return (nps[0:4] + '.' 
+ nps[4:])\n return nps\n","sub_path":"cbs-exp-1c/xfuncs.py","file_name":"xfuncs.py","file_ext":"py","file_size_in_byte":14210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"479495108","text":"##############################################################################\n#\n# HZ\n# Copyright (C) 2017.\n#\n##############################################################################\n\nfrom odoo.osv import expression\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError, AccessError, ValidationError\nimport odoo.addons.decimal_precision as dp\nimport pdb\n\nclass SaleBonusConfig(models.Model):\n _name = 'sale.bonus.config'\n\n sale_amount = fields.Float(string='Sale Amount', required=True)\n bonus_trans = fields.Float(string='Bonus Transaksi (%)')\n bonus_referal = fields.Float(string='Bonus Referal (%)')\n bonus_sponsor = fields.Float(string='Bonus Sponsor (Rp.)')\n bonus_gen = fields.Float(string='Bonus Gen (%)')\n bonus_level = fields.Selection([('1', 'Level 1'), ('2', 'Level 2'), ('3', 'Level 3')], default='1', required=True)\n\nclass SaleLead(models.Model):\n _name = 'sale.lead'\n _rec_name = 'customer_name'\n _inherit = ['mail.thread', 'ir.needaction_mixin']\n\n\n customer_name = fields.Char(string='Customer Name', required=True)\n company_name = fields.Char(string='Company Name')\n street = fields.Char(string='Alamat')\n bidang = fields.Char(string='Bidang')\n phone = fields.Char(string='Phone')\n email = fields.Char(string='Email')\n partner_id = fields.Many2one('res.partner', string='Partner')\n project_estimation = fields.Float(string='Project Estimation Amount')\n followup_date = fields.Datetime(string='Tanggal Ekspektasi Difollowup', required=True, default=fields.Datetime.now)\n notes = fields.Text(string='Notes')\n salesman_id = fields.Many2one('res.users', string='Salesman')\n referal_id = fields.Many2one('res.users', string='Referal', default=lambda self: self.env.user.id)\n order_id = fields.Many2one('sale.order', string='SO Number')\n state = fields.Selection([('draft', 'Draft'), ('waiting', 'Waiting Salesman'),('open', 'Follow UP'), ('done', 'Goal'), ('lose', 'Lose')], string='State', default='draft')\n\n \n @api.multi\n def accept_order(self):\n self.write({'salesman_id': self.env.user.id})\n\n @api.multi\n def confirm(self):\n self.write({'state': 'waiting'})\n\n @api.multi\n def convert_to_quotation(self):\n partner_id = self.env['res.partner'].create({\n 'name': self.customer_name,\n 'customer': True,\n 'phone': self.phone,\n 'email': self.email,\n 'street': self.street\n })\n so_id = self.env['sale.order'].create({'partner_id': partner_id.id, 'referal_id': self.referal_id.id})\n\n self.write({'state': 'done'})\n\n return {\n 'name': _('Quotation'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': so_id.id,\n 'res_model': 'sale.order',\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n }\n\n @api.multi\n def convert_to_lose(self):\n self.write({'state': 'lose'})\n\nclass SalesOrder(models.Model):\n _inherit = 'sale.order'\n\n referal_id = fields.Many2one('res.users', string='Referal')\n\nclass ResUsers(models.Model):\n _inherit = 'res.users'\n\n @api.multi\n def _compute_total_bonus(self):\n for self in self:\n bonus_referal = 0\n bonus_sponsor = 0\n bonus_transaksi = 0\n bonus_gen = 0\n sale_ids = []\n # COMPUTE BONUS SPONSOR & TRANSAKSI\n for x in self.sales_ids:\n bonus_config = self.env['sale.bonus.config'].search([('sale_amount', '<=', x.amount_total), 
('bonus_level', '=', self.env.user.bonus_level)], order=\"sale_amount desc\", limit=1)\n if not bonus_config:\n continue\n if x.state not in ('draft', 'cancel'):\n bonus_sponsor += bonus_config.bonus_sponsor\n bonus_transaksi += x.amount_total * (bonus_config.bonus_trans / 100)\n\n # COMPUTE BONUS REFERAL \n # SELECT DISTINCT PARTNER COZ BONUS APPLY JUST ONLY ONCE PER PARTNER\n self.env.cr.execute(\"\"\"SELECT DISTINCT ON(partner_id) partner_id, id FROM sale_order WHERE invoice_status = 'invoiced' AND referal_id = %s \"\"\" %(self.id,))\n sale_data = self.env.cr.dictfetchall()\n for ids in sale_data:\n sale_ids.append(ids['id'])\n \n orders = self.env['sale.order'].search([('id', 'in', sale_ids)])\n for x in orders:\n bonus_config = self.env['sale.bonus.config'].search([('sale_amount', '<=', x.amount_total), ('bonus_level', '=', self.env.user.bonus_level)], order=\"sale_amount desc\", limit=1)\n if not bonus_config:\n continue\n if x.state not in ('draft', 'cancel'):\n bonus_referal += x.amount_total * (bonus_config.bonus_referal / 100)\n\n # COMPUTE BONUS GEN\n for y in self.child_ids:\n for x in sale_ids:\n bonus_config = self.env['sale.bonus.config'].search([('sale_amount', '<=', x.amount_total), ('bonus_level', '=', self.env.user.bonus_level)], order=\"sale_amount desc\", limit=1)\n if not bonus_config:\n continue\n if x.state not in ('draft', 'cancel'):\n bonus_gen += x.amount_total * (bonus_config.bonus_gen / 100)\n\n self.bonus_sponsor = bonus_sponsor\n self.bonus_transaksi = bonus_transaksi\n self.bonus_referal = bonus_referal\n self.bonus_gen = bonus_gen\n self.total_bonus = bonus_referal + bonus_sponsor + bonus_transaksi + bonus_gen\n\n @api.one\n def _compute_bonus_due(self):\n self.bonus_due = self.total_bonus - self.paid_bonus\n\n @api.one\n def _compute_paid_bonus(self):\n paid = 0\n payment = self.env['account.payment'].search([('partner_id', '=', self.partner_id.id), ('payment_type', '=', 'outbound'), ('state', '=', 'posted')])\n for y in payment:\n paid += y.amount\n self.paid_bonus = paid\n\n\n @api.one\n def _compute_bonus_level(self):\n if self.parent_id:\n if self.parent_id.parent_id:\n self.bonus_level = '3'\n else:\n self.bonus_level = '2'\n else:\n self.bonus_level = '1'\n\n\n parent_id = fields.Many2one('res.users', string='Parent Referal')\n child_ids = fields.One2many('res.users', 'parent_id', string='Child Referal')\n bonus_referal = fields.Float(string='Total Bonus Referal', digits=dp.get_precision('Product Price'), compute=\"_compute_total_bonus\")\n bonus_sponsor = fields.Float(string='Total Bonus Sponsor', digits=dp.get_precision('Product Price'), compute=\"_compute_total_bonus\")\n bonus_transaksi = fields.Float(string='Total Bonus Transaksi', digits=dp.get_precision('Product Price'), compute=\"_compute_total_bonus\")\n bonus_gen = fields.Float(string='Total Bonus Gen', digits=dp.get_precision('Product Price'), compute=\"_compute_total_bonus\")\n total_bonus = fields.Float(string='Total Bonus', digits=dp.get_precision('Product Price'), compute='_compute_total_bonus')\n bonus_due = fields.Float(string='Bonus Due', digits=dp.get_precision('Product Price'), compute='_compute_bonus_due')\n paid_bonus = fields.Float(string='Paid Bonus', digits=dp.get_precision('Product Price'), compute='_compute_paid_bonus')\n sales_ids = fields.One2many('sale.order', 'referal_id', domain=[('invoice_status', '=', 'invoiced')], string='Sales Order')\n parent_id = fields.Many2one('res.users', string='Parent')\n bonus_level = fields.Selection([('1', 'Level 1'), ('2', 'Level 2'), 
('3', 'Level 3')], compute='_compute_bonus_level')\n","sub_path":"addons/rjl_referal/models/referal.py","file_name":"referal.py","file_ext":"py","file_size_in_byte":7771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"190164491","text":"\n\ndef mainDict():\n age = {'Peter': 5, 'John': 7}\n\n#iterate through Dictionary\n for i in age:\n print(i)\n\n#key and value\n for i in age:\n print(\"Name = {0:s}, Age = {1:d}\".format(i, age[i]))\n\n#key and value alternate\n for i,j in age.items():\n print('Name = {0:s}, Age = {1:d}'.format(i, j))\n\n\n\ndef main():\n pets = ['cats', 'dogs', 'rabbits', 'hamsters']\n\n#contents of list\n for myPets in pets:\n print(myPets)\n\n#enumerate\n for index, myPets in enumerate(pets):\n print(index, myPets)\n\n\nif __name__ == '__main__':\n main()\n mainDict()","sub_path":"Learn_Python_In_One_Day/Interactive_809.py","file_name":"Interactive_809.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"500456517","text":"import discord\nfrom oldnight import Stream\n\n# discord.py Client instance\nClient = discord.Client()\n\n# oldnight.py Stream instance\nstream = Stream()\n\n\n@Client.event\nasync def on_ready():\n \"\"\"\n Login check\n\n \"\"\"\n print('We have logged in as {0.user}'.format(Client))\n\n\nasync def run_stream():\n \"\"\"\n This will turn on the stream and deliver notifications as new comments arrives.\n\n \"\"\"\n await Client.wait_until_ready()\n\n while not Client.is_closed():\n for item in stream.get_comments(sleep=35):\n timestamp = item[0]\n claim = item[2]\n author = item[1]\n content = item[3]\n discord_ids = item[4]\n embed = discord.Embed(title=\"Old Night\",\n description=f\"Someone is talking to you:\\n{author}\\n\\n\",\n color=0xEE8700)\n embed.set_thumbnail(url=Client.user.avatar_url_as(format='png'))\n embed.add_field(name=f\"{claim}\", value=f\"{timestamp}\\n{content}\")\n if isinstance(discord_ids, list):\n for discord_id in discord_ids:\n discord_id = int(discord_id)\n target = Client.get_user(discord_id)\n await target.send(embed=embed)\n if isinstance(discord_ids, str):\n discord_ids = int(discord_ids)\n target = Client.get_user(discord_ids)\n await target.send(embed=embed)\n\n# Run background loop\nClient.loop.create_task(run_stream())\n\n\n# Run discord Client\nClient.run('tokehere')\n","sub_path":"stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"304193307","text":"#! 
/usr/bin/env python\n\nimport os\n\n\ndef main():\n    if not os.path.exists(\"sentences.csv\"):\n        os.system(\"wget http://tatoeba.org/files/downloads/sentences.csv\")\n    if not os.path.exists(\"links.csv\"):\n        os.system(\"wget http://tatoeba.org/files/downloads/links.csv\")\n\n    sentences = {}\n    links = {}\n    #ghost = {}\n\n    senfile = open(\"sentences.csv\")\n    while True:\n        line = senfile.readline()\n        if line == '':\n            break\n        line = line.strip()\n        try:\n            id, lang, sen = line.split('\\t')\n        except ValueError:\n            continue # skip malformed rows instead of reusing the previous row's ids\n        sentences[id] = line\n\n    linkfile = open(\"links.csv\", \"r\")\n    # make sure data/ exists before opening data/links.csv for writing\n    try:\n        os.mkdir(\"data/\")\n    except OSError:\n        pass\n    fixedlinks = open(\"data/links.csv\", \"w\")\n    while True:\n        line = linkfile.readline()\n        if line == '':\n            break\n        line = line.strip()\n        main_sentence, translated_sentence = line.split('\\t')\n        try:\n            tr = sentences[translated_sentence]\n        except KeyError:\n            #ghost[translated_sentence] = 0\n            continue # there is a sentence that is linked but doesn't exist\n\n        try:\n            tr_id, tr_lang, tr_sen = tr.split('\\t')[:3]\n        except ValueError:\n            continue\n\n        try:\n            sentences[main_sentence] += '\\t' + tr_lang + ':' + tr_sen\n        except KeyError:\n            #ghost[main_sentence] = 0\n            continue\n\n        main_lang = sentences[main_sentence].split('\\t')[1]\n        fixedlinks.write(\"%s\\t%s\\t%s\\t%s\\n\" % (main_lang, main_sentence, tr_lang, translated_sentence))\n\n    outfiles = {}\n\n    for k in sentences:\n        lang = sentences[k].split('\\t')[1]\n        # dict.has_key() and the file() builtin no longer exist in Python 3\n        if lang not in outfiles:\n            outfiles[lang] = open(\"data/\" + lang + \".csv\", \"w\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"preparefiles.py","file_name":"preparefiles.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"100546458","text":"import pandas as pd\nfrom enum import Enum\n\n\nclass Dataset(Enum):\n    breast_cancer = 1\n    hepatitis = 2\n\n\nclass Preprocessing:\n    \"\"\"Data acquisition, analysis and cleaning\n\n    We used the following datasets:\n    - Dataset 1: breast cancer wisconsin.csv (Breast Cancer dataset):\n      https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)\n    - Dataset 2: hepatitis.csv (Hepatitis dataset):\n      http://archive.ics.uci.edu/ml/datasets/Hepatitis\n    \"\"\"\n\n    DATASET1 = \"../data/breast_cancer_wisconsin.csv\"\n    DATASET2 = \"../data/hepatitis.csv\"\n\n    def __init__(self, dataset):\n        \"\"\"data acquisition\"\"\"\n        # load the chosen dataset into a pandas dataframe\n        if dataset == Dataset.breast_cancer:\n            self.df = pd.read_csv(Preprocessing.DATASET1, na_values=\"?\")\n        elif dataset == Dataset.hepatitis:\n            self.df = pd.read_csv(Preprocessing.DATASET2, na_values=\"?\")\n        else:\n            raise IndexError(\"Dataset enumeration between 1 and 2\")\n\n    def clean(self):\n        print(\"Cleaning dataset ...\")\n        self.df = self._remove_duplicate_rows()\n        self.df = self._remove_empty_rows()\n\n        # drop the irrelevant id column\n        self.df.drop(\"id\", axis=1, inplace=True, errors=\"ignore\")\n        print(\"\\nDropped irrelevant features\")\n\n        # move the class column to the first position\n        class_col = self.df.pop(\"Class\")\n        self.df.insert(0, \"Class\", class_col)\n        print(\"Set class column to the first position\")\n\n        self.df.reset_index(drop=True, inplace=True)\n        print(\"Reset indices\")\n\n    def _remove_empty_rows(self):\n        \"\"\"remove rows that contain a column with an empty value\"\"\"\n        clean_df = self.df.dropna()\n        rows_removed = 
Preprocessing._rows_removed(self.df, clean_df)\n percentage_removed = Preprocessing._percentage_removed(rows_removed, self.df)\n print(\n \"\\nRemoved empty rows: \\nRemoved {percentage}% of rows \\n{count} rows removed\".format(\n percentage=percentage_removed, count=rows_removed\n )\n )\n return clean_df\n\n def _remove_duplicate_rows(self):\n \"\"\"rimuove le righe con tutte le funzioni duplicate\"\"\"\n clean_df = self.df.drop_duplicates()\n rows_removed = Preprocessing._rows_removed(self.df, clean_df)\n percentage_removed = Preprocessing._percentage_removed(rows_removed, self.df)\n print(\n \"\\nRemoved duplicate rows: \\nRemoved {percentage}% of rows \\n{count} rows removed\".format(\n percentage=percentage_removed, count=rows_removed\n )\n )\n return clean_df\n\n def normalize_features(self):\n class_col = self.df.pop(\"Class\")\n self.df = (self.df - self.df.min()) / (self.df.max() - self.df.min())\n self.df.insert(0, \"Class\", class_col)\n\n _rows_removed = lambda df1, df2: abs(df1.shape[0] - df2.shape[0])\n _percentage_removed = lambda removed, df: round(100 * removed / df.shape[0], 2)\n\n @staticmethod\n def get_preprocessed_datasets(normalize_features=True):\n # Carica set di dati in un preprocessore\n cancer_preprocessor = Preprocessing(Dataset.breast_cancer)\n hepatitis_preprocessor = Preprocessing(Dataset.hepatitis)\n\n # Pulizia dei dati\n print(\"Cleaning hepatitis dataset\")\n hepatitis_preprocessor.clean()\n\n print(\"\\nCleaning breat cancer dataset\")\n cancer_preprocessor.clean()\n if normalize_features:\n hepatitis_preprocessor.normalize_features()\n cancer_preprocessor.normalize_features()\n return cancer_preprocessor.df, hepatitis_preprocessor.df\n\n @staticmethod\n def get_labels_features(df):\n \"\"\"separa le etichette e le caratteristiche in un set di dati e le restituisce\"\"\"\n dframe = df.copy(deep=True)\n labels = dframe.pop(\"Class\").to_numpy()\n features = dframe.to_numpy()\n return features, labels\n","sub_path":"Esame Icon/src/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"426346274","text":"from flask import Blueprint, request, jsonify, g\nfrom utils.db_utils import redis\nfrom instance import config\nfrom utils.token_utils import TokenMaker\nimport datetime\nfrom utils.db_utils import mysqlpool\nimport base64\nimport json\nfrom utils.json_helper import DateEncoder\n\nuser = Blueprint(\"user\", __name__)\n\n\n@user.route(\"/login/\", methods=[\"POST\"])\ndef login():\n \"\"\"用户登录功能\"\"\"\n json_data = request.get_json()\n username = json_data.get(\"userName\")\n password = json_data.get(\"password\")\n\n # 通过用户表获取token 相关信息 并判断账号密码是否正确\n conn = mysqlpool.get_conn()\n with conn.swich_db(config.WOWRKSHEET01) as cursor:\n return_list = conn.query_one(\n \"select * from {} where UserName=%s and Password=%s\".format(config.TABLENAME16), [username, password])\n if return_list:\n # 生成token\n token = TokenMaker().generate_token(return_list[\"id\"], datetime.datetime.now())\n date_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n ip = request.remote_addr\n conn.update(\n \"update {} set LastLoginTime=%s,loginIp=%s where UserName=%s and Password=%s\".format(\n config.TABLENAME16),\n [date_time, ip, username, password])\n else:\n return jsonify({\n \"code\": -1,\n \"data\": \"账号或密码错误,请重新输入账号密码\"})\n\n # 缓存用户信息\n redis_value = base64.b64encode(json.dumps(return_list, 
+{"seq_id":"426346274","text":"from flask import Blueprint, request, jsonify, g\nfrom utils.db_utils import redis\nfrom instance import config\nfrom utils.token_utils import TokenMaker\nimport datetime\nfrom utils.db_utils import mysqlpool\nimport base64\nimport json\nfrom utils.json_helper import DateEncoder\n\nuser = Blueprint(\"user\", __name__)\n\n\n@user.route(\"/login/\", methods=[\"POST\"])\ndef login():\n    \"\"\"User login.\"\"\"\n    json_data = request.get_json()\n    username = json_data.get(\"userName\")\n    password = json_data.get(\"password\")\n\n    # look the user up in the user table and check that the credentials are correct\n    conn = mysqlpool.get_conn()\n    with conn.swich_db(config.WOWRKSHEET01) as cursor:\n        return_list = conn.query_one(\n            \"select * from {} where UserName=%s and Password=%s\".format(config.TABLENAME16), [username, password])\n        if return_list:\n            # generate a token\n            token = TokenMaker().generate_token(return_list[\"id\"], datetime.datetime.now())\n            date_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n            ip = request.remote_addr\n            conn.update(\n                \"update {} set LastLoginTime=%s,loginIp=%s where UserName=%s and Password=%s\".format(\n                    config.TABLENAME16),\n                [date_time, ip, username, password])\n        else:\n            return jsonify({\n                \"code\": -1,\n                \"data\": \"Wrong username or password, please enter them again\"})\n\n    # cache the user info\n    redis_value = base64.b64encode(json.dumps(return_list, cls=DateEncoder).encode(\"utf-8\"))\n    redis.set(token, redis_value)\n    redis.expire(token, config.TOKEN_LIVE_TIME)\n\n    return jsonify({\n        \"code\": 1,\n        \"msg\": {\"roleId\": return_list[\"RoleID\"], \"token\": token, \"userId\": return_list[\"id\"],\n                \"userName\": username}\n    })\n\n\n@user.route(\"/queryUserByUserName/\", methods=[\"POST\"])\ndef queryUserByUserName():\n    \"\"\"Query endpoint for the logged-in user's info.\"\"\"\n    json_data = request.get_json()\n    username = json_data[\"userName\"]\n    try:\n        conn = mysqlpool.get_conn()\n        with conn.swich_db(config.WOWRKSHEET01) as cursor:\n            # look up the user's role IDs via the roles_users table; one user may be linked to several roles\n            rolesid = conn.query_all(\n                \"select a.roleid,b.RoleName from {tableA} as a LEFT JOIN {tableB} as b on a.roleid= b.id where a.username=%s\".format(\n                    tableA=config.TABLENAME19, tableB=config.TABLENAME17), [username])\n            rolesid_list = []\n            rolesname_list = []\n            for i in rolesid:\n                rolesid_list.append(i[\"roleid\"])\n                rolesname_list.append(i[\"RoleName\"])\n            user_msg = conn.query_one(\n                \"select a.id,a.DepartmentID,a.LastLoginTime,a.RealName,a.Mobile,a.Email,a.status,a.remark,a.loginIp,b.full_name,b.short_name from {tableA} as a LEFT JOIN {tableB} as b on a.DepartmentID = b.number where a.UserName=%s\".format(\n                    tableA=config.TABLENAME16, tableB=config.TABLENAME21), [username])\n            # helper: return \"\" when the value is null\n            ft = lambda x: \"\" if not x else x\n            return_dic = {}\n            return_dic[\"DepartmentID\"] = user_msg[\"DepartmentID\"]\n            return_dic[\"Email\"] = ft(user_msg[\"Email\"])\n            return_dic[\"LastLoginTime\"] = user_msg[\"LastLoginTime\"].strftime(\"%Y-%m-%d %H:%M:%S\")\n            return_dic[\"Mobile\"] = ft(user_msg[\"Mobile\"])\n            return_dic[\"RealName\"] = user_msg[\"RealName\"]\n            return_dic[\"RoleID\"] = rolesid_list\n            return_dic[\"RoleName\"] = rolesname_list\n            return_dic[\"UserName\"] = username\n            return_dic[\"full_name\"] = user_msg[\"full_name\"]\n            return_dic[\"id\"] = user_msg[\"id\"]\n            return_dic[\"loginIp\"] = user_msg[\"loginIp\"]\n            return_dic[\"remark\"] = ft(user_msg[\"remark\"])\n            return_dic[\"short_name\"] = ft(user_msg[\"short_name\"])\n            return_dic[\"status\"] = user_msg[\"status\"]\n\n    except Exception as e:\n        raise e\n    return jsonify({\n        \"code\": 1,\n        \"data\": {\n            \"page\": 1,\n            \"rows\": [return_dic],\n            \"total\": 1\n        }\n    })\n\n\n@user.route(\"/signout/\", methods=[\"POST\"])\ndef signout():\n    \"\"\"User logout.\"\"\"\n    json_data = request.get_json()\n    token = json_data.get(\"token\")\n\n    redis.delete(token)\n\n    return jsonify({\n        \"code\": 1,\n        \"data\": \"Signed out successfully\"\n    })\n","sub_path":"app/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
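The login view in the record above caches the serialized user row under the token and lets it expire after config.TOKEN_LIVE_TIME; a minimal sketch of that pattern with redis-py (the connection details and the 3600 s TTL are placeholder assumptions, and a Redis server must be reachable):

```python
import base64
import json

import redis  # assumes the redis-py package and a local Redis server

r = redis.Redis(host="localhost", port=6379)

token = "example-token"            # placeholder; the app derives this from TokenMaker
user_row = {"id": 1, "RoleID": 2}

# serialize -> base64 -> store with a TTL in one call (equivalent to set + expire)
payload = base64.b64encode(json.dumps(user_row).encode("utf-8"))
r.setex(token, 3600, payload)

cached = json.loads(base64.b64decode(r.get(token)))
print(cached)  # {'id': 1, 'RoleID': 2}
```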
+{"seq_id":"167307762","text":"import peewee as pw\nfrom playhouse.shortcuts import model_to_dict\nfrom db.models import *\nimport os\nfrom conf.conf_manager import ConfManager\n\n\nclass DBManager(object):\n    def __init__(self):\n        self.configs = ConfManager().get_config()\n        if not os.path.isfile(self.configs['db_path']):\n            self.create_tables()\n\n    def create_tables(self):\n        model_list = [Video, Cast, CastRecord, Genre, GenreRecord]\n        db.connect()\n        for m in model_list:\n            if not m.table_exists():\n                m.create_table()\n\n    def add_video(self, video_info):\n        try:\n            video = Video.create(\n                title=video_info['title'],\n                video_id=video_info['video_id']\n            )\n        except pw.IntegrityError:\n            return False\n\n        for cast in video_info['cast_list']:\n            cast_obj, created = Cast.get_or_create(name=cast)\n            CastRecord.create(video=video, cast=cast_obj)\n\n    def batch_add_video(self, video_list):\n        with db.atomic():\n            for video in video_list:\n                Video.get_or_create(**video)\n\n    def list_all_video(self):\n        '''\n        return list of dict of video objects\n        '''\n        # video_list = self.c.execute(\"SELECT * FROM Video\").fetchall()\n        video_list = [model_to_dict(v) for v in Video.select()]\n        for video in video_list:\n            video['cast_list'] = self.get_video_cast(video['id'])\n            video['genre_list'] = self.get_video_genre(video['id'])\n        return video_list\n\n    def list_all_cast(self):\n        cast_list = [model_to_dict(c) for c in Cast.select()]\n        return cast_list\n\n    def get_video(self, pk):\n        video_obj = Video.get(id=pk)\n        return video_obj\n\n    def get_video_cast(self, pk):\n        q = CastRecord.select().where(CastRecord.video==pk)\n        cast_list = [record.cast.name for record in q]\n        return cast_list\n\n    def get_video_genre(self, pk):\n        q = GenreRecord.select().where(GenreRecord.video==pk)\n        genre_list = [record.genre.name for record in q]\n        return genre_list\n\n    def query_by_video_id(self, video_id):\n        q = Video.select().where(Video.video_id.contains(video_id))\n        video_list = [model_to_dict(v) for v in q]\n        for video in video_list:\n            video['cast_list'] = self.get_video_cast(video['id'])\n            video['genre_list'] = self.get_video_genre(video['id'])\n        return video_list\n\n    def query_cast_by_name(self, name):\n        q = Cast.select().where(Cast.name.contains(name))\n        cast_list = [model_to_dict(v) for v in q]\n        return cast_list\n\n    def query_video_by_cast(self, cast_obj):\n        q = CastRecord.select().where(CastRecord.cast==cast_obj['name'])\n        video_list = [model_to_dict(v.video) for v in q]\n        for video in video_list:\n            video['cast_list'] = self.get_video_cast(video['id'])\n            video['genre_list'] = self.get_video_genre(video['id'])\n        return video_list\n\n    def add_cast(self, cast_name):\n        Cast.create(name=cast_name)\n\n    def update_video_id(self, pk, video_id):\n        video_obj = Video.get(id=pk)\n        video_obj.video_id = video_id\n        video_obj.save()\n\n    def update_video_title(self, pk, title):\n        video_obj = Video.get(id=pk)\n        video_obj.title = title\n        video_obj.save()\n\n    def update_video_cast(self, pk, cast_list):\n        video_obj = Video.get(id=pk)\n        # get_or_create returns (object, True) if it created the row, else (object, False)\n        for cast in cast_list:\n            cast_obj, created = Cast.get_or_create(name=cast)\n            CastRecord.get_or_create(video=video_obj, cast=cast_obj)\n\n    def update_video_genre(self, pk, genre_list):\n        video_obj = Video.get(id=pk)\n        for genre in genre_list:\n            genre_obj, created = Genre.get_or_create(name=genre)\n            GenreRecord.get_or_create(video=video_obj, genre=genre_obj)\n\n    def is_video_empty(self):\n        return self.list_all_video() == []\n\n\nif __name__ == \"__main__\":\n    db = DBManager()\n    print(db.query_video_by_cast({\"name\": \"平原心\"}))\n","sub_path":"videocat/db/db_manager.py","file_name":"db_manager.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
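DBManager in the record above leans on peewee's get_or_create and db.atomic() to make batch inserts idempotent; a self-contained sketch of that pattern against an in-memory SQLite database (the Tag model is illustrative, not from the record):

```python
import peewee as pw

db = pw.SqliteDatabase(":memory:")


class Tag(pw.Model):
    name = pw.CharField(unique=True)

    class Meta:
        database = db


db.connect()
db.create_tables([Tag])

# one transaction for the whole batch; duplicates are returned, not re-inserted
with db.atomic():
    for name in ["drama", "comedy", "drama"]:
        tag, created = Tag.get_or_create(name=name)
        print(tag.name, created)  # drama True / comedy True / drama False
```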
+{"seq_id":"86338582","text":"import urllib.parse, urllib.request\nimport xml.dom.minidom\n\nFISHEYE_HOST = \"https://fisheye.devops.mnscorp.net/\"\n\ndef callFishEye(methodname, **args):\n    data = urllib.parse.urlencode(args).encode(\"ascii\")\n    fp = urllib.request.urlopen(FISHEYE_HOST+\"api/rest/\"+methodname, data)\n    doc = xml.dom.minidom.parse(fp)\n    fp.close()\n    doc.normalize()\n    root = doc.documentElement\n    if root.tagName == \"response\":\n        return ElementWrapper(root)\n    raise Exception(root.toxml())\n\n# a very dodgy helper wrapper around DOM elements\nclass ElementWrapper:\n    def __init__(self, element):\n        self.element = element\n    def __getitem__(self, key):\n        k = key\n        items = None\n        if k[-7:] == \"__array\":\n            k = k[:-7]\n            items = []\n        elif self.element.hasAttribute(k):\n            # look for an attribute\n            return self.element.getAttribute(k)\n        for node in self.element.childNodes:\n            if node.nodeType == node.ELEMENT_NODE and node.tagName == k:\n                if items is None: return ElementWrapper(node)\n                items.append(ElementWrapper(node))\n        return items\n\n    def has_key(self, key):\n        return self[key] is not None\n\n    def __repr__(self):\n        return self.element.toxml()\n    def __str__(self):\n        rc = \"\"\n        for node in self.element.childNodes:\n            if node.nodeType == node.TEXT_NODE:\n                rc = rc + node.data\n        return rc\n\n\nauth = callFishEye(\"login\", username=\"wallboard\", password=\"5xPMtyyJUT\")['string']\n#auth = callFishEye(\"login\", username=\"username\", password=\"password\")['string']\nprint(\"Using auth:\", auth)\n\nreps = callFishEye(\"repositories\", auth=auth)['string__array']\nprint(\"found %i repositories\" % len(reps))\nprint(reps)\n\nrep = reps[0]\nrc = callFishEye(\"changesets\", auth=auth, rep=rep, path='/')['changesets']\nprint(\"recent changes:\")\ncsids = rc['csids']['string__array']\nprint(\"maxReturn=\", rc['maxReturn'], \"returned=\", len(csids))\n\n\ncsid = csids[0]\n\ncs = callFishEye('changeset', auth=auth, rep=rep, csid=csid)['changeset']\nprint(\"changeset\", cs['csid'])\nprint(\"author %s, date %s, branch %s\" % (cs['author'], cs['date'], cs['branch']))\nprint(cs['log'])\nlastFile = None\nfor ri in cs['revisions']['revisionkey__array']:\n    print(\"- %s %s\" % (ri['path'], ri['rev']))\n    lastFile = ri['path']\n\n\nprint(\"looking at\", lastFile)\nhist = callFishEye('pathHistory', auth=auth, rep=rep, path=lastFile)['history']\nfor r in hist['revisions']['revision__array']:\n    print(r['rev'], r['author'], r['date'])\n    if r.has_key('csid'): print(r['csid'])\n    if r.has_key('ancestor'): print(r['ancestor'])\n    print(r['log'])\n\n    r2 = callFishEye('revision', auth=auth, rep=rep, path=lastFile, rev=r['rev'])['revision']\n    assert r2['rev'] == r['rev']\n\n\nprint(\"trying logout:\", callFishEye('logout', auth=auth)['boolean'])\n#print \"listing reps after logout:\", callFishEye(\"repositories\", auth=auth)\n","sub_path":"Examples/pyexample.py","file_name":"pyexample.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"364489748","text":"from resources import resource_interface as interface\n\nclass LoadBalancerCloudWatchLogs(object):\n\n    def __init__(self, log_classes: list):\n        for log_class in log_classes:\n            if not isinstance(log_class, interface.InterfaceResource):\n                raise Exception('Incorrect interface for: {}'.format(log_class))\n\n        for log_class in log_classes:\n            if not log_class.is_resource_created():\n                log_class.create_resource()","sub_path":"load_balancer_cloud_watch_logs.py","file_name":"load_balancer_cloud_watch_logs.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
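ElementWrapper in the record above resolves a key first as an XML attribute and then as a child element (with an __array suffix collecting repeats); a self-contained sketch of those underlying minidom lookups on a canned response (the XML snippet is illustrative):

```python
import xml.dom.minidom

doc = xml.dom.minidom.parseString(
    "<response maxReturn='25'><string>alpha</string><string>beta</string></response>")
root = doc.documentElement

# attribute lookup vs. child-element lookup, the two paths ElementWrapper takes
print(root.getAttribute("maxReturn"))        # 25
strings = [n for n in root.childNodes
           if n.nodeType == n.ELEMENT_NODE and n.tagName == "string"]
print([s.firstChild.data for s in strings])  # ['alpha', 'beta']
```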
connection.\n\nprint('Server listening....')\n\nos.chdir(\"/\")\n\n\ndef runner(conn, addr):\n    curdir = \"/\"\n    while True:\n        data = conn.recv(1024)\n        if not data:\n            break  # client disconnected\n        print('Server received', repr(data))\n\n        text = data.decode()\n        cmd_and_params = text.split(\" \")\n        cmd = cmd_and_params[0]\n        params = cmd_and_params[1:]\n\n        if cmd == \"get\":\n            filepath = os.path.join(os.getcwd(), \" \".join(params))\n            with open(filepath, \"rb\") as file:\n                data = file.read()\n            conn.send(data)\n        elif cmd == \"cd\":\n            filepath = curdir+\"/\"+\"\".join(params)\n            curdir = filepath\n            os.chdir(\"\".join(params))\n            conn.send(b\"Successfully Changed Directory\")\n        elif cmd == \"listdir\" or cmd == \"dir\" or cmd == \"ld\":\n            filepath = curdir+\"/\"+\"\".join(params)\n            entries = os.listdir(\".\")\n\n            listing = \"Files:\\n\"\n            for i in entries:\n                listing += \" {file} | size={size}\\n\".format(file=i, size=os.path.getsize(os.path.join(os.getcwd(), \" \".join(params), i)))\n            conn.send(listing.encode())\n        elif cmd == \"/exit\":\n            conn.close()\n            break  # stop serving this client once the socket is closed\n        elif cmd == \"curdir\":\n            conn.send(os.getcwd().encode())\n\nwhile True:\n    conn, addr = s.accept() # Establish connection with client.\n    print('Got connection from', addr)\n    threading.Thread(None, runner, args=(conn, addr)).start()\n    # filename='mytext.txt'\n    #\n    # conn.send(b\"%s\" % filename)\n    # f = open(filename,'rb')\n    # l = f.read(1024)\n    # while (l):\n    #     conn.send(l)\n    #     print('Sent ', repr(l))\n    #     l = f.read(1024)\n    # f.close()\n    #\n    # print('Done sending')\n    # # conn.send(b'File: ' % )\n    # conn.close()\n\n","sub_path":"libtransfer2/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"376959402","text":"'''\nhttps://www.win.tue.nl/~aeb/graphs/srg/srgtab.html\nhttp://www.maths.gla.ac.uk/~es/srgraphs.php\n\n\nproblem_35_16_6_8 = (35, 16, 6, 8, [])\nproblem_36_10_4_2 = (36, 10, 4, 2, [])\nproblem_37_18_8_9 = (37, 18, 8, 9, [])\nproblem_40_12_2_4 = (40, 12, 2, 4, [])\nproblem_40_27_18_18 = (40, 27, 18, 18, [])\nproblem_41_20_9_10 = (41, 20, 9, 10, [])\nproblem_45_12_3_3 = (45, 12, 3, 3, [])\nproblem_45_32_22_24 = (45, 32, 22, 24, [])\nproblem_49_12_5_2 = (49, 12, 5, 2, [])\nproblem_49_30_17_20 = (49, 30, 17, 20, [])\nproblem_50_21_8_9 = (50, 21, 8, 9, [])\nproblem_50_28_15_16 = (50, 28, 15, 16, [])\nproblem_53_26_12_13 = (53, 26, 12, 13, [])\nproblem_55_18_9_4 = (55, 18, 9, 4, [])\nproblem_56_10_0_2 = (56, 10, 0, 2, [])\nproblem_57_24_11_9 = (57, 24, 11, 9, [])\nproblem_61_30_14_15 = (61, 30, 14, 15, [])\nproblem_63_30_13_15 = (63, 30, 13, 15, [])\nproblem_64_14_6_2 = (64, 14, 6, 2, [])\nproblem_65_32_15_16 = (65, 32, 15, 16, [])\n\n'''\n\ndef list_problems():\n    import pkgutil\n    problem_solutions = [m.name for m in pkgutil.iter_modules(__path__) if m.name.startswith('problem')]\n    return problem_solutions\n\ndef extract_vklu(problem: str):\n    _, *vklu = problem.split('_')\n    return list(map(int, vklu))\n\ndef get_solutions(v,k,l,u) -> list:\n    import importlib\n    name = __package__ + f'.problem_{v}_{k}_{l}_{u}'\n    m = importlib.import_module(name)\n    return m.solutions\n\n\ndef draw(v, k, l, u):\n    import networkx as nx\n    import matplotlib.pyplot as plt\n    matrices = get_solutions(v,k,l,u)\n    for i, matrix in enumerate(matrices):\n\n        fig = plt.figure()\n\n        nodes = {n: str(n) for n in range(v)}\n        graph = nx.Graph()\n        graph.add_nodes_from(nodes.keys())\n\n        pos = nx.circular_layout(graph)\n        nx.draw_networkx_labels(graph, pos, nodes)\n\n        for r, c in zip(*matrix.nonzero()):\n            graph.add_edge(r, c)\n\n        nx.draw_circular(graph)\n\n        plt.axis('equal')\n        pngname = f'srg_{v}_{k}_{l}_{u}_{i}.png'\n        fig.savefig(pngname)\n        print(pngname)","sub_path":"srg/database/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
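The srg(v, k, l, u) parameter tuples in the record above must satisfy the standard counting identity for strongly regular graphs, k(k - λ - 1) = (v - k - 1)μ, obtained by counting the edges between a vertex's neighbourhood and non-neighbourhood in two ways; a small checker over a few tuples taken from the docstring above:

```python
def srg_identity_holds(v: int, k: int, l: int, u: int) -> bool:
    # For a strongly regular graph srg(v, k, lambda, mu):
    #   k * (k - lambda - 1) == (v - k - 1) * mu
    return k * (k - l - 1) == (v - k - 1) * u


# Tuples from the docstring in the record above
for params in [(36, 10, 4, 2), (56, 10, 0, 2), (64, 14, 6, 2)]:
    print(params, srg_identity_holds(*params))  # all True
```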
+{"seq_id":"564084301","text":"import requests\nimport json\nimport datetime\nimport time\n\nfrom config import Config\nfrom models import dict_to_camel, Actor, Execution, ExecutionsSummary, Nonce, Worker, get_permissions, \\\n    set_permission\nfrom worker import shutdown_workers, shutdown_worker\nfrom stores import actors_store, executions_store, logs_store, nonce_store, permissions_store\nfrom prometheus_client import start_http_server, Summary, MetricsHandler, Counter, Gauge, generate_latest\nfrom channels import ActorMsgChannel, CommandChannel, ExecutionResultsChannel\nfrom agaveflask.logs import get_logger\nlogger = get_logger(__name__)\n\nmessage_gauges = {}\nworker_gauges = {}\ncmd_channel_gauges = {}\nPROMETHEUS_URL = 'http://172.17.0.1:9090'\nDEFAULT_SYNC_MAX_IDLE_TIME = 600 # defaults to 10*60 = 600 s = 10 min\n\nMAX_WORKERS_PER_HOST = Config.get('spawner', 'max_workers_per_host')\n\ncommand_gauge = Gauge(\n    'message_count_for_command_channel',\n    'Number of messages currently in this command channel',\n    ['name'])\n\ndef create_gauges(actor_ids):\n    logger.debug(\"METRICS: Made it to create_gauges; actor_ids: {}\".format(actor_ids))\n    inbox_lengths = {}\n    for actor_id in actor_ids:\n        logger.debug(\"top of for loop for actor_id: {}\".format(actor_id))\n\n        channel_name = None\n        try:\n            actor = actors_store[actor_id]\n        except KeyError:\n            logger.error(\"actor {} does not exist.\".format(actor_id))\n            continue\n\n        # If the actor doesn't have a gauge, add one\n        if actor_id not in message_gauges.keys():\n            try:\n                g = Gauge(\n                    'message_count_for_actor_{}'.format(actor_id.replace('-', '_')),\n                    'Number of messages for actor {}'.format(actor_id.replace('-', '_'))\n                )\n                message_gauges.update({actor_id: g})\n                logger.debug('Created gauge {}'.format(g))\n            except Exception as e:\n                logger.error(\"got exception trying to create/instantiate the gauge; \"\n                             \"actor {}; exception: {}\".format(actor_id, e))\n        else:\n            # Otherwise, get this actor's existing gauge\n            try:\n                g = message_gauges[actor_id]\n            except Exception as e:\n                logger.info(\"got exception trying to instantiate an existing gauge; \"\n                            \"actor: {}: exception:{}\".format(actor_id, e))\n\n        # Update this actor's command channel metric\n        channel_name = actor.get(\"queue\")\n\n        queues_list = Config.get('spawner', 'host_queues').replace(' ', '')\n        valid_queues = queues_list.split(',')\n\n        if not channel_name or channel_name not in valid_queues:\n            channel_name = 'default'\n\n        # Update this actor's gauge to its current # of messages\n        try:\n            ch = ActorMsgChannel(actor_id=actor_id)\n        except Exception as e:\n            logger.error(\"Exception connecting to ActorMsgChannel: {}\".format(e))\n            raise e\n        result = {'messages': len(ch._queue._queue)}\n        inbox_lengths[actor_id] = len(ch._queue._queue)\n        ch.close()\n        g.set(result['messages'])\n        logger.debug(\"METRICS: {} messages found for actor: {}.\".format(result['messages'], actor_id))\n\n        # add a worker gauge for this actor if one does not exist\n        if actor_id not in worker_gauges.keys():\n            try:\n                g = Gauge(\n                    'worker_count_for_actor_{}'.format(actor_id.replace('-', '_')),\n                    'Number of workers for actor {}'.format(actor_id.replace('-', '_'))\n                )\n                worker_gauges.update({actor_id: g})\n                logger.debug('Created worker gauge {}'.format(g))\n            except Exception as e:\n                logger.info(\"got exception trying to instantiate the Worker Gauge: {}\".format(e))\n        else:\n            # Otherwise, get the worker gauge that already exists\n            g = worker_gauges[actor_id]\n\n        # Update this actor's worker IDs\n        workers = Worker.get_workers(actor_id)\n        result = {'workers': len(workers)}\n        g.set(result['workers'])\n        logger.debug(\"METRICS: {} workers found for actor: {}.\".format(result['workers'], actor_id))\n\n    if not channel_name:\n        return\n    ch = CommandChannel(name=channel_name)\n    cmd_length = len(ch._queue._queue)\n    command_gauge.labels(channel_name).set(cmd_length)\n    logger.debug(\"METRICS COMMAND CHANNEL {} size: {}\".format(channel_name, command_gauge))\n    ch.close()\n\n    # Return actor_ids so we don't have to query for them again later\n    return actor_ids, inbox_lengths, cmd_length\n\n
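create_gauges above builds one prometheus_client Gauge per actor plus a single labelled gauge shared across command channels; a minimal sketch of both shapes (the metric names here are illustrative, not the module's own):

```python
from prometheus_client import Gauge

# one gauge per entity, name derived from its id
g = Gauge("message_count_for_actor_demo", "Number of messages for actor demo")
g.set(7)

# one gauge shared across entities, distinguished by a label
cmd = Gauge("message_count_for_command_channel_demo",
            "Messages currently in this command channel", ["name"])
cmd.labels("default").set(3)
```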
def calc_change_rate(data, last_metric, actor_id):\n    change_rate = 0\n    try:\n        previous_data = last_metric[actor_id]\n        previous_message_count = int(previous_data[0]['value'][1])\n        try:\n            # current sample for this actor (same shape as previous_data)\n            current_message_count = int(data[0]['value'][1])\n            change_rate = current_message_count - previous_message_count\n        except Exception:\n            logger.debug(\"Could not calculate change rate.\")\n    except Exception:\n        logger.info(\"No previous data yet for new actor {}\".format(actor_id))\n    return change_rate\n\n\ndef allow_autoscaling(max_workers, num_workers, cmd_length):\n    # first check if the number of messages on the command channel exceeds the limit:\n    try:\n        max_cmd_length = int(Config.get('spawner', 'max_cmd_length'))\n    except Exception:\n        max_cmd_length = 10\n\n    if cmd_length > max_cmd_length:\n        return False\n    if int(num_workers) >= int(max_workers):\n        logger.debug('METRICS NO AUTOSCALE - criteria not met. {} '.format(num_workers))\n        return False\n\n    logger.debug('METRICS AUTOSCALE - criteria met. {} '.format(num_workers))\n    return True\n\ndef scale_up(actor_id):\n    tenant, aid = actor_id.split('_')\n    logger.debug('METRICS Attempting to create a new worker for {}'.format(actor_id))\n    try:\n        # create a worker & add to this actor\n        actor = Actor.from_db(actors_store[actor_id])\n        worker_id = Worker.request_worker(tenant=tenant, actor_id=actor_id)\n        logger.info(\"New worker id: {}\".format(worker_id))\n        if actor.queue:\n            channel_name = actor.queue\n        else:\n            channel_name = 'default'\n        ch = CommandChannel(name=channel_name)\n        ch.put_cmd(actor_id=actor.db_id,\n                   worker_id=worker_id,\n                   image=actor.image,\n                   tenant=tenant,\n                   stop_existing=False)\n        ch.close()\n        logger.debug('METRICS Added worker successfully for {}'.format(actor_id))\n        return channel_name\n    except Exception as e:\n        logger.debug(\"METRICS - SOMETHING BROKE: {} - {} - {}\".format(type(e), e, e.args))\n        return None\n\n\ndef scale_down(actor_id, is_sync_actor=False):\n    logger.debug(f\"top of scale_down for actor_id: {actor_id}\")\n    workers = Worker.get_workers(actor_id)\n    logger.debug('METRICS NUMBER OF WORKERS: {}'.format(len(workers)))\n    try:\n        while len(workers) > 0:\n            logger.debug('METRICS made it STATUS check')\n            check_ttl = False\n            sync_max_idle_time = 0\n            if len(workers) == 1 and is_sync_actor:\n                logger.debug(\"only one worker, on sync actor. checking worker idle time..\")\n                try:\n                    sync_max_idle_time = int(Config.get('workers', 'sync_max_idle_time'))\n                except Exception as e:\n                    logger.error(f\"Got exception trying to read sync_max_idle_time from config; e:{e}\")\n                    sync_max_idle_time = DEFAULT_SYNC_MAX_IDLE_TIME\n                check_ttl = True\n            worker = workers.pop()\n            if check_ttl:\n                try:\n                    last_execution = int(float(worker.get('last_execution_time', 0)))\n                except Exception as e:\n                    logger.error(f\"metrics got exception trying to compute last_execution! e: {e}\")\n                    last_execution = 0\n                # if worker has made zero executions, use the create_time\n                if last_execution == 0:\n                    last_execution = worker.get('create_time', 0)\n                logger.debug(\"using last_execution: {}\".format(last_execution))\n                try:\n                    last_execution = int(float(last_execution))\n                except Exception:\n                    logger.error(\"Could not cast last_execution {} to int(float()\".format(last_execution))\n                    last_execution = 0\n                if last_execution + sync_max_idle_time < time.time():\n                    # shutdown worker\n                    logger.info(\"OK to shut down this worker -- beyond ttl.\")\n                    # continue onto additional checks below\n                else:\n                    logger.info(\"Autoscaler not shutting down this worker - still time left.\")\n                    break\n\n            logger.debug('METRICS SCALE DOWN current worker: {}'.format(worker['status']))\n            # check status of the worker is ready\n            if worker['status'] == 'READY':\n                # scale down\n                try:\n                    shutdown_worker(actor_id, worker['id'], delete_actor_ch=False)\n                    logger.debug('METRICS shut down worker {}'.format(worker['id']))\n                    continue\n                except Exception as e:\n                    logger.debug('METRICS ERROR shutting down worker: {} - {} - {}'.format(type(e), e, e.args))\n\n    except IndexError:\n        logger.debug('METRICS only one worker found for actor {}. '\n                     'Will not scale down'.format(actor_id))\n    except Exception as e:\n        logger.debug(\"METRICS SCALE DOWN FAILED: {}\".format(e))\n\n","sub_path":"actors/metrics_utils.py","file_name":"metrics_utils.py","file_ext":"py","file_size_in_byte":9802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"155968456","text":"#! /usr/bin/env python\n\nimport wx\n\nfrom fl.wx.FileControl import FileControl\n\n# ============================================================================\nclass ConfigureFileDialog(wx.Dialog):\n# ============================================================================\n    \"\"\"\n    Dialog to allow customization of the application's File sink.\n    \"\"\"\n\n    # ------------------------------------------------------------------------\n    def __init__(self, parent, info={}):\n    # ------------------------------------------------------------------------\n        \"\"\"\n        Initialize underlying wx.Dialog object and setup dialog GUI.\n        \"\"\"\n        super(ConfigureFileDialog, self).__init__(\n            parent, title=\"Configure Raw File Sink:\")\n        self.xInitGui(info)\n\n\n    # ------------------------------------------------------------------------\n    def GetDestinationFile(self):\n    # ------------------------------------------------------------------------\n        \"\"\"\n        Return full path name of the File sink's underlying disk file.\n        \"\"\"\n\n        return str(self.editDestFile.GetValue())\n\n\n    # ------------------------------------------------------------------------\n    def xInitGui(self, info):\n    # ------------------------------------------------------------------------\n        \"\"\"\n        Initialize dialog's GUI elements. 
Initial values are passed through the\n method's info parameter.\n \"\"\"\n \n labelDestFile = wx.StaticText(self, label=\"File Name:\", \n size=(120,20), style=wx.ST_NO_AUTORESIZE)\n self.editDestFile = FileControl(self, value=\"\", size=(-1,22))\n destfile = wx.BoxSizer(wx.HORIZONTAL)\n destfile.Add(labelDestFile, proportion=0, flag=wx.ALL, border=2)\n destfile.Add(self.editDestFile, proportion=1, flag=wx.ALL, border=2)\n \n cancelButton = wx.Button(self, wx.ID_CANCEL, label=\"Cancel\")\n okButton = wx.Button(self, wx.ID_OK, label=\"OK\")\n buttons = wx.BoxSizer(wx.HORIZONTAL)\n buttons.AddStretchSpacer(1)\n buttons.Add(cancelButton, proportion=0, flag=wx.ALL, border=2)\n buttons.Add(okButton, proportion=0, flag=wx.ALL, border=2)\n \n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.AddStretchSpacer(1)\n sizer.Add(destfile, proportion=0, flag=wx.ALL|wx.EXPAND, border=2)\n sizer.AddStretchSpacer(1)\n sizer.Add(buttons, proportion=0, flag=wx.ALL|wx.EXPAND, border=2)\n self.SetSizer(sizer)\n \n if \"filename\" in info:\n self.editDestFile.SetValue(info[\"filename\"])\n \n ","sub_path":"python/app/asig/ConfigureFileDialog.py","file_name":"ConfigureFileDialog.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"624717589","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nfrom odoo.http import request\n\nclass Inherit_res_partner_velo(models.Model):\n _inherit = 'res.partner'\n _order = 'pop_name asc, name asc'\n\n id_object = fields.Char(string=\"Cust ID\")\n contact_name = fields.Char(string=\"IT Contact\")\n contact_title = fields.Char(string=\"Title\")\n contact_cluster = fields.Char(string=\"Cluster\")\n brand_object = fields.Many2one('brand.object', string=\"Brand\")\n technical_contact = fields.Char('Technical Contact')\n customer_type = fields.Selection([('Reguler', 'Reguler'), ('Major', 'Major')], string=\"Customer Type\")\n industry = fields.Many2one('pop.industry', string=\"Industry\")\n industry_cluster = fields.Many2one('pop.industry.cluster', string=\"Industry Cluster\")\n total_service = fields.Integer(compute=\"_count_total_contract\", string=\"Total Service\", store=True)\n total_revenue = fields.Char(string=\"Total Revenue\")\n revenue_month = fields.Float(compute=\"_total_revenue_month\", string=\"Monthly Tariff\", store=True)\n arpu_month = fields.Float(compute=\"_total_arpu_month\", string=\"ARPU/Month\", store=True)\n group_object = fields.Many2one('group.object', string=\"Group\")\n job_position = fields.Char(string=\"Job Position\")\n pop_id = fields.Many2many('pop.service.coverage', string=\"POP\")\n pop_ids = fields.Char(related=\"pop_id.pop_id\", string=\"POP ID\", store=True)\n pop_name = fields.Char(related=\"pop_id.pop_name\", string=\"POP Name\", store=True)\n service_id = fields.Char(string=\"Service ID\", compute=\"_count_total_contract\")\n activation_date = fields.Char(string=\"Activation Date\", compute=\"_count_total_contract\")\n termination_date = fields.Char(string=\"Termination Date\", compute=\"_count_total_contract\")\n exp_date = fields.Char(string=\"Contract Expiry\", compute=\"_count_total_contract\")\n space = fields.Char(' ', readonly=True) \n capacity = fields.Char('Capacity')\n product = fields.Many2one('product.template', string=\"Product\")\n\n sale_subcription_ids = fields.One2many('sale.subscription','partner_id',string=\"Sale Subcription\")\n\n tenant_cold_leads = fields.Char('Cold Leads')\n tenant_mcf_c = 
fields.Boolean(string=\"MCF-C\")\n tenant_sales_focus = fields.Char('Sales Focus')\n\n tenant_am = fields.Many2one('pop.pic', string=\"AM\")\n tenant_remarks = fields.Char(string=\"Remarks\")\n tenant_floor = fields.Char(string=\"Floor\")\n tenant_budget = fields.Char(string=\"Budget\")\n tenant_existing_provider = fields.Char(string=\"Existing Provider\")\n tenant_service = fields.Char(string=\"Service\")\n tenant_program = fields.Selection([('LoveVelo', 'LoveVelo'), ('BeVelo', 'BeVelo')], default='')\n tenant_sales_pipeline2 = fields.Many2one('crm.stage', string=\"Pipeline Progress\")\n tenant_total_user = fields.Integer(string=\"Total User\")\n tenant_expired = fields.Date(string=\"Contract Expiry\")\n tenant_capacity = fields.Char('Capacity')\n tenant_product = fields.Many2one('product.template', string=\"Product\")\n\n marketing_activities = fields.Char('Marketing Activities')\n marketing_date = fields.Date('Date')\n response_date = fields.Date('Response Date')\n req_follow_up = fields.Boolean('Request Follow-Up')\n cold_leads_date = fields.Date('Cold Lead Date')\n\n sales_activities = fields.Char('Sales Activities')\n activities_date = fields.Date('Activities Date')\n opportunity_stage = fields.Char('Opportunity Stage')\n opportunity_stage_budget = fields.Boolean('Budget')\n opportunity_stage_service_delivery = fields.Boolean('Service Delivery')\n opportunity_stage_solution_fit = fields.Boolean('Solution Fit')\n opportunity_stage_pricing = fields.Boolean('Pricing')\n opportunity_stage_relationship = fields.Boolean('Relationship')\n\n tenant = fields.Boolean(string=\"Potential\", default=True)\n tenant_mcf_c = fields.Boolean(string=\"MCF-C\")\n partner = fields.Boolean(string=\"Partner\")\n competitor = fields.Boolean(string=\"Competitor\")\n contact = fields.Boolean(string=\"Contact\")\n\n acc_mgm = fields.Many2one('pic.manager', string=\"AM Manager\")\n pic_partner_id = fields.Many2one('pop.pic', string=\"Account Manager\")\n\n opportunity_stage = fields.Char('Opportunity Stage')\n opportunity_stage1 = fields.Integer(string=\"Opportunity Progress\", compute=\"_compute_opportunity_stage\")\n\n opportunity_stage_budget = fields.Boolean(string=\"Budget\", readonly=True)\n opportunity_stage_service_delivery = fields.Boolean(string=\"Service Delivery\",readonly=True)\n opportunity_stage_solution_fit = fields.Boolean(string='Solution Fit',readonly=True)\n opportunity_stage_pricing = fields.Boolean(string='Pricing',readonly=True)\n opportunity_stage_relationship = fields.Boolean(string='Relationship',readonly=True)\n\n def function_name(self):\n self.tenant = True\n\n @api.depends('opportunity_stage_budget', 'opportunity_stage_service_delivery', 'opportunity_stage_solution_fit','opportunity_stage_pricing','opportunity_stage_relationship')\n def _compute_opportunity_stage(self):\n budget = 20\n service_delivery = 20\n solution_fit = 20\n pricing = 20\n relationship = 20\n for rec in self:\n if rec.opportunity_stage_budget == True:\n rec.opportunity_stage1 = budget\n if rec.opportunity_stage_service_delivery == True:\n rec.opportunity_stage1 = service_delivery\n if rec.opportunity_stage_solution_fit == True:\n rec.opportunity_stage1 = solution_fit\n if rec.opportunity_stage_pricing == True:\n rec.opportunity_stage1 = pricing\n if rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = relationship\n\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_service_delivery == True:\n rec.opportunity_stage1 = budget + service_delivery\n if rec.opportunity_stage_budget 
== True and rec.opportunity_stage_solution_fit == True:\n rec.opportunity_stage1 = budget + solution_fit\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_pricing == True:\n rec.opportunity_stage1 = budget + pricing\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = budget + relationship\n \n if rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_solution_fit == True:\n rec.opportunity_stage1 = service_delivery + solution_fit\n if rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_pricing == True:\n rec.opportunity_stage1 = service_delivery + pricing\n if rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = service_delivery + relationship\n\n if rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_pricing == True:\n rec.opportunity_stage1 = solution_fit + pricing\n if rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = solution_fit + relationship\n if rec.opportunity_stage_pricing == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = pricing + relationship\n \n if rec.opportunity_stage_budget == True and rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_solution_fit == True:\n rec.opportunity_stage1 = budget + service_delivery + solution_fit\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_pricing == True:\n rec.opportunity_stage1 = budget + service_delivery + pricing\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = budget + service_delivery + relationship\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_pricing == True:\n rec.opportunity_stage1 = budget + solution_fit + pricing\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = budget + solution_fit + relationship\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_pricing == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = budget + pricing + relationship\n\n if rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_pricing == True:\n rec.opportunity_stage1 = service_delivery + solution_fit + pricing\n if rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = service_delivery + solution_fit + relationship\n if rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_pricing == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = service_delivery + pricing + relationship\n if rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_pricing == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = solution_fit + pricing + relationship\n \n if rec.opportunity_stage_budget == True and rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_pricing == True:\n rec.opportunity_stage1 = budget + 
service_delivery + solution_fit + pricing\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = budget + service_delivery + solution_fit + relationship\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_pricing == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = budget + service_delivery + pricing + relationship\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_pricing == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = budget + solution_fit + pricing + relationship\n if rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_pricing == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = service_delivery + solution_fit + pricing + relationship\n if rec.opportunity_stage_budget == True and rec.opportunity_stage_service_delivery == True and rec.opportunity_stage_solution_fit == True and rec.opportunity_stage_pricing == True and rec.opportunity_stage_relationship == True:\n rec.opportunity_stage1 = budget + service_delivery + solution_fit + pricing + relationship\n if rec.opportunity_stage_budget == False and rec.opportunity_stage_service_delivery == False and rec.opportunity_stage_solution_fit == False and rec.opportunity_stage_pricing == False and rec.opportunity_stage_relationship == False:\n rec.opportunity_stage1 = 0\n\n @api.depends('sale_subcription_ids')\n def _count_total_contract(self):\n for i in self:\n val_service = ''\n val_act_date = ''\n val_term_date = ''\n val_exp_date = ''\n for n in i.sale_subcription_ids :\n if n.service_id :\n val_service = str(val_service) + str(n.service_id) + ', '\n if n.act_date :\n val_act_date = str(val_act_date) + str(n.act_date) + ', '\n if n.termination_date :\n val_term_date = str(val_term_date) + str(n.termination_date) +', '\n if n.exp_date :\n val_exp_date = str(val_exp_date) + str(n.exp_date) +', '\n\n i.service_id = val_service\n i.activation_date = val_act_date\n i.termination_date = val_term_date\n i.exp_date = val_exp_date\n i.total_service = len(i.sale_subcription_ids)\n \n @api.depends('sale_subcription_ids')\n def _total_revenue_month(self):\n for a in self:\n a.revenue_month = sum(line.recurring_total for line in a.sale_subcription_ids)\n \n @api.depends('sale_subcription_ids')\n def _total_arpu_month(self):\n for b in self:\n b.revenue_month = sum(line.recurring_total for line in b.sale_subcription_ids)\n if len(b.sale_subcription_ids) != 0:\n b.arpu_month = b.revenue_month / (len(b.sale_subcription_ids))\n else:\n b.arpu_month = 0\n\n\nclass velo_customer_brand(models.Model):\n _name = 'brand.object'\n _description = 'Model customer brand'\n\n name = fields.Char(string=\"Name\")\n\nclass velo_customer_group(models.Model):\n _name = 'group.object'\n _description = 'Model customer group'\n\n name = fields.Char(string=\"Name\")\n\nclass pic_manager(models.Model):\n _name = 'pic.manager'\n _description = 'PIC Manager'\n _rec_name = 'pic_manager_name'\n\n pic_manager_name = fields.Many2one('hr.employee', string=\"Manager Name\")\n pic_number_of_customer = fields.Float(string=\"Total Customer\", compute=\"_total_number_of_customers_manager\", store=True)#\n pic_number_of_customer_pop = 
fields.Float(string=\"Customer POP\", compute=\"_total_number_of_customers_pop\", store=True)\n pic_number_of_customer_am = fields.Float(string=\"Customer AM\", compute=\"_total_number_of_customers_am\", store=True)\n \n pic_number_of_customer_industry_cluster = fields.Float(string=\"Number of Customers\", compute=\"_total_number_of_customers_cluster_manager\", store=True)\n pic_number_of_customer_industry_cluster_cluster = fields.Float(string=\"Number of Customers\", compute=\"_total_number_of_customers_cluster_cluster\", store=True)\n pic_number_of_customer_industry_cluster_am = fields.Float(string=\"Number of Customers\", compute=\"_total_number_of_customers_cluster_am\", store=True)\n \n pic_number_of_potential = fields.Float(string=\"Total Potential\", compute=\"_total_number_of_potential_manager\", store=True)\n pic_number_of_potential_pop = fields.Float(string=\"Potential POP\", compute=\"_total_number_of_potential_pop\", store=True)\n pic_number_of_potential_am = fields.Float(string=\"Potential AM\", compute=\"_total_number_of_potential_am\", store=True)\n \n pic_number_of_potential_industry_cluster = fields.Float(string=\"Number of Potential\", compute=\"_total_number_of_potential_cluster_manager\", store=True)\n pic_number_of_potential_industry_cluster_cluster = fields.Float(string=\"Number of Potential\", compute=\"_total_number_of_potential_cluster_cluster\", store=True)\n pic_number_of_potential_industry_cluster_am = fields.Float(string=\"Number of Potential\", compute=\"_total_number_of_potential_cluster_am\", store=True)\n \n pic_current_penetration = fields.Float(string=\"Total Penetration\", compute=\"_total_current_penetration\", store=True)\n pic_current_penetration_industry_cluster = fields.Float(string=\"Current Penetration\", compute=\"_total_current_penetration_industry_cluster\", store=True)\n \n pic_monthly_tariff = fields.Float(string=\"Total Monthly Tariff\", compute=\"_total_monthly_tariff_manager\", store=True)\n pic_monthly_tarif_pop = fields.Float(string=\"Monthly Tariff POP\", compute=\"_total_monthly_tariff_pop\", store=True)\n pic_monthly_tarif_am = fields.Float(string=\"Monthly Tariff AM\", compute=\"_total_monthly_tariff_am\", store=True)\n\n pic_monthly_tarif_cluster = fields.Float(string=\"Monthly Tariff\", compute=\"_total_monthly_tarif_cluster_manager\", store=True)\n pic_monthly_tarif_cluster_cluster = fields.Float(string=\"Monthly Tariff POP\", compute=\"_total_monthly_tarif_cluster_cluster\", store=True)\n pic_monthly_tarif_cluster_am = fields.Float(string=\"Monthly Tariff AM\", compute=\"_total_monthly_tarif_cluster_am\", store=True)\n\n pic_summary_of_specific_industry = fields.Many2one('pop.type', string=\"Specific Industry\")\n pic_period = fields.Date(string=\"Period\")\n pic_annual_quota = fields.Integer(string=\"Annual Quota\")\n pic_quota_this_month = fields.Integer(string=\"OB This Month\")\n pic_quota_last_month = fields.Integer('OB Last Month')\n pic_mcfc_this_month = fields.Integer('MCF-C This Month')\n pic_ytd_achievement = fields.Float(string=\"YTD Achievement\")\n pic_sf_this_month = fields.Integer(related=\"service_coverage_ids.pop_total_sales_focus\", string=\"SF This Month\")\n \n pic_cold_leads = fields.Integer(string=\"Cold Leads\", compute=\"_total_cold_leads\", store=True)\n pic_cold_leads_pop = fields.Integer(string=\"Cold Leads\", compute=\"_total_cold_leads_pop\", store=True)\n pic_cold_leads_am = fields.Integer(string=\"Cold Leads\", compute=\"_total_cold_leads_am\", store=True)\n \n pic_cold_leads_cluster = 
fields.Integer(string=\"Cold Leads\", compute=\"_total_cold_leads_cluster_manager\", store=True )\n pic_cold_leads_cluster_cluster = fields.Integer(string=\"Cold Leads\", compute=\"_total_cold_leads_cluster_cluster\", store=True)\n pic_cold_leads_cluster_am = fields.Integer(string=\"Cold Leads\", compute=\"_total_cold_leads_cluster_am\", store=True)\n\n pic_number_of_arpu = fields.Float(string=\"ARPU\", compute=\"_total_number_of_arpu_manager\", store=True)\n pic_number_of_arpu_pop = fields.Float(string=\"ARPU POP\", compute=\"_total_number_of_arpu_pop\", store=True)\n\n total_rowcount_name_pop = fields.Integer(string=\"Total Pop/Cluster\", compute=\"_total_rowcount_name_pop\", store=True)\n \n industry_cluster_ids = fields.One2many('pop.industry.cluster', 'pop_industry_cluster_manager', string=\"Industry Cluster ID\")\n service_coverage_ids = fields.One2many('pop.service.coverage', 'pic_manager', string=\"Service Coverage ID\")\n pop_pic_am_ids = fields.One2many('pop.pic', 'pic_am_manager', string=\"PIC ID\")\n partner_ids = fields.One2many('res.partner', 'acc_mgm', string=\"Service Coverage Customer\")\n user_id = fields.Many2one('res.users', string='Related User', required=True)\n\n\n\n #total pop/cluster\n @api.depends('service_coverage_ids')\n def _total_rowcount_name_pop(self): \n for service in self: \n total_rowcount = 0.0 \n for line in service.service_coverage_ids:\n total_rowcount += 1\n service.update({\n 'total_rowcount_name_pop': total_rowcount,\n })\n\n #total cold leads Industry Cluster\n @api.depends('industry_cluster_ids')\n def _total_cold_leads_cluster_cluster(self):\n for a in self:\n a.pic_cold_leads_cluster_cluster = sum(line.pop_industry_cluster_cold_leads for line in a.industry_cluster_ids)\n \n @api.depends('pop_pic_am_ids')\n def _total_cold_leads_cluster_am(self):\n for a in self:\n a.pic_cold_leads_cluster_am = sum(line.pic_cold_leads for line in a.pop_pic_am_ids)\n\n @api.depends('pic_cold_leads_cluster_cluster', 'pic_cold_leads_cluster_am')\n def _total_cold_leads_cluster_manager(self):\n for rec in self:\n rec.pic_cold_leads_cluster = rec.pic_cold_leads_cluster_cluster + rec.pic_cold_leads_cluster_am\n \n #total Customer Industry Cluster\n @api.depends('industry_cluster_ids')\n def _total_number_of_customers_cluster_cluster(self):\n for a in self:\n a.pic_number_of_customer_industry_cluster_cluster = sum(line.pop_industry_cluster_total_customer for line in a.industry_cluster_ids)\n \n @api.depends('pop_pic_am_ids')\n def _total_number_of_customers_cluster_am(self):\n for a in self:\n a.pic_number_of_customer_industry_cluster_am = sum(line.pic_number_of_customer_industry_cluster for line in a.pop_pic_am_ids)\n\n @api.depends('pic_number_of_customer_industry_cluster_cluster', 'pic_number_of_customer_industry_cluster_am')\n def _total_number_of_customers_cluster_manager(self):\n for rec in self:\n rec.pic_number_of_customer_industry_cluster = rec.pic_number_of_customer_industry_cluster_cluster + rec.pic_number_of_customer_industry_cluster_am\n \n #total potential industry cluster\n @api.depends('industry_cluster_ids')\n def _total_number_of_potential_cluster_cluster(self):\n for a in self:\n a.pic_number_of_potential_industry_cluster_cluster = sum(line.pop_industry_cluster_total_potential for line in a.industry_cluster_ids)\n \n @api.depends('pop_pic_am_ids')\n def _total_number_of_potential_cluster_am(self):\n for a in self:\n a.pic_number_of_potential_industry_cluster_am = sum(line.pic_number_of_potential_industry_cluster for line in a.pop_pic_am_ids)\n\n 
@api.depends('pic_number_of_potential_industry_cluster_cluster', 'pic_number_of_potential_industry_cluster_am')\n def _total_number_of_potential_cluster_manager(self):\n for rec in self:\n rec.pic_number_of_potential_industry_cluster = rec.pic_number_of_potential_industry_cluster_cluster + rec.pic_number_of_potential_industry_cluster_am\n \n #total monthly tariff industry cluster\n @api.depends('industry_cluster_ids')\n def _total_monthly_tarif_cluster_cluster(self):\n for a in self:\n a.pic_monthly_tarif_cluster_cluster = sum(line.pop_industry_cluster_revenue_month for line in a.industry_cluster_ids)\n \n @api.depends('pop_pic_am_ids')\n def _total_monthly_tarif_cluster_am(self):\n for a in self:\n a.pic_monthly_tarif_cluster_am = sum(line.pic_monthly_tarif_industry_cluster for line in a.pop_pic_am_ids)\n\n @api.depends('pic_monthly_tarif_cluster_cluster', 'pic_monthly_tarif_cluster_am')\n def _total_monthly_tarif_cluster_manager(self):\n for rec in self:\n rec.pic_monthly_tarif_cluster = rec.pic_monthly_tarif_cluster_cluster + rec.pic_monthly_tarif_cluster_am\n \n \n #total customer pop list\n @api.depends('service_coverage_ids')\n def _total_number_of_customers_pop(self):\n for a in self:\n a.pic_number_of_customer_pop = sum(line.pop_total_customer for line in a.service_coverage_ids)\n \n @api.depends('pop_pic_am_ids')\n def _total_number_of_customers_am(self):\n for a in self:\n a.pic_number_of_customer_am = sum(line.pic_number_of_customer for line in a.pop_pic_am_ids)\n\n @api.depends('pic_number_of_customer_pop', 'pic_number_of_customer_am')\n def _total_number_of_customers_manager(self):\n for rec in self:\n rec.pic_number_of_customer = rec.pic_number_of_customer_pop + rec.pic_number_of_customer_am\n \n\n #total potential pop list\n @api.depends('service_coverage_ids')\n def _total_number_of_potential_pop(self):\n for a in self:\n a.pic_number_of_potential_pop = sum(line.pop_total_potential for line in a.service_coverage_ids)\n \n @api.depends('pop_pic_am_ids')\n def _total_number_of_potential_am(self):\n for a in self:\n a.pic_number_of_potential_am = sum(line.pic_number_of_potential for line in a.pop_pic_am_ids)\n \n @api.depends('pic_number_of_potential_pop', 'pic_number_of_potential_am')\n def _total_number_of_potential_manager(self):\n for rec in self:\n rec.pic_number_of_potential = rec.pic_number_of_potential_pop + rec.pic_number_of_potential_am\n\n #total monthly tariff pop list\n @api.depends('service_coverage_ids')\n def _total_monthly_tariff_pop(self):\n for a in self:\n a.pic_monthly_tarif_pop = sum(line.pop_total_monthly_tariff for line in a.service_coverage_ids)\n \n @api.depends('pop_pic_am_ids')\n def _total_monthly_tariff_am(self):\n for a in self:\n a.pic_monthly_tarif_am = sum(line.pic_monthly_tarif for line in a.pop_pic_am_ids)\n\n @api.depends('pic_monthly_tarif_pop', 'pic_monthly_tarif_am')\n def _total_monthly_tariff_manager(self):\n for rec in self:\n rec.pic_monthly_tariff = rec.pic_monthly_tarif_pop + rec.pic_monthly_tarif_am\n\n #total ARPU pop list\n @api.depends('service_coverage_ids')\n def _total_number_of_arpu_pop(self):\n for a in self:\n a.pic_number_of_arpu_pop = sum(line.pop_arpu for line in a.service_coverage_ids)\n\n @api.depends('pic_number_of_arpu_pop')\n def _total_number_of_arpu_manager(self):\n for rec in self:\n rec.pic_number_of_arpu = rec.pic_number_of_arpu_pop \n \n #total cold leads pop list\n @api.depends('service_coverage_ids')\n def _total_cold_leads_pop(self):\n for a in self:\n a.pic_cold_leads_pop = sum(line.pop_cold_leads for 
line in a.service_coverage_ids)\n\n    @api.depends('pop_pic_am_ids')\n    def _total_cold_leads_am(self):\n        for a in self:\n            a.pic_cold_leads_am = sum(line.pic_cold_leads for line in a.pop_pic_am_ids)\n\n    @api.depends('pic_cold_leads_pop', 'pic_cold_leads_am')\n    def _total_cold_leads(self):\n        for rec in self:\n            rec.pic_cold_leads = rec.pic_cold_leads_pop + rec.pic_cold_leads_am\n\n    # compute total penetration on the pic.manager form (HRB)\n    @api.depends('pic_number_of_customer','pic_number_of_potential')\n    def _total_current_penetration(self):\n        for i in self:\n            if i.pic_number_of_customer == 0:\n                i.pic_current_penetration = 0\n            elif i.pic_number_of_potential == 0:\n                i.pic_current_penetration = 0\n            else:\n                i.pic_current_penetration = (i.pic_number_of_customer / i.pic_number_of_potential)\n\n    # compute total industry-cluster penetration on the pic.manager form\n    @api.depends('pic_number_of_customer_industry_cluster','pic_number_of_potential_industry_cluster')\n    def _total_current_penetration_industry_cluster(self):\n        for i in self:\n            if i.pic_number_of_customer_industry_cluster == 0:\n                i.pic_current_penetration_industry_cluster = 0\n            elif i.pic_number_of_potential_industry_cluster == 0:\n                i.pic_current_penetration_industry_cluster = 0\n            else:\n                i.pic_current_penetration_industry_cluster = (i.pic_number_of_customer_industry_cluster / i.pic_number_of_potential_industry_cluster)\n\n
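The classes in this record repeat one pattern many times: a field computed with @api.depends by summing over a One2many of related records; a minimal sketch of that pattern (the model and field names are illustrative, not from this module, and it is meant to live inside an Odoo addon):

```python
from odoo import api, fields, models


class DemoOrder(models.Model):
    _name = 'demo.order'
    _description = 'Demo order'

    line_ids = fields.One2many('demo.order.line', 'order_id')
    total = fields.Float(compute='_compute_total', store=True)

    @api.depends('line_ids.amount')
    def _compute_total(self):
        # recompute runs automatically whenever a line's amount changes
        for rec in self:
            rec.total = sum(line.amount for line in rec.line_ids)


class DemoOrderLine(models.Model):
    _name = 'demo.order.line'
    _description = 'Demo order line'

    order_id = fields.Many2one('demo.order')
    amount = fields.Float()
```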
class pic(models.Model):\n    _name = 'pop.pic'\n    _description = 'POP PIC'\n    _rec_name = 'pop_pic_name_am'\n\n    pop_pic_name_am = fields.Many2one('hr.employee', string=\"AM Name\")\n    pic_am_manager = fields.Many2one('pic.manager', string=\"Manager In Charge\")\n    pic_number_of_customer = fields.Integer(string=\"Total Customer\", compute=\"_total_number_of_customers\")\n    pic_number_of_customer_industry_cluster = fields.Integer(string=\"Total Customer\", compute=\"_total_number_of_customers_industry_cluster\")\n    pic_number_of_potential = fields.Integer(string=\"Total Potential\", compute=\"_total_number_of_potential\")\n    pic_number_of_potential_industry_cluster = fields.Integer(string=\"Total Potential\", compute=\"_total_number_of_potential_industry_cluster\")\n    pic_current_penetration = fields.Float(string=\"Total Penetration\", compute=\"_total_current_penetration\")\n    pic_current_penetration_industry_cluster = fields.Float(string=\"Total Penetration\", compute=\"_total_current_penetration_industry_cluster\")\n    pic_monthly_tarif = fields.Integer(string=\"Total Monthly Tariff\", compute=\"_total_monthly_tariff\", store=True)\n    pic_monthly_tarif_industry_cluster = fields.Integer(string=\"Monthly Tariff\", compute=\"_total_monthly_tariff_industry_cluster\")\n    pic_summary_of_specific_industry = fields.Many2one('pop.type', string=\"Specific Industry\")\n    pic_period = fields.Date(string=\"Period\")\n    pic_annual_quota = fields.Integer(string=\"Annual Quota\")\n    pic_quota_this_month = fields.Integer(string=\"OB This Month\")\n    pic_quota_last_month = fields.Integer('OB Last Month')\n    pic_mcfc_this_month = fields.Integer('MCF-C This Month')\n    pic_ytd_achievement = fields.Float(string=\"YTD Achievement\")\n    pic_sf_this_month = fields.Integer(related=\"service_coverage_ids.pop_total_sales_focus\", string=\"SF This Month\")\n    pic_cold_leads = fields.Integer(string=\"Cold Leads\")\n\n    industry_cluster_ids = fields.One2many('pop.industry.cluster', 'pop_industry_cluster_pic_am', string=\"Industry Cluster ID\")\n    service_coverage_ids = fields.One2many('pop.service.coverage', 'pic_id', string=\"Service Coverage ID\")\n\n    ytd_achievement_ids = fields.One2many('ytd.achievement', 'pic_id')\n    ytd_sales_focus = fields.One2many('ytd.sales.focus', 'pic_id')\n    ytd_mcfc_ids = fields.One2many('ytd.mcfc', 'pic_id')\n    ytd_cold_leads_ids = fields.One2many('ytd.cold.leads', 'pic_id')\n    total_rowcount_name_pic = fields.Integer(string=\"Total Pop/Cluster\", compute=\"_total_rowcount_name_pic\")\n    pic_number_of_arpu = fields.Integer(string=\"ARPU\", compute=\"_total_number_of_arpu_manager\")\n    pic_number_of_arpu_pic = fields.Integer(string=\"ARPU POP\", compute=\"_total_number_of_arpu_pop\")\n    pic_partner_ids = fields.One2many('res.partner', 'pic_partner_id', string=\"PIC Customer\")\n    user_id = fields.Many2one('res.users', string='Related User', required=True)\n\n\n    #total pop/cluster\n    @api.depends('service_coverage_ids')\n    def _total_rowcount_name_pic(self):\n        for service in self:\n            total_rowcount = 0.0\n            for line in service.service_coverage_ids:\n                total_rowcount += 1\n            service.update({\n                'total_rowcount_name_pic': total_rowcount,\n            })\n\n    #total ARPU pop list\n    @api.depends('service_coverage_ids')\n    def _total_number_of_arpu_pop(self):\n        for a in self:\n            a.pic_number_of_arpu_pic = sum(line.pop_arpu for line in a.service_coverage_ids)\n\n    #total ARPU pop list\n    @api.depends('pic_number_of_arpu_pic')\n    def _total_number_of_arpu_manager(self):\n        for rec in self:\n            rec.pic_number_of_arpu = rec.pic_number_of_arpu_pic\n\n    #total customers on the pop.pic form\n    @api.depends('service_coverage_ids')\n    def _total_number_of_customers(self):\n        for a in self:\n            a.pic_number_of_customer = sum(line.pop_total_customer for line in a.service_coverage_ids)\n\n    #total potential on the pop.pic form\n    @api.depends('service_coverage_ids')\n    def _total_number_of_potential(self):\n        for a in self:\n            a.pic_number_of_potential = sum(line.pop_total_potential for line in a.service_coverage_ids)\n\n    #total penetration on the pop.pic form\n    @api.depends('pic_number_of_customer','pic_number_of_potential')\n    def _total_current_penetration(self):\n        for i in self:\n            if i.pic_number_of_customer == 0:\n                i.pic_current_penetration = 0\n            elif i.pic_number_of_potential == 0:\n                i.pic_current_penetration = 0\n            else:\n                i.pic_current_penetration = (i.pic_number_of_customer / i.pic_number_of_potential)\n\n    #total monthly tariff on the pop.pic form\n    @api.depends('service_coverage_ids')\n    def _total_monthly_tariff(self):\n        for a in self:\n            a.pic_monthly_tarif = sum(line.pop_total_monthly_tariff for line in a.service_coverage_ids)\n\n    #total customers per industry cluster on the pop.pic form\n    @api.depends('industry_cluster_ids')\n    def _total_number_of_customers_industry_cluster(self):\n        for a in self:\n            a.pic_number_of_customer_industry_cluster = sum(line.pop_industry_cluster_total_customer for line in a.industry_cluster_ids)\n\n    #total potential per industry cluster on the pop.pic form\n    @api.depends('industry_cluster_ids')\n    def _total_number_of_potential_industry_cluster(self):\n        for a in self:\n            a.pic_number_of_potential_industry_cluster = sum(line.pop_industry_cluster_total_potential for line in a.industry_cluster_ids)\n\n    #total penetration per industry cluster on the pop.pic form\n    @api.depends('pic_number_of_customer_industry_cluster','pic_number_of_potential_industry_cluster')\n    def _total_current_penetration_industry_cluster(self):\n        for i in self:\n            if i.pic_number_of_customer_industry_cluster == 0:\n                i.pic_current_penetration_industry_cluster = 0\n            elif i.pic_number_of_potential_industry_cluster == 0:\n                i.pic_current_penetration_industry_cluster = 0\n            else:\n                i.pic_current_penetration_industry_cluster = (i.pic_number_of_customer_industry_cluster / i.pic_number_of_potential_industry_cluster)\n\n    #total monthly tariff per industry cluster on the pop.pic form\n    @api.depends('industry_cluster_ids')\n    def _total_monthly_tariff_industry_cluster(self):\n        for a in self:\n            a.pic_monthly_tarif_industry_cluster = sum(line.pop_industry_cluster_revenue_month for line in a.industry_cluster_ids)\n\n
class YtdAchievement(models.Model):\n    _name = 'ytd.achievement'\n    _description = 'YTD Achievement'\n\n    pic_id = fields.Many2one('pop.pic', string=\"PIC\")\n    customer_name = fields.Many2one('res.partner', string=\"Customer Name\", domain=[('customer_rank', '=', True)])\n    period = fields.Date('Period')\n    ob_date = fields.Date('OB Date')\n    monthly_tariff = fields.Integer('Monthly Tariff')\n    service = fields.Char('Service')\n    ba_date = fields.Date('BA Date')\n\nclass YtdSalesFocus(models.Model):\n    _name = 'ytd.sales.focus'\n    _description = 'YTD Sales Focus'\n\n    pic_id = fields.Many2one('pop.pic', string=\"PIC\")\n    potential_name = fields.Many2one('res.partner', string=\"Potential Name\", domain=[('tenant', '=', True)])\n    period = fields.Date('Period')\n    propose_tariff = fields.Date('Propose Tariff')\n    service = fields.Char('Service')\n    opportunity = fields.Float('Opportunity %')\n    opportunity_mix = fields.Char('Opportunity Mix')\n    sf_result = fields.Selection([('Win', 'Win'), ('Lose', 'Lose'), ('Carry Over', 'Carry Over')], string=\"SF Result\")\n    next_action = fields.Char('Next Action')\n\nclass YtdMcfC(models.Model):\n    _name = 'ytd.mcfc'\n    _description = 'YTD MCF-C'\n\n    pic_id = fields.Many2one('pop.pic', string=\"PIC\")\n    potential_name = fields.Many2one('res.partner', string=\"Potential Name\", domain=[('tenant', '=', True)])\n    period = fields.Date('Period')\n    propose_tariff = fields.Date('Propose Tariff')\n    service = fields.Char('Service')\n    opportunity = fields.Float('Opportunity %')\n    opportunity_mix = fields.Char('Opportunity Mix')\n    sf_result = fields.Selection([('Win', 'Win'), ('Lose', 'Lose'), ('Carry Over', 'Carry Over')], string=\"SF Result\")\n    next_action = fields.Char('Next Action')\n\nclass YtdColdLeads(models.Model):\n    _name = 'ytd.cold.leads'\n    _description = 'YTD Cold Leads'\n\n    pic_id = fields.Many2one('pop.pic', string=\"PIC\")\n    cold_leads_name = fields.Many2one('res.partner', string=\"Potential Name\", domain=[('tenant', '=', True)])\n    period = fields.Date('Period')\n    cold_leads_date = fields.Date('Cold Leads Date')\n    marketing_program = fields.Many2one('velo.program', string=\"Marketing Program\")\n\nclass Inherit_pop_industry_cluster(models.Model):\n    _inherit = 'pop.industry.cluster'\n\n    pop_potential_ids = fields.One2many('res.partner', 'industry_cluster', string=\"Potential\", domain=[('tenant', '=', True)])\n    industry_cluster_customer_ids = fields.One2many('res.partner', 'industry_cluster', string=\"Customer\", domain=[('customer_rank', '=', 1), ('tenant', '=', False)])\n\n    #count the total industry potential in the Market Potential menu (Settings => pop industry cluster)\n    @api.depends('pop_potential_ids')\n    def _count_total_potential(self):\n        for i in self:\n            i.pop_industry_cluster_total_potential = len(i.pop_potential_ids)\n\n    #count the total industry customers in the Market Potential menu (Settings => pop industry cluster)\n    @api.depends('industry_cluster_customer_ids')\n    def _count_total_customer(self):\n        for i in self:\n            i.pop_industry_cluster_total_customer = len(i.industry_cluster_customer_ids)\n\n    #count the total industry penetration in the Market Potential menu (Settings => pop industry cluster)\n    @api.depends('pop_industry_cluster_total_customer','pop_industry_cluster_total_potential')\n    def _total_current_penetration(self):\n        for i in self:\n            if i.pop_industry_cluster_total_customer == 0:\n                i.pop_industry_cluster_total_penetration = 0\n            elif i.pop_industry_cluster_total_potential == 0:\n                i.pop_industry_cluster_total_penetration = 0\n            else:\n                i.pop_industry_cluster_total_penetration = (i.pop_industry_cluster_total_customer / i.pop_industry_cluster_total_potential)\n\n    #count the total monthly tariff in the Market Potential menu (Settings => pop industry cluster)\n    @api.depends('industry_cluster_customer_ids')\n    def _total_monthly_tariff(self):\n        for a in self:\n            a.pop_industry_cluster_revenue_month = sum(line.revenue_month for line in a.industry_cluster_customer_ids)\n\n    #count the total services in the Market Potential menu (Settings => pop industry cluster)\n    @api.depends('industry_cluster_customer_ids')\n    def _total_service(self):\n        for a in self:\n            a.pop_indistry_cluster_total_service = sum(line.total_service for line in a.industry_cluster_customer_ids)\n\n    #count the total ARPU in the Market Potential menu (Settings => pop industry cluster)\n    @api.depends('pop_industry_cluster_revenue_month','pop_industry_cluster_total_customer')\n    def _industry_cluster_arpu(self):\n        for i in self:\n            if i.pop_industry_cluster_revenue_month == 0:\n                i.pop_industry_cluster_arpu = 0\n            elif i.pop_industry_cluster_total_customer == 0:\n                i.pop_industry_cluster_arpu = 0\n            else:\n                i.pop_industry_cluster_arpu = i.pop_industry_cluster_revenue_month / i.pop_industry_cluster_total_customer\n\n\nclass Pop(models.Model):\n    _inherit = 'pop.service.coverage'\n\n    pic_manager = fields.Many2one('pic.manager', string=\"Account Management\")\n    pic_id = fields.Many2one('pop.pic', string=\"PIC ID\")\n\n    res_partner = fields.Many2many('res.partner', string=\"Customer\", domain=[('customer_rank', '=', 1), ('tenant', '=', False)])\n    product_template_ids = fields.Many2many('product.template', string=\"Product\")\n    pop_tenant_id = fields.One2many('res.partner', 'pop_id', string=\"Potential\", domain=[('tenant', '=', True)])\n\n    #count the total monthly tariff in the Market Potential menu (summary of type)\n    @api.depends('res_partner')\n    def _total_monthly_tariff(self):\n        for a in self:\n            a.pop_total_monthly_tariff = sum(line.revenue_month for line in a.res_partner)\n\n    #count the ARPU in the Market Potential menu (summary of type)\n    @api.depends('res_partner')\n    def _total_arpu(self):\n        for b in self:\n            total = sum(line.revenue_month for line in b.res_partner)\n            if len(b.res_partner) == 0:\n                b.pop_arpu = 0\n            else:\n                b.pop_arpu = total / len(b.res_partner)\n\n    #count the POP total customers in the Market Potential menu (summary of type)\n    @api.depends('res_partner')\n    def _count_tot_customer(self):\n        for i in self:\n            i.pop_total_customer = len(i.res_partner)\n\n    #count the total industry (HRB) in the Market Potential menu (summary of type)\n    @api.depends('res_partner')\n    def _count_tot_customer_hrb(self):\n        for rec in self:\n            count = 0\n            tarif = 0\n            service = 0\n            for line in rec.res_partner:\n                if line.industry.pop_industry_name.poptype_name == 'HRB':\n                    count += 1\n                    tarif += line.revenue_month\n                    service += line.total_service\n\n            rec.pop_total_customer_hrb = count\n            rec.pop_total_monthly_tarif_hrb = tarif\n            rec.pop_total_service_hrb = service\n\n
sum(line.total_service for line in a.res_partner)\n \n #count Total Potential in the Market Potential menu (summary of type)\n @api.depends('pop_tenant_id')\n def _count_tot_potential(self):\n for i in self:\n i.pop_total_potential = len(i.pop_tenant_id)\n\n\nclass velo_customer_summary(models.Model):\n _name = 'customer.summary'\n _description = 'Model Customer Summary'\n\n #count number of customers in the Customer menu (setting=>Customer Summary)\n @api.model\n def _number_of_customer(self):\n hos_customer = self.env['res.partner'].search([('industry.id','=', self.hos_name.id),('tenant','=', False),('customer_rank','>', 0)])\n hrb_customer = self.env['res.partner'].search([('industry.id','=', self.hrb_name.id),('tenant','=', False),('customer_rank','>', 0)])\n itemb_customer = self.env['res.partner'].search([('industry.id','=', self.itemb_name.id),('tenant','=', False),('customer_rank','>', 0)])\n nrx_customer = self.env['res.partner'].search([('industry.id','=', self.nrx_name.id),('tenant','=', False),('customer_rank','>', 0)])\n for i in self:\n i.hos_number_of_customer = len(hos_customer)\n i.hrb_number_of_customer = len(hrb_customer)\n i.itemb_number_of_customer = len(itemb_customer)\n i.nrx_number_of_customer = len(nrx_customer)\n\n i.hos_revenue_month = sum(line.revenue_month for line in hos_customer)\n i.hrb_revenue_month = sum(line.revenue_month for line in hrb_customer)\n i.itemb_revenue_month = sum(line.revenue_month for line in itemb_customer)\n i.nrx_revenue_month = sum(line.revenue_month for line in nrx_customer)\n\n i.hos_arpu = i.hos_revenue_month / i.hos_number_of_customer if i.hos_number_of_customer else 0\n i.hrb_arpu = i.hrb_revenue_month / i.hrb_number_of_customer if i.hrb_number_of_customer else 0\n i.itemb_arpu = i.itemb_revenue_month / i.itemb_number_of_customer if i.itemb_number_of_customer else 0\n i.nrx_arpu = i.nrx_revenue_month / i.nrx_number_of_customer if i.nrx_number_of_customer else 0\n\n hos_number_of_service = 0\n hrb_number_of_service = 0\n itemb_number_of_service = 0\n nrx_number_of_service = 0\n for line in hos_customer.sale_subcription_ids:\n hos_number_of_service = hos_number_of_service + 1\n\n for line in hrb_customer.sale_subcription_ids:\n hrb_number_of_service = hrb_number_of_service + 1\n\n for line in itemb_customer.sale_subcription_ids:\n itemb_number_of_service = itemb_number_of_service + 1\n\n for line in nrx_customer.sale_subcription_ids:\n nrx_number_of_service = nrx_number_of_service + 1\n\n i.hos_number_of_service = hos_number_of_service\n i.hrb_number_of_service = hrb_number_of_service\n i.itemb_number_of_service = itemb_number_of_service\n i.nrx_number_of_service = nrx_number_of_service\n\n\n hos_name = fields.Many2one(\"pop.industry\",string=\"HOS Name\")\n hos_number_of_customer = fields.Integer(string=\"HOS Number of Customer\", compute=\"_number_of_customer\")\n hos_revenue_month = fields.Float(string=\"HOS Revenue/Month\", compute=\"_number_of_customer\")\n hos_arpu = fields.Float(string=\"HOS ARPU\", compute=\"_number_of_customer\")\n hos_number_of_service = fields.Integer(string=\"HOS Number of Service\", compute=\"_number_of_customer\")\n hos_ob_quota = fields.Integer(string=\"HOS OB Quota\")\n hos_ytd_achivement = fields.Float(string=\"HOS YTD Achievement\")\n hos_achieve = fields.Float(string=\"HOS Achieve %\")\n hos_ytd_churn = fields.Float(string=\"HOS YTD Churn\")\n hos_ytd_downgrade = fields.Float(string=\"HOS YTD Downgrade\")\n hos_ytd_net_increase = fields.Float(string=\"HOS YTD Net Increase\")\n hos_ytd_rev_growth = fields.Float(string=\"HOS YTD Rev Growth\")\n\n hrb_name = fields.Many2one(\"pop.industry\",string=\"HRB Name\")\n hrb_number_of_customer = fields.Integer(string=\"HRB Number of Customer\", compute=\"_number_of_customer\")\n hrb_revenue_month = fields.Float(string=\"HRB Revenue/Month\", compute=\"_number_of_customer\")\n hrb_arpu = fields.Float(string=\"HRB ARPU\", compute=\"_number_of_customer\")\n hrb_number_of_service = fields.Integer(string=\"HRB Number of Service\", compute=\"_number_of_customer\")\n hrb_ob_quota = fields.Integer(string=\"HRB OB Quota\")\n hrb_ytd_achivement = fields.Float(string=\"HRB YTD Achievement\")\n hrb_achieve = fields.Float(string=\"HRB Achieve %\")\n hrb_ytd_churn = fields.Float(string=\"HRB YTD Churn\")\n hrb_ytd_downgrade = fields.Float(string=\"HRB YTD Downgrade\")\n hrb_ytd_net_increase = fields.Float(string=\"HRB YTD Net Increase\")\n hrb_ytd_rev_growth = fields.Float(string=\"HRB YTD Rev Growth\")\n\n itemb_name = fields.Many2one(\"pop.industry\",string=\"ITEMB Name\")\n itemb_number_of_customer = fields.Integer(string=\"ITEMB Number of Customer\", compute=\"_number_of_customer\")\n itemb_revenue_month = fields.Float(string=\"ITEMB Revenue/Month\", compute=\"_number_of_customer\")\n itemb_arpu = fields.Float(string=\"ITEMB ARPU\", compute=\"_number_of_customer\")\n itemb_number_of_service = fields.Integer(string=\"ITEMB Number of Service\", compute=\"_number_of_customer\")\n itemb_ob_quota = fields.Integer(string=\"ITEMB OB Quota\")\n itemb_ytd_achivement = fields.Float(string=\"ITEMB YTD Achievement\")\n itemb_achieve = fields.Float(string=\"ITEMB Achieve %\")\n itemb_ytd_churn = fields.Float(string=\"ITEMB YTD Churn\")\n itemb_ytd_downgrade = fields.Float(string=\"ITEMB YTD Downgrade\")\n itemb_ytd_net_increase = fields.Float(string=\"ITEMB YTD Net Increase\")\n itemb_ytd_rev_growth = fields.Float(string=\"ITEMB YTD Rev Growth\")\n\n nrx_name = fields.Many2one(\"pop.industry\",string=\"NRX Name\")\n nrx_number_of_customer = fields.Integer(string=\"NRX Number of Customer\", compute=\"_number_of_customer\")\n nrx_revenue_month = fields.Float(string=\"NRX Revenue/Month\", compute=\"_number_of_customer\")\n nrx_arpu = fields.Float(string=\"NRX ARPU\", compute=\"_number_of_customer\")\n nrx_number_of_service = fields.Integer(string=\"NRX Number of Service\", compute=\"_number_of_customer\")\n nrx_ob_quota = fields.Integer(string=\"NRX OB Quota\")\n nrx_ytd_achivement = fields.Float(string=\"NRX YTD Achievement\")\n nrx_achieve = fields.Float(string=\"NRX Achieve %\")\n nrx_ytd_churn = fields.Float(string=\"NRX YTD Churn\")\n nrx_ytd_downgrade = fields.Float(string=\"NRX YTD Downgrade\")\n nrx_ytd_net_increase = fields.Float(string=\"NRX YTD Net Increase\")\n nrx_ytd_rev_growth = fields.Float(string=\"NRX YTD Rev Growth\")\n\n total_number_of_customer = fields.Integer('Total Number of Customer', compute=\"_total_number_customers\")\n total_revenue_month = fields.Float('Total Revenue/Month', compute=\"_total_revenue_month\")\n total_arpu = fields.Float('Total ARPU', compute=\"_total_arpu\")\n total_number_of_service = fields.Integer('Total Number of Service', compute=\"_total_number_of_service\")\n total_ob_quota = fields.Integer('Total OB Quota', compute=\"_total_ob_quota\")\n total_ytd_achivement = fields.Float('Total YTD Achievement', compute=\"_total_ytd_achivement\")\n total_achieve = fields.Float('Total Achieve%', compute=\"_total_achieve\")\n total_ytd_churn = fields.Float('Total YTD Churn', compute=\"_total_ytd_churn\")\n total_ytd_downgrade = fields.Float('Total YTD Downgrade', compute=\"_total_ytd_downgrade\")\n total_ytd_net_increase = fields.Float('Total YTD Net Increase', 
compute=\"_total_ytd_net_increase\")\n total_ytd_rev_growth = fields.Float('Total YTD Rev Growth', compute=\"_total_ytd_rev_growth\")\n\n #count total number of customers in the Customer menu (setting=>Customer Summary)\n @api.depends('hos_number_of_customer','hrb_number_of_customer','itemb_number_of_customer','nrx_number_of_customer')\n def _total_number_customers(self):\n for rec in self:\n rec.total_number_of_customer = rec.hos_number_of_customer + rec.hrb_number_of_customer + rec.itemb_number_of_customer + rec.nrx_number_of_customer\n\n #count total revenue/month in the Customer menu (setting=>Customer Summary)\n @api.depends('hos_revenue_month','hrb_revenue_month','itemb_revenue_month','nrx_revenue_month')\n def _total_revenue_month(self):\n for rec in self:\n rec.total_revenue_month = rec.hos_revenue_month + rec.hrb_revenue_month + rec.itemb_revenue_month + rec.nrx_revenue_month\n\n #count total ARPU in the Customer menu (setting=>Customer Summary)\n @api.depends('total_revenue_month','total_number_of_customer')\n def _total_arpu(self):\n for rec in self:\n rec.total_arpu = rec.total_revenue_month / rec.total_number_of_customer if rec.total_number_of_customer else 0\n \n #count total number of services in the Customer menu (setting=>Customer Summary)\n @api.depends('hos_number_of_service','hrb_number_of_service','itemb_number_of_service','nrx_number_of_service')\n def _total_number_of_service(self):\n for rec in self:\n rec.total_number_of_service = rec.hos_number_of_service + rec.hrb_number_of_service + rec.itemb_number_of_service + rec.nrx_number_of_service\n\n #count total OB Quota in the Customer menu (setting=>Customer Summary)\n @api.depends('hos_ob_quota','hrb_ob_quota','itemb_ob_quota','nrx_ob_quota')\n def _total_ob_quota(self):\n for rec in self:\n rec.total_ob_quota = rec.hos_ob_quota + rec.hrb_ob_quota + rec.itemb_ob_quota + rec.nrx_ob_quota\n\n #count total YTD Achievement in the Customer menu (setting=>Customer Summary)\n @api.depends('hos_ytd_achivement','hrb_ytd_achivement','itemb_ytd_achivement','nrx_ytd_achivement')\n def _total_ytd_achivement(self):\n for rec in self:\n rec.total_ytd_achivement = rec.hos_ytd_achivement + rec.hrb_ytd_achivement + rec.itemb_ytd_achivement + rec.nrx_ytd_achivement\n\n #count total achieve% in the Customer menu (setting=>Customer Summary)\n @api.depends('hos_achieve','hrb_achieve','itemb_achieve','nrx_achieve')\n def _total_achieve(self):\n for rec in self:\n rec.total_achieve = rec.hos_achieve + rec.hrb_achieve + rec.itemb_achieve + rec.nrx_achieve\n\n #count total YTD Churn in the Customer menu (setting=>Customer Summary)\n @api.depends('hos_ytd_churn','hrb_ytd_churn','itemb_ytd_churn','nrx_ytd_churn')\n def _total_ytd_churn(self):\n for rec in self:\n rec.total_ytd_churn = rec.hos_ytd_churn + rec.hrb_ytd_churn + rec.itemb_ytd_churn + rec.nrx_ytd_churn\n\n #count total YTD Downgrade in the Customer menu (setting=>Customer Summary)\n @api.depends('hos_ytd_downgrade','hrb_ytd_downgrade','itemb_ytd_downgrade','nrx_ytd_downgrade')\n def _total_ytd_downgrade(self):\n for rec in self:\n rec.total_ytd_downgrade = rec.hos_ytd_downgrade + rec.hrb_ytd_downgrade + rec.itemb_ytd_downgrade + rec.nrx_ytd_downgrade\n \n #count total YTD Net Increase in the Customer menu (setting=>Customer Summary)\n @api.depends('hos_ytd_net_increase','hrb_ytd_net_increase','itemb_ytd_net_increase','nrx_ytd_net_increase')\n def _total_ytd_net_increase(self):\n for rec in self:\n rec.total_ytd_net_increase = rec.hos_ytd_net_increase + rec.hrb_ytd_net_increase + rec.itemb_ytd_net_increase + rec.nrx_ytd_net_increase\n\n #count total rev growth in the Customer menu (setting=>Customer Summary)\n @api.depends('hos_ytd_rev_growth','hrb_ytd_rev_growth','itemb_ytd_rev_growth','nrx_ytd_rev_growth')\n def _total_ytd_rev_growth(self):\n for rec in self:\n rec.total_ytd_rev_growth = rec.hos_ytd_rev_growth + rec.hrb_ytd_rev_growth + rec.itemb_ytd_rev_growth + rec.nrx_ytd_rev_growth\n\n\nclass velo_potential_summary(models.Model):\n _name = 'potential.summary'\n _description = 'Model potential Summary'\n _rec_name = 'id'\n\n #count number of customers in the Customer menu (setting=>Potential Summary)\n @api.model\n def _number_of_customer(self):\n hos_customer = self.env['res.partner'].search([('industry.id','=', self.hos_name.id),('tenant','=', False),('customer_rank','>', 0)])\n hrb_customer = self.env['res.partner'].search([('industry.id','=', self.hrb_name.id),('tenant','=', False),('customer_rank','>', 0)])\n itemb_customer = self.env['res.partner'].search([('industry.id','=', self.itemb_name.id),('tenant','=', False),('customer_rank','>', 0)])\n nrx_customer = self.env['res.partner'].search([('industry.id','=', self.nrx_name.id),('tenant','=', False),('customer_rank','>', 0)])\n for i in self:\n i.hos_number_of_customer = len(hos_customer)\n i.hrb_number_of_customer = len(hrb_customer)\n i.itemb_number_of_customer = len(itemb_customer)\n i.nrx_number_of_customer = len(nrx_customer)\n\n #count number of potentials in the Customer menu (setting=>Potential Summary)\n @api.model\n def _number_of_potential(self):\n hos_potential = self.env['res.partner'].search([('industry.id','=', self.hos_name.id),('tenant','=', True),('customer_rank','>', 0)])\n hrb_potential = self.env['res.partner'].search([('industry.id','=', self.hrb_name.id),('tenant','=', True),('customer_rank','>', 0)])\n itemb_potential = self.env['res.partner'].search([('industry.id','=', self.itemb_name.id),('tenant','=', True),('customer_rank','>', 0)])\n nrx_potential = self.env['res.partner'].search([('industry.id','=', self.nrx_name.id),('tenant','=', True),('customer_rank','>', 0)])\n for i in self:\n i.hos_number_of_potential = len(hos_potential)\n i.hrb_number_of_potential = len(hrb_potential)\n i.itemb_number_of_potential = len(itemb_potential)\n i.nrx_number_of_potential = len(nrx_potential)\n\n #count penetration (HOS) in the Customer menu (setting=>Potential Summary)\n @api.depends('hos_number_of_customer','hos_number_of_potential')\n def _sum_penetration_potential_hos(self):\n for i in self:\n if i.hos_number_of_customer == 0:\n i.hos_penetration = 0\n elif i.hos_number_of_potential == 0:\n i.hos_penetration = 0\n else:\n i.hos_penetration = (i.hos_number_of_customer / i.hos_number_of_potential) * 100\n \n #count penetration (HRB) in the Customer menu (setting=>Potential Summary)\n @api.depends('hrb_number_of_customer','hrb_number_of_potential')\n def _sum_penetration_potential_hrb(self):\n for i in self:\n if i.hrb_number_of_customer == 0:\n i.hrb_penetration = 0\n elif i.hrb_number_of_potential == 0:\n i.hrb_penetration = 0\n else:\n i.hrb_penetration = (i.hrb_number_of_customer / i.hrb_number_of_potential) * 100\n \n #count penetration (ITEMB) in the Customer menu (setting=>Potential Summary)\n @api.depends('itemb_number_of_customer','itemb_number_of_potential')\n def _sum_penetration_potential_itemb(self):\n for i in self:\n if i.itemb_number_of_customer == 0:\n i.itemb_penetration = 0\n elif i.itemb_number_of_potential == 0:\n i.itemb_penetration = 0\n else:\n i.itemb_penetration = (i.itemb_number_of_customer / i.itemb_number_of_potential) * 100\n \n #count penetration (NRX) in the Customer menu (setting=>Potential Summary)\n @api.depends('nrx_number_of_customer','nrx_number_of_potential')\n def _sum_penetration_potential_nrx(self):\n for i in self:\n if i.nrx_number_of_customer == 0:\n i.nrx_penetration = 0\n elif i.nrx_number_of_potential == 0:\n i.nrx_penetration = 0\n else:\n i.nrx_penetration = (i.nrx_number_of_customer / i.nrx_number_of_potential) * 100\n \n hos_name = fields.Many2one(\"pop.industry\",string=\"HOS Name\")\n hos_number_of_customer = fields.Integer('HOS Number of Customer', compute=\"_number_of_customer\")\n hos_number_of_potential = fields.Integer(string=\"HOS Number of Potential\", compute=\"_number_of_potential\")\n hos_penetration = fields.Float(string=\"HOS Penetration\", compute=\"_sum_penetration_potential_hos\")\n hos_tariff = fields.Float(string=\"HOS Tariff\")\n hos_quota = 
fields.Integer(string=\"HOS Quota\")\n hos_ytd_ob = fields.Float('HOS YTD OB')\n hos_ytd_achivement = fields.Float(string=\"HOS YTD Achievement\")\n hos_sf_this_month = fields.Integer(string=\"HOS SF This Month\")\n hos_mcfc_this_month = fields.Integer(string=\"HOS MCF-C This Month\")\n hos_cold_leads = fields.Integer(string=\"HOS Cold Leads\")\n \n hrb_name = fields.Many2one(\"pop.industry\",string=\"HRB Name\")\n hrb_number_of_customer = fields.Integer('HRB Number of Customer', compute=\"_number_of_customer\")\n hrb_number_of_potential = fields.Integer(string=\"HRB Number of Potential\", compute=\"_number_of_potential\")\n hrb_penetration = fields.Float(string=\"HRB Penetration\", compute=\"_sum_penetration_potential_hrb\")\n hrb_tariff = fields.Float(string=\"HRB Tariff\")\n hrb_quota = fields.Integer(string=\"HRB Quota\")\n hrb_ytd_ob = fields.Float('HRB YTD OB')\n hrb_ytd_achivement = fields.Float(string=\"HRB YTD Achievement\")\n hrb_sf_this_month = fields.Integer(string=\"HRB SF This Month\")\n hrb_mcfc_this_month = fields.Integer(string=\"HRB MCF-C This Month\")\n hrb_cold_leads = fields.Integer(string=\"HRB Cold Leads\")\n\n itemb_name = fields.Many2one(\"pop.industry\",string=\"ITEMB Name\")\n itemb_number_of_customer = fields.Integer('ITEMB Number of Customer', compute=\"_number_of_customer\")\n itemb_number_of_potential = fields.Integer(string=\"ITEMB Number of Potential\", compute=\"_number_of_potential\")\n itemb_penetration = fields.Float(string=\"ITEMB Penetration\", compute=\"_sum_penetration_potential_itemb\")\n itemb_tariff = fields.Float(string=\"ITEMB Tariff\")\n itemb_quota = fields.Integer(string=\"ITEMB Quota\")\n itemb_ytd_ob = fields.Float('ITEMB YTD OB')\n itemb_ytd_achivement = fields.Float(string=\"ITEMB YTD Achievement\")\n itemb_sf_this_month = fields.Integer(string=\"ITEMB SF This Month\")\n itemb_mcfc_this_month = fields.Integer(string=\"ITEMB MCF-C This Month\")\n itemb_cold_leads = fields.Integer(string=\"ITEMB Cold Leads\")\n\n nrx_name = fields.Many2one(\"pop.industry\",string=\"NRX Name\")\n nrx_number_of_customer = fields.Integer('NRX Number of Customer', compute=\"_number_of_customer\")\n nrx_number_of_potential = fields.Integer(string=\"NRX Number of Potential\", compute=\"_number_of_potential\")\n nrx_penetration = fields.Float(string=\"NRX Penetration\", compute=\"_sum_penetration_potential_nrx\")\n nrx_tariff = fields.Float(string=\"NRX Tariff\")\n nrx_quota = fields.Integer(string=\"NRX Quota\")\n nrx_ytd_ob = fields.Float('NRX YTD OB')\n nrx_ytd_achivement = fields.Float(string=\"NRX YTD Achievement\")\n nrx_sf_this_month = fields.Integer(string=\"NRX SF This Month\")\n nrx_mcfc_this_month = fields.Integer(string=\"NRX MCF-C This Month\")\n nrx_cold_leads = fields.Integer(string=\"NRX Cold Leads\")\n\n total_pot_number_of_customer = fields.Integer('Total Number of Customer', compute=\"_total_pot_number_of_customer\")\n total_pot_number_of_potential = fields.Integer('Total Number of Potential', compute=\"_total_pot_number_of_potential\")\n total_pot_penetration = fields.Float('Total Penetration', compute=\"_total_pot_penetration\")\n total_pot_tariff = fields.Float('Total Tariff', compute=\"_total_pot_tariff\")\n total_pot_quota = fields.Integer('Total Quota', compute=\"_total_pot_quota\")\n total_pot_ytd_ob = fields.Float('Total YTD OB', compute=\"_total_pot_ytd_ob\")\n total_pot_ytd_achivement = fields.Float('Total YTD Achievement', compute=\"_total_pot_ytd_achivement\")\n total_pot_sf_this_month = fields.Integer('Total SF This Month', compute=\"_total_pot_sf_this_month\")\n total_pot_mcfc_this_month = fields.Integer('Total MCF-C This Month', compute=\"_total_pot_mcfc_this_month\")\n total_pot_cold_leads = fields.Integer('Total Cold Leads', compute=\"_total_pot_cold_leads\")\n\n #count total number of customers in the Customer menu (setting=>Potential Summary)\n @api.depends('hos_number_of_customer','hrb_number_of_customer','itemb_number_of_customer','nrx_number_of_customer')\n def _total_pot_number_of_customer(self):\n for rec in self:\n rec.total_pot_number_of_customer = rec.hos_number_of_customer + rec.hrb_number_of_customer + rec.itemb_number_of_customer + rec.nrx_number_of_customer\n\n #count total number of potentials in the Customer menu (setting=>Potential Summary)\n @api.depends('hos_number_of_potential','hrb_number_of_potential','itemb_number_of_potential','nrx_number_of_potential')\n def _total_pot_number_of_potential(self):\n for rec in self:\n rec.total_pot_number_of_potential = rec.hos_number_of_potential + rec.hrb_number_of_potential + rec.itemb_number_of_potential + rec.nrx_number_of_potential\n\n #count total penetration in the Customer menu (setting=>Potential Summary)\n @api.depends('total_pot_number_of_customer','total_pot_number_of_potential')\n def _total_pot_penetration(self):\n for i in self:\n if i.total_pot_number_of_customer == 0:\n i.total_pot_penetration = 0\n elif i.total_pot_number_of_potential == 0:\n i.total_pot_penetration = 0\n else:\n i.total_pot_penetration = (i.total_pot_number_of_customer / i.total_pot_number_of_potential) * 100\n\n #count total tariff in the Customer menu (setting=>Potential Summary)\n @api.depends('hos_tariff','hrb_tariff','itemb_tariff','nrx_tariff')\n def _total_pot_tariff(self):\n for rec in self:\n rec.total_pot_tariff = rec.hos_tariff + rec.hrb_tariff + rec.itemb_tariff + rec.nrx_tariff\n\n #count total quota in the Customer menu (setting=>Potential Summary)\n @api.depends('hos_quota','hrb_quota','itemb_quota','nrx_quota')\n def _total_pot_quota(self):\n for rec in self:\n rec.total_pot_quota = rec.hos_quota + rec.hrb_quota + rec.itemb_quota + rec.nrx_quota\n \n #count total YTD OB in the Customer menu (setting=>Potential Summary)\n @api.depends('hos_ytd_ob','hrb_ytd_ob','itemb_ytd_ob','nrx_ytd_ob')\n def _total_pot_ytd_ob(self):\n for rec in self:\n rec.total_pot_ytd_ob = rec.hos_ytd_ob + rec.hrb_ytd_ob + rec.itemb_ytd_ob + rec.nrx_ytd_ob\n \n #count total YTD achievement in the Customer menu (setting=>Potential Summary)\n @api.depends('hos_ytd_achivement','hrb_ytd_achivement','itemb_ytd_achivement','nrx_ytd_achivement')\n def _total_pot_ytd_achivement(self):\n for rec in self:\n rec.total_pot_ytd_achivement = rec.hos_ytd_achivement + rec.hrb_ytd_achivement + rec.itemb_ytd_achivement + rec.nrx_ytd_achivement\n \n #count total SF this month in the Customer menu (setting=>Potential Summary)\n @api.depends('hos_sf_this_month','hrb_sf_this_month','itemb_sf_this_month','nrx_sf_this_month')\n def _total_pot_sf_this_month(self):\n for rec in self:\n rec.total_pot_sf_this_month = rec.hos_sf_this_month + rec.hrb_sf_this_month + rec.itemb_sf_this_month + rec.nrx_sf_this_month\n\n #count total MCF-C this month in the Customer menu (setting=>Potential Summary)\n @api.depends('hos_mcfc_this_month','hrb_mcfc_this_month','itemb_mcfc_this_month','nrx_mcfc_this_month')\n def _total_pot_mcfc_this_month(self):\n for rec in self:\n rec.total_pot_mcfc_this_month = rec.hos_mcfc_this_month + rec.hrb_mcfc_this_month + rec.itemb_mcfc_this_month + rec.nrx_mcfc_this_month\n\n #count total cold leads in the Customer menu (setting=>Potential Summary)\n @api.depends('hos_cold_leads','hrb_cold_leads','itemb_cold_leads','nrx_cold_leads')\n def _total_pot_cold_leads(self):\n for rec in self:\n rec.total_pot_cold_leads = rec.hos_cold_leads + rec.hrb_cold_leads + rec.itemb_cold_leads + rec.nrx_cold_leads\n\n\nclass pic_am(models.Model):\n _name = 'pic.am'\n _description = \"PIC Account Manager\"\n _inherit = ['pop.pic', 'pic.manager', 'pop.service.coverage']\n\n pic_am_id = fields.Many2one('pop.pic', string=\"AM Name\")\n pic_am_pic_summary_of_specific_industry = fields.Many2one(related='pic_am_id.pic_summary_of_specific_industry', string=\"Specific Industry\")\n pic_am_pic_period = fields.Date(related='pic_am_id.pic_period', string=\"Period\")\n pic_am_pic_am_manager = fields.Many2one(related='pic_am_id.pic_am_manager', string=\"Manager In Charge\")\n pic_am_total_rowcount_name_pic = fields.Integer(related=\"pic_am_id.total_rowcount_name_pic\", string=\"Total Pop/Cluster\")\n pic_am_pic_number_of_customer = fields.Integer(related=\"pic_am_id.pic_number_of_customer\", string=\"Total Customer\")\n pic_am_pic_number_of_potential = fields.Integer(related=\"pic_am_id.pic_number_of_potential\",string=\"Total Potential\")\n pic_am_pic_current_penetration = fields.Float(related=\"pic_am_id.pic_current_penetration\", string=\"Total Penetration\")\n pic_am_pic_monthly_tarif = fields.Integer(related=\"pic_am_id.pic_monthly_tarif\", string=\"Total Monthly Tariff\", store=False)\n pic_am_pic_number_of_arpu = fields.Integer(related=\"pic_am_id.pic_number_of_arpu\", string=\"ARPU\")\n pic_am_pic_annual_quota = fields.Integer(related=\"pic_am_id.pic_annual_quota\", string=\"Annual Quota\")\n pic_am_pic_quota_this_month = fields.Integer(related=\"pic_am_id.pic_quota_this_month\", string=\"OB This Month\")\n pic_am_pic_ytd_achievement = fields.Float(related=\"pic_am_id.pic_ytd_achievement\", string=\"YTD Achievement\")\n pic_am_pic_mcfc_this_month = fields.Integer(related=\"pic_am_id.pic_mcfc_this_month\", string=\"MCF-C This Month\")\n pic_am_pic_sf_this_month = fields.Integer(related=\"pic_am_id.pic_sf_this_month\", string=\"SF This Month\")\n pic_am_pic_cold_leads = fields.Integer(related=\"pic_am_id.pic_cold_leads\", string=\"Cold Leads\")\n\n pic_am_service_coverage_ids = fields.One2many(related=\"pic_am_id.service_coverage_ids\", string=\"Service Coverage ID\")\n pic_am_partner_ids = fields.One2many(related=\"pic_am_id.pic_partner_ids\", string=\"PIC Customer\")\n pic_am_ytd_achievement_ids = fields.One2many(related=\"pic_am_id.ytd_achievement_ids\", string=\"YTD Achievement\")\n pic_am_ytd_sales_focus = fields.One2many(related=\"pic_am_id.ytd_sales_focus\", string=\"YTD Sales Focus\")\n pic_am_ytd_mcfc_ids = fields.One2many(related=\"pic_am_id.ytd_mcfc_ids\", string=\"YTD MCF-C\")\n pic_am_ytd_cold_leads_ids = fields.One2many(related=\"pic_am_id.ytd_cold_leads_ids\", string=\"YTD Cold Leads\")\n \n # def pic_am_print(self):\n # print(self.pic_am_id)","sub_path":"velo_customers/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":65386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"646577741","text":"import unittest\n\nimport pytest\nfrom analyzer_executor_lib.analyzer_executor import AnalyzerExecutor\nfrom hypothesis import strategies as st\n\nSAMPLE_ADDR = \"localhost\"\nSAMPLE_PORT = \"12345\"\n\n\n@pytest.fixture\ndef AnalyzerExecutorSingleton(monkeypatch):\n def _AnalyzerExecutorSingleton(stub_env=False, env_addr=None, env_port=None):\n with monkeypatch.context() as mp:\n if stub_env:\n if env_addr:\n mp.setenv(\"MESSAGECACHE_ADDR\", env_addr)\n mp.setenv(\"HITCACHE_ADDR\", env_addr)\n else:\n mp.delenv(\"MESSAGECACHE_ADDR\", raising=False)\n mp.delenv(\"HITCACHE_ADDR\", raising=False)\n\n if env_port:\n mp.setenv(\"MESSAGECACHE_PORT\", env_port)\n mp.setenv(\"HITCACHE_PORT\", env_port)\n else:\n mp.delenv(\"MESSAGECACHE_PORT\", raising=False)\n mp.delenv(\"HITCACHE_PORT\", raising=False)\n\n # force singleton to reinitialize,\n # this should be idempotent?\n 
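# Clearing the cached instance (assumed to live in the private _singleton\n            # attribute) makes the next singleton() call rebuild its cache clients\n            # from the environment variables patched above.\n            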
AnalyzerExecutor._singleton = None\n return AnalyzerExecutor.singleton()\n\n return _AnalyzerExecutorSingleton\n\n\n@pytest.mark.integration_test\ndef test_connection_info(AnalyzerExecutorSingleton) -> None:\n \"\"\"\n Ensures exceptions are raised for incomplete connection info.\n \"\"\"\n\n with pytest.raises(ValueError):\n ae = AnalyzerExecutorSingleton(\n stub_env=True, env_addr=SAMPLE_ADDR, env_port=None\n )\n\n with pytest.raises(ValueError):\n ae = AnalyzerExecutorSingleton(\n stub_env=True, env_addr=None, env_port=SAMPLE_PORT\n )\n\n with pytest.raises(ValueError):\n ae = AnalyzerExecutorSingleton(stub_env=True, env_addr=None, env_port=None)\n\n\n@pytest.mark.integration_test\ndef test_hit_cache(AnalyzerExecutorSingleton) -> None:\n \"\"\"\n Initializes the AnalyzerExecutor singleton with Redis connection params\n sourced from the environment, expecting hit cache to populate.\n \"\"\"\n ae = AnalyzerExecutorSingleton(stub_env=False)\n\n # .example() draws concrete sample strings; the bare strategies are not keys\n k1, k2 = st.text(min_size=3, max_size=64).example(), st.text(min_size=3, max_size=64).example()\n\n assert not ae.check_hit_cache(k1, k2)\n ae.update_hit_cache(k1, k2)\n assert ae.check_hit_cache(k1, k2)\n\n\n@pytest.mark.integration_test\ndef test_message_cache(AnalyzerExecutorSingleton) -> None:\n \"\"\"\n Initializes the AnalyzerExecutor singleton with Redis connection params\n sourced from the environment, expecting message cache to populate.\n \"\"\"\n ae = AnalyzerExecutorSingleton(stub_env=False)\n\n k1, k2, k3 = (\n st.text(min_size=3, max_size=64).example(),\n st.text(min_size=3, max_size=64).example(),\n st.text(min_size=3, max_size=64).example(),\n )\n\n assert not ae.check_msg_cache(k1, k2, k3)\n ae.update_msg_cache(k1, k2, k3)\n assert ae.check_msg_cache(k1, k2, k3)\n","sub_path":"src/python/analyzer_executor/tests/test_cache_integration.py","file_name":"test_cache_integration.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"522935437","text":"import torch\nfrom torchvision.models import resnet\n\nMODEL_FILE_NAME = 'resnet34.pt'\n\ndef create_model(out_dir='./'):\n model = resnet.resnet34(pretrained=True)\n model.eval()\n traced_model = torch.jit.trace(model, torch.randn(1,3,224,224))\n traced_model.save(out_dir + MODEL_FILE_NAME)\n\n\nif __name__ == \"__main__\":\n create_model()\n print(MODEL_FILE_NAME + \" model file is created\")\n","sub_path":"part2/1-image-classifier-serverless-aws-lambda/mobilenetv2-pytorch-aws/model/create_resnet34_model.py","file_name":"create_resnet34_model.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"534239584","text":"\"\"\"\n Testing for particlesclasses module.\n\"\"\"\n\nimport unittest\nimport uuid\n\nfrom simphony.cuds.particles import Particle, Bond, ParticleContainer\nfrom simphony.core.data_container import DataContainer\nfrom simphony.core.cuba import CUBA\n\n\nclass ParticleTestCase(unittest.TestCase):\n \"\"\"Test case for Particle class.\"\"\"\n\n def test_simple_particle_default(self):\n particle = Particle()\n self.assertIsInstance(particle, Particle)\n self.assertEqual(particle.coordinates, (0, 0, 0))\n self.assertEqual(particle.id, None)\n self.assertEqual(particle.data, DataContainer())\n\n def test_simple_particle_custom(self):\n data = DataContainer()\n data[CUBA.RADIUS] = 3.0\n particle = Particle([20.5, 30.5, 40.5], uuid.UUID(int=33), data)\n self.assertIsInstance(particle, Particle)\n self.assertEqual(particle.coordinates, (20.5, 30.5, 
40.5))\n self.assertEqual(particle.id, uuid.UUID(int=33))\n self.assertEqual(particle.data, data)\n\n def test_str(self):\n particle = Particle()\n total_str = str(particle.id) + '_' + str(particle.coordinates)\n self.assertEqual(str(particle), total_str)\n\n\nclass BondTestCase(unittest.TestCase):\n\n def test_simple_bond(self):\n data = DataContainer()\n data[CUBA.RADIUS] = 2.0\n uuids = [uuid.UUID(int=i) for i in range(3)]\n bond = Bond(uuids, uuid.UUID(int=12), data)\n self.assertIsInstance(bond, Bond)\n self.assertEqual(bond.particles, tuple(uuids))\n self.assertEqual(bond.id, uuid.UUID(int=12))\n self.assertEqual(bond.data, data)\n\n def test_exception_when_initializing_with_empty_tuple(self):\n with self.assertRaises(Exception):\n Bond(())\n\n def test_str(self):\n uuids = [uuid.UUID(int=i) for i in range(3)]\n bond = Bond(particles=uuids)\n total_str = str(bond.id) + '_' + str(tuple(uuids))\n self.assertEqual(str(bond), total_str)\n\n\nclass ParticleContainerAddParticlesTestCase(unittest.TestCase):\n def setUp(self):\n self.p_list = []\n for i in xrange(10):\n self.p_list.append(Particle([i, i*10, i*100]))\n self.pc = ParticleContainer()\n\n def test_has_particle(self):\n id = self.pc.add_particle(self.p_list[0])\n self.assertTrue(self.pc.has_particle(id))\n self.assertFalse(self.pc.has_particle(uuid.UUID(int=1234)))\n\n def test_add_particle_ok(self):\n ids = [\n self.pc.add_particle(particle) for particle in self.p_list]\n for index, particle in enumerate(self.p_list):\n self.assertTrue(self.pc.has_particle(particle.id))\n self.assertEqual(particle.id, ids[index])\n\n def test_exception_when_adding_particle_twice(self):\n for particle in self.p_list:\n self.pc.add_particle(particle)\n with self.assertRaises(Exception):\n self.pc.add_particle(self.p_list[0])\n\n\nclass ParticleContainerManipulatingParticlesTestCase(unittest.TestCase):\n def setUp(self):\n self.p_list = []\n self.pc = ParticleContainer()\n for i in xrange(10):\n particle = Particle([i, i*10, i*100], id=uuid.UUID(int=i))\n self.p_list.append(particle)\n self.pc.add_particle(particle)\n\n def test_update_particle(self):\n particle = self.pc.get_particle(self.p_list[1].id)\n particle.coordinates = (123, 456, 789)\n part_coords = particle.coordinates\n self.pc.update_particle(particle)\n new_particle = self.pc.get_particle(particle.id)\n self.assertTrue(new_particle is not particle)\n self.assertEqual(particle.id, new_particle.id)\n self.assertEqual(part_coords, new_particle.coordinates)\n self.assertEqual(particle.data, new_particle.data)\n\n def test_exception_when_update_particle_when_wrong_id(self):\n particle = Particle()\n with self.assertRaises(KeyError):\n self.pc.update_particle(particle)\n\n def test_remove_particle(self):\n particle = self.p_list[0]\n self.pc.remove_particle(particle.id)\n self.assertFalse(self.pc.has_particle(particle.id))\n\n def test_exception_when_removing_particle_with_bad_id(self):\n with self.assertRaises(KeyError):\n self.pc.remove_particle(uuid.UUID(int=23325))\n\n def test_iter_particles_when_passing_ids(self):\n particle_ids = [p.id for p in self.p_list[::2]]\n iterated_ids = [\n particle.id for particle in self.pc.iter_particles(particle_ids)]\n self.assertEqual(particle_ids, iterated_ids)\n\n def test_iter_all_particles(self):\n particle_ids = [p.id for p in self.p_list]\n iterated_ids = [\n particle.id for particle in self.pc.iter_particles()]\n # The order of iteration is not important in this case.\n 
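# assertItemsEqual is the Python 2 unittest order-insensitive comparison;\n        # e.g. [1, 2, 2] and [2, 1, 2] compare equal here, unlike assertEqual.\n        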
self.assertItemsEqual(particle_ids, iterated_ids)\n\n def test_exception_on_iter_particles_when_passing_wrong_ids(self):\n ids = [particle.id for particle in self.p_list]\n ids.append(uuid.UUID(int=20))\n with self.assertRaises(KeyError):\n for particle in self.pc.iter_particles(ids):\n last_id = particle.id\n continue\n self.assertEqual(last_id, self.p_list[-1].id)\n\n\nclass ParticleContainerAddBondsTestCase(unittest.TestCase):\n def setUp(self):\n self.p_list = []\n self.b_list = []\n for i in xrange(10):\n self.b_list.append(\n Bond([\n uuid.UUID(int=i),\n uuid.UUID(int=i + 1),\n uuid.UUID(int=i+2)]))\n self.pc = ParticleContainer()\n\n def test_has_bond(self):\n id = self.pc.add_bond(self.b_list[0])\n self.assertTrue(self.pc.has_bond(id))\n self.assertFalse(self.pc.has_bond(uuid.UUID(int=2122)))\n\n def test_add_bond(self):\n ids = [self.pc.add_bond(bond) for bond in self.b_list]\n for index, bond in enumerate(self.b_list):\n self.assertTrue(self.pc.has_bond(bond.id))\n self.assertEqual(bond.id, ids[index])\n\n def test_exception_when_adding_bond_twice(self):\n for bond in self.b_list:\n self.pc.add_bond(bond)\n with self.assertRaises(Exception):\n self.pc.add_bond(self.b_list[0])\n\n\nclass ParticleContainerManipulatingBondsTestCase(unittest.TestCase):\n def setUp(self):\n self.p_list = []\n self.b_list = []\n self.pc = ParticleContainer()\n for i in xrange(10):\n self.p_list.append(Particle([i, i*10, i*100]))\n self.b_list.append(Bond([1, 2, 3]))\n self.pc.add_bond(self.b_list[i])\n\n def test_update_bond(self):\n bond = self.pc.get_bond(self.b_list[1].id)\n bond.particles = bond.particles[:-1]\n self.pc.update_bond(bond)\n new_bond = self.pc.get_bond(bond.id)\n self.assertTrue(new_bond is not bond)\n self.assertEqual(bond.id, new_bond.id)\n self.assertEqual(bond.particles, new_bond.particles)\n self.assertEqual(bond.data, new_bond.data)\n\n def test_exception_when_updating_bond_with_incorrect_id(self):\n bond = Bond([1, 2])\n with self.assertRaises(KeyError):\n self.pc.update_bond(bond)\n\n def test_remove_bond(self):\n bond = self.b_list[0]\n self.pc.remove_bond(bond.id)\n self.assertFalse(self.pc.has_bond(bond.id))\n\n def test_exception_removing_bond_with_missing_id(self):\n with self.assertRaises(KeyError):\n self.pc.remove_bond(uuid.UUID(int=12124124))\n\n def test_iter_bonds_when_passing_ids(self):\n ids = [b.id for b in self.b_list[::2]]\n iterated_ids = [\n bond.id for bond in self.pc.iter_bonds(ids)]\n self.assertEqual(ids, iterated_ids)\n\n def test_iter_all_bonds(self):\n bonds_ids = [b.id for b in self.b_list]\n iterated_ids = [bond.id for bond in self.pc.iter_bonds()]\n # The order of iteration is not important in this case.\n self.assertItemsEqual(bonds_ids, iterated_ids)\n\n def test_exception_on_iter_bonds_when_passing_wrong_ids(self):\n bonds_ids = [bond.id for bond in self.b_list]\n bonds_ids.append(uuid.UUID(int=20))\n with self.assertRaises(KeyError):\n for bond in self.pc.iter_bonds(bonds_ids):\n last_id = bond.id\n continue\n self.assertEqual(last_id, self.b_list[-1].id)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"simphony/cuds/tests/test_particlesclasses.py","file_name":"test_particlesclasses.py","file_ext":"py","file_size_in_byte":8528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"581055685","text":"import matplotlib.pyplot as plt\r\n\r\ncsize=[4000, 10000]\r\n#csize=[10000]\r\nfor c in csize:\r\n sgd = []\r\n f1 = open(\"20_0.05_cache_real_\" + str(c) + \"_dual_14_Warship_0.1/objective.txt\", 
\"r\")\n for l in f1:\n l = l.strip().split(\" \")\n obj = float(l[1])\n sgd.append(obj)\n\n lru = []\n f2 = open(\"20_0.01_cache_real_\" + str(c) + \"_lru_14_Warship_simple/objective.txt\", \"r\")\n for l in f2:\n l = l.strip().split(\" \")\n obj = float(l[1])\n lru.append(obj)\n\n plt.plot(lru, label=\"lru\")\n plt.plot(sgd, label=\"sgd\")\n plt.xlabel(\"Time\")\n plt.ylabel(\"Objective\")\n plt.grid()\n plt.legend()\n plt.savefig(str(c) + \"_time.png\")\n plt.clf()\n","sub_path":"similarity_caching_3d_time/plot_time.py","file_name":"plot_time.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"481589358","text":"# -*- coding: utf-8 -*-\nfrom rest_framework import views, status\nimport json\nfrom rest_framework.response import Response\nfrom django.core.mail import send_mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom .models import SendgridApiKey, StaffEmail\nfrom authentication.models import Account\nfrom shop.models import Commande, Article, Panier\nfrom django.utils.dateparse import parse_datetime\nimport sendgrid\nfrom sendgrid.helpers.mail import Email, Content, Mail, Attachment\nimport datetime\n\n\ndef getApiKey():\n key = SendgridApiKey.objects.get(id=1)\n return key\n\n\ndef getEmails():\n emails = StaffEmail.objects.get(id=1)\n return emails\n\n\ndef send_email(from_email, to_email, content, subject):\n sg = sendgrid.SendGridAPIClient(apikey=getApiKey())\n\n from_email = Email(from_email)\n to_email = Email(to_email)\n content = Content(\"text/html\", content)\n mail = Mail(from_email, subject, to_email, content)\n\n response = sg.client.mail.send.post(request_body=mail.get())\n\n if (response.status_code >= 200) and (response.status_code < 300):\n return Response({\n 'status': 'OK',\n 'message': 'Email sent'\n }, status=status.HTTP_200_OK)\n else:\n return Response({\n 'status': 'KO',\n 'message': 'Error'\n }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass AccountCreationEmailView(views.APIView):\n def post(self, request, format=None):\n data = json.loads(request.body)\n json_email = data['email']\n\n staff_email = getEmails()\n account = Account.objects.filter(email=json_email)\n if not account:\n return Response({\n 'status': 'Not found',\n 'message': 'Account not found'\n }, status=status.HTTP_404_NOT_FOUND)\n\n account = account[0]\n\n subject = \"Création de compte\"\n message_content = \"\"\"\n \n \n \n \n Création de compte\n \n \n \n
\n Bienvenue %s,
\n Vous venez de créer un compte sur Organic Azuki.

\n Votre identifiant est : %s

\n\n Vous pouvez dès à présent vous connecter sur le site et passer commande !

\n\n Si vous avez des questions, n’hésitez pas à nous écrire à contact@organicazuki.com.

\n\n À très bientôt.
\n Organic Azuki
\n
\n
\n \n \"\"\"%(account.get_first_name(),\n account.get_email())\n\n return send_email(staff_email.noreply(),\n json_email,\n message_content,\n subject)\n\n\nclass AccountDeletionToCustomerEmailView(views.APIView):\n def post(self, request, format=None):\n data = json.loads(request.body)\n json_email = data['email']\n json_first_name = data['first_name']\n json_last_name = data['last_name']\n staff_email = getEmails()\n\n subject = \"Au revoir\"\n message_content = \"\"\"\n \n \n \n \n Création de compte\n \n \n \n
\n Bonjour %s,
\n\n Nous vous informons que votre compte client a bien été supprimé.

\n\n N’hésitez pas à nous contacter, pour nous faire part de vos remarques sur votre expérience parmi nous.

\n\n Cordialement,

\n\n L’équipe de Café Aum

\n
\n \n \"\"\" % (json_first_name)\n\n return send_email(staff_email.noreply(),\n json_email,\n message_content,\n subject)\n\n\nclass AccountDeletionToStaffEmailView(views.APIView):\n def post(self, request, format=None):\n data = json.loads(request.body)\n json_email = data['email']\n json_first_name = data['first_name']\n json_last_name = data['last_name']\n json_account_id = data['account_id']\n\n staff_email = getEmails()\n\n subject = \"Suppression d'un compte client\"\n message_content = \"\"\"\n \n \n \n \n Création de compte\n \n \n \n
\n Un nouveau compte vient d’être supprimé :
\n Numéro de compte : %s
\n %s %s
\n %s
\n\n
\n \n \"\"\" % (json_account_id,\n json_first_name,\n json_last_name,\n json_email)\n\n return send_email(staff_email.noreply(),\n staff_email.contact(),\n message_content,\n subject)\n\n\nclass CommandConfirmationToCustomerEmailView(views.APIView):\n\n def post(self, request, format=None):\n data = json.loads(request.body)\n\n json_commande = data['commande']\n json_account = data['account']\n\n commande = Commande.objects.get(id=json_commande['id'])\n account = Account.objects.get(id=json_account['id'])\n\n staff_email = getEmails()\n\n articles = Panier.objects.get_articles(commande.panier.uuid)\n articles_detail = \"\"\n for article in articles:\n articles_detail += article.reference.nom + u\" - Quantité : \" + str(article.quantite) \\\n + u\" (\" + str(article.taille) + u\") - Prix : \" + str(article.reference.prix * article.quantite) + u\" €\"\n articles_detail += u\"
\"\n\n subject = u\"Confirmation de commande\"\n message_content = u\"\"\"\n \n \n \n \n Confirmation de commande\n \n \n \n
\n

Confirmation de commande

\n Votre commande %s a bien été prise en compte, merci pour votre confiance !
\n Vous trouverez ci-dessous un récapitulatif de vos achats.
\n Si toutefois vous changez d’avis, vous avez 1h pour annuler votre commande.
\n Pour cela, il suffit de vous rendre sur votre espace Mon Compte sur le site.

\n Vous serez tenu(e) au courant de l’expédition de votre commande.
\n\n

Rappel de votre commande

\n\n %s
\n\n Cout total de la commande (frais de port inclus): %s €

\n\n Adresse de livraison : %s

\n Adresse de facturation : %s


\n\n\n A très bientot,
\n Organic Azuki
\n
\n
\n \n \"\"\"%(str(commande.unique_id),\n articles_detail,\n str(commande.transaction.montant),\n commande.adresse_livraison,\n commande.adresse_facturation)\n\n return send_email(staff_email.noreply(),\n account.get_email(),\n message_content,\n subject)\n\n\nclass CommandConfirmationToStaffEmailView(views.APIView):\n def post(self, request, format=None):\n data = json.loads(request.body)\n\n json_account = data['account']\n json_commande = data['commande']\n\n account = Account.objects.get(id=json_account['id'])\n commande = Commande.objects.get(id=json_commande['id'])\n\n staff_email = getEmails()\n\n subject = u\"Nouvelle commande n°%s\"%commande.unique_id\n message_content = u\"\"\"\n \n \n \n \n Nouvelle commande\n \n \n \n
\n

Nouvelle commande

\n Une nouvelle commande (n° %s) a été effectuée par %s %s\n
\n
\n \n \"\"\"%(commande.unique_id,\n account.get_first_name(),\n account.get_last_name(),)\n\n return send_email(staff_email.noreply(),\n staff_email.noreply(),\n message_content,\n subject)\n\n\nclass ContactEmailView(views.APIView):\n\n def post(self, request, format=None):\n data = json.loads(request.body)\n question = data['question']\n prenom = data['prenom']\n nom = data['nom']\n email = data['email']\n tel = data['tel']\n sujet = data['sujet']\n message = data['message']\n\n staff_email = getEmails()\n subject = \"Demande de contact : %s\"%sujet\n message_content = \"\"\"\n \n \n \n \n Demande de contact\n \n \n

\n Organic Azuki,
\n %s %s (email : %s / tel : %s) a laissé une demande de contact.
\n Son message concerne \"%s\"

\n\n Son message :
\n \"%s\"

\n\n \"%s\"\n

\n\n
\n

\n \n \"\"\"%(prenom, nom, email, tel, question, sujet, '<br>'.join(message.splitlines()))\n\n sg = sendgrid.SendGridAPIClient(apikey=getApiKey())\n\n from_email = Email(staff_email.noreply())\n to_email = Email(staff_email.contact())\n content = Content(\"text/html\", message_content)\n mail = Mail(from_email, subject, to_email, content)\n\n response = sg.client.mail.send.post(request_body=mail.get())\n\n if (response.status_code >= 200) and (response.status_code < 300):\n return Response({\n 'status': 'OK',\n 'message': 'Email sent'\n }, status=status.HTTP_200_OK)\n else:\n return Response({\n 'status': 'KO',\n 'message': 'Error'\n }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass PasswordRecoveryEmailView(views.APIView):\n def post(self, request, format=None):\n data = json.loads(request.body)\n\n email = data['email']\n token = data['token']\n\n staff_email = getEmails()\n\n subject = \"Renouvellement de mot de passe\"\n message_content = \"\"\"\n \n \n \n \n Renouvellement de mot de passe\n \n \n

\n Bonjour,
\n Pour mettre à jour votre mot de passe, veuillez cliquer sur le lien suivant :
\n http://www.organicazuki.com/recovery/%s
\n\n Attention ce lien n'est valide que pour 24h, aussi nous vous recommandons de modifier votre mot de passe dès que possible.

\n\n\n Organic Azuki
\n

\n \n \"\"\" % token\n\n sg = sendgrid.SendGridAPIClient(apikey=getApiKey())\n\n from_email = Email(staff_email.noreply())\n to_email = Email(email)\n content = Content(\"text/html\", message_content)\n mail = Mail(from_email, subject, to_email, content)\n\n response = sg.client.mail.send.post(request_body=mail.get())\n\n if (response.status_code >= 200) and (response.status_code < 300):\n return Response({\n 'status': 'OK',\n 'message': 'Email sent'\n }, status=status.HTTP_200_OK)\n else:\n return Response({\n 'status': 'KO',\n 'message': 'Error'\n }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n","sub_path":"messaging/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"632673786","text":"import os\r\nimport csv\r\n\r\ncsvpath = os.path.join(\".\", \"Resources\", \"budget_data.csv\")\r\nprint (csvpath)\r\n\r\ntotal_rows=0\r\nnet_amount= 0\r\naverage= 0\r\nmax_inc= 0\r\nmax_date= \" \"\r\nmin_inc= 1170593\r\nmin_date= \" \"\r\n\r\n\r\n# Read in the CSV file\r\nwith open(csvpath, 'r') as csvfile:\r\n\r\n # Split the data on commas\r\n csvreader = csv.reader(csvfile, delimiter=',')\r\n\r\n header = next(csvreader)\r\n\r\n print(header)\r\n\r\n for row in csvreader:\r\n #print(row[0],row[1])\r\n total_rows=total_rows+1\r\n net_amount=net_amount + int(row[1])\r\n if int(row[1]) > max_inc:\r\n max_inc=(int(row[1]))\r\n max_date = row[0]\r\n if total_rows == 1:\r\n min_inc = int(row[1])\r\n max_inc = int(row[1])\r\n if int(row[1])< min_inc:\r\n min_inc = int(row[1])\r\n min_date = row[0]\r\n\r\n\r\n\r\n print (total_rows)\r\n print(net_amount)\r\n print(net_amount/total_rows)\r\n print (max_inc, max_date)\r\n print(min_inc, min_date)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Homework2019/Python December1/PyBank.py","file_name":"PyBank.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"108233152","text":"'''\nProject Euler 99\nRead from base_exp.txt base/exponent pairs, which line is highest?\n'''\n\nimport math\n\n# Reads data, puts in list of integer couplets [base, exp]\nf = open('base_exp.txt','r')\ng = f.readlines()\nf.close()\ng = [line.rstrip('\\n').split(',') for line in g]\ng = [[int(line[0]),int(line[1])] for line in g]\n\n\ndef fast_exp(base, exp):\n if exp == 1:\n return base\n elif exp % 2 == 0:\n return fast_exp(base ** 2, int(exp / 2))\n else:\n return base*fast_exp(base,exp-1)\n\n\n\n# Will take too long! 
Do up to a certain point perhaps?\n\n'''\nmax = 0\nhigh = ''\n\nfor pair in g:\n if pair[1] * math.log(pair[0]) > max:\n max = pair[1] * math.log(pair[0])\n high = pair\n\nprint (str(high) + ' ' + str(max))\n'''\n\ni = 0\nwhile True:\n if g[i][0] != 895447:\n i += 1\n else:\n break\nprint ( i + 1 )\n","sub_path":"old_solutions/projecteuler99.py","file_name":"projecteuler99.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"68382512","text":"\"\"\"\n %s string\n %d signed integer; %06d pads the printed integer to 6 digits with leading zeros\n %f float; %.2f shows only 2 digits after the decimal point\n %% prints a literal %\n\n Format: print(\"format string\" % var)\n Format: print(\"format string ...\" % (var1, var2...))\n\"\"\"\nname = \"jack\"\nage = 20\nheight = 1.75\nstudent_no = 1856\n\nprint(\"%s will say something to everybody\" % name)\nprint(\"hello, my name is %s , age %d , height %.2f, student no %06d\" % (name, age, height, student_no))\n","sub_path":"basic/s_05_格式化字符串.py","file_name":"s_05_格式化字符串.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"292500262","text":"from tkinter import Tk, Canvas\r\nfrom Quad import *\r\nfrom random import randint\r\n\r\ndef add_point(mouse):\r\n global Q, canvas\r\n points = [] # has to be done this way due to how Quad.add_points() works\r\n point = Point(canvas, mouse.x, mouse.y, \"white\")\r\n points.append(point)\r\n Q.add_points(points)\r\n\r\n\r\ndef select_rectangle(mouse):\r\n global temp_x, temp_y, started, Q, canvas\r\n if started:\r\n rect = Rectangle(canvas, temp_x, temp_y, mouse.x, mouse.y)\r\n points = Q.find_points(rect)\r\n\r\n for point in points:\r\n canvas.itemconfigure(point.display, outline = \"green\", fill = \"green\")\r\n \r\n canvas.update()\r\n started = False\r\n else:\r\n temp_x = mouse.x\r\n temp_y = mouse.y\r\n started = True\r\n\r\n\r\ndef generate_points(count):\r\n points = []\r\n for i in range(count):\r\n point = Point(canvas, randint(0, 700), randint(0, 700), \"white\")\r\n points.append(point)\r\n\r\n Q.add_points(points)\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"QuadTree\")\r\ncanvas = Canvas(root, width=700, height=700, background=\"#333333\")\r\ncanvas.pack()\r\n# NOTE: the original event specifiers were lost in extraction; \"<Button-1>\"/\"<Button-3>\" are assumed bindings\r\ncanvas.bind(\"<Button-1>\", select_rectangle)\r\ncanvas.bind(\"<Button-3>\", add_point)\r\n\r\nstarted = False\r\nQ = Quad(canvas, 0, 0, 700)\r\n\r\nroot.after(0, generate_points(1000))\r\nroot.mainloop()\r\n","sub_path":"Quadtree.py","file_name":"Quadtree.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"209129019","text":"# -*- coding: utf-8 -*-\n# __author__ = 'XingHuan'\n# 8/29/2018\n\n# Copyright 2018 XingHuan\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nfrom sins.module.sqt import *\nfrom sins.utils.res import resource\n\n\nTAG_SHAPES = [\n '',\n 'Rect',\n 'Ellipse',\n 'Triangle0',\n 'Triangle2',\n]\nTAG_W = 25\nTAG_H = 25\n\n\ndef get_rect_of_ellipse(center_x, center_y, radius):\n 
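# A circle's bounding box: the top-left corner sits at (center_x - radius,\n    # center_y - radius) and both sides span the diameter, as QRectF expects.\n    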
x = center_x - radius\n y = center_y - radius\n w = 2.0 * radius\n h = 2.0 * radius\n return QRectF(x, y, w, h)\n\n\ndef get_shapes_of_triangle(a, roundness=0, direction=0):\n points = [\n QPointF(1.732 * roundness, a),\n QPointF(1.732 / 2 * roundness, a - 1.5 * roundness),\n QPointF(0.5 * a - 1.732 / 2 * roundness, 1.5 * roundness),\n QPointF(0.5 * a + 1.732 / 2 * roundness, 1.5 * roundness),\n QPointF(a - 1.732 / 2 * roundness, a - 1.5 * roundness),\n QPointF(a - 1.732 * roundness, a),\n ]\n centers = [\n QPointF(0.5 * a, 2.0 * roundness),\n QPointF(1.732 * roundness, a - roundness),\n QPointF(a - 1.732 * roundness, a - roundness),\n ]\n if direction == 0:\n points = [point - QPointF(0, 1.732 / 4 * roundness) for point in points]\n centers = [point - QPointF(0, 1.732 / 4 * roundness) for point in centers]\n elif direction == 2:\n points = [QPointF(point.x(), -1 * point.y() + a + 1.732 / 4 * roundness) for point in points]\n centers = [QPointF(point.x(), -1 * point.y() + a + 1.732 / 4 * roundness) for point in centers]\n rects = [get_rect_of_ellipse(center.x(), center.y(), roundness) for center in centers]\n return points, rects\n\n\nclass Tag(QGraphicsItem):\n \"\"\"A Tag is a socket of a Node and can be connected to other Tags.\"\"\"\n\n def __init__(self, shape=None, text=None, color=None, **kwargs):\n super(Tag, self).__init__(**kwargs)\n\n self.shape = shape\n self.text = text\n self.color = color\n\n self.x = 0\n self.y = 0\n self.w = TAG_W\n self.h = TAG_H\n\n self.margin = 5\n\n self.name = \"value\"\n self.displayName = self.name\n self.auto_hide = True\n\n self.textColor = QColor(10, 10, 10)\n self.fillColor = self.color if self.color is not None else QColor(170, 170, 170)\n self.highlightColor = QColor(255, 255, 0)\n\n self.setAcceptHoverEvents(True)\n\n def node(self):\n \"\"\"The Node that this Tag belongs to is its parent item.\"\"\"\n return self.parentItem()\n\n def boundingRect(self):\n \"\"\"Return the bounding box of this Tag.\"\"\"\n rect = QRectF(self.x,\n self.y,\n self.w,\n self.h)\n return rect\n\n def paint(self, painter, option, widget):\n \"\"\"Draw the Tag's shape and label.\"\"\"\n bbox = self.boundingRect()\n\n if self.shape is not None:\n painter.setPen(QPen(Qt.NoPen))\n painter.setBrush(QBrush(self.fillColor))\n\n # Draw a filled rectangle.\n if self.shape == 1:\n roundness = 3\n painter.drawRoundedRect(bbox, roundness, roundness)\n\n # Ellipse\n if self.shape == 2:\n painter.drawEllipse(bbox)\n\n # Triangle0\n if self.shape == 3:\n points, rects = get_shapes_of_triangle(self.w, roundness=2, direction=0)\n painter.drawPolygon(QPolygonF(points))\n for rect in rects:\n painter.drawEllipse(rect)\n\n # Triangle2\n if self.shape == 4:\n points, rects = get_shapes_of_triangle(self.w, roundness=2, direction=2)\n painter.drawPolygon(QPolygonF(points))\n for rect in rects:\n painter.drawEllipse(rect)\n\n if self.text is not None:\n painter.setPen(QPen(self.textColor))\n font = painter.font()\n fm = QFontMetrics(font)\n w = fm.boundingRect(self.text).width() + 10\n h = fm.boundingRect(self.text).height()\n rect = QRectF(0 - (w - bbox.width()) / 2.0,\n 0 - (h - bbox.height()) / 2.0,\n w,\n h)\n painter.drawText(rect, Qt.AlignCenter, self.text)\n\n\nclass PixmapTag(QGraphicsPixmapItem):\n def __init__(self, icon=None, **kwargs):\n super(PixmapTag, self).__init__(**kwargs)\n\n self.scale_factor = 10\n self.w = TAG_W\n self.h = TAG_H\n self.icon = icon\n\n self.auto_hide = True\n self.setAcceptHoverEvents(True)\n\n def set_pixmap(self, color=None):\n 
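# Rasterize the icon scale_factor times larger than the tag, then scale the\n        # item back down; the oversampling keeps the pixmap crisp (assumed intent).\n        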
self.setPixmap(resource.get_pixmap(self.icon,\n color=color,\n scale=QSize(TAG_W * self.scale_factor, TAG_H * self.scale_factor)))\n self.setScale(1.0 / self.scale_factor)\n\n\nclass WarningTag(PixmapTag):\n def __init__(self, warning=''):\n super(WarningTag, self).__init__()\n self.icon = resource.get_pic()\n self.setToolTip(warning)\n self.set_pixmap(color=[250, 20, 20])\n self.auto_hide = False\n","sub_path":"sins/ui/widgets/version_dependence/graph/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":5691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"1572992","text":"#!/usr/bin/env python3\n\n\nclass Solution:\n\n def isMatch(self, s, p):\n # print(\"isMatch, s: {}, p: {}\".format(s, p))\n dp = [[False for i in range(len(p) + 1)] for j in range(len(s) + 1)]\n\n dp[len(s)][len(p)] = True\n\n for i in range(len(s), -1, -1):\n for j in range(len(p) - 1, -1, -1):\n if j + 1 < len(p) and p[j + 1] == '*':\n dp[i][j] = i < len(s) and p[j] in {s[i], '.'} and dp[i + 1][j] or dp[i][j + 2]\n else:\n dp[i][j] = i < len(s) and p[j] in {s[i], '.'} and dp[i + 1][j + 1]\n\n # for i in dp:\n # print(i)\n return dp[0][0]\n","sub_path":"10_regular_expression_matching/regular_expression_matching_dp_reverse.py","file_name":"regular_expression_matching_dp_reverse.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"559032735","text":"import time\nfrom uniback.tools.ub_process import UBProcess\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom uniback.misc import credential_manager\n\n# WORK IN PROGRESS\n# Job scheduler will be a thread instead of a process\n# this class left as a reference\n\n\nclass JobSchedulerOld(UBProcess):\n \n description = (\"Periodically checks the job database and submits \"\n \"jobs that are due for running.\")\n ub_name = \"Job Scheduler\"\n ub_category = \"system\"\n\n def run(self):\n self.init_db_session()\n # group = self.session.query(JobQueue).filter_by(id=0)\n # self.logger.debug(\"process says\" + group.description)\n self.queue.put(\n {'process_id': self.pid,\n 'data_name': 'testvar',\n 'data': \"It totally worked :^)\"})\n self.logger.debug(\"This pid == \" + str(self.pid))\n try:\n test = credential_manager.get_all_credential_groups()\n for butt in test:\n self.logger.debug(\"butt\" + butt['description'])\n except Exception as e:\n self.logger.debug(e)\n while True:\n credential_manager.get_crypt_key()\n time.sleep(10)\n\n def init_db_session(self):\n engine = create_engine('sqlite:///../ub_system.db')\n Session = sessionmaker(bind=engine)\n self.session = Session()\n","sub_path":"uniback/tools/DEL_job_scheduler_old.py","file_name":"DEL_job_scheduler_old.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"583666153","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n File Name: TencentComments.py\n Description : 爬取腾讯新闻下的用户评论\n Author : zhangbo\n date: 2018/5/30\n-------------------------------------------------\nUpdate Content:\n 1.利用mongodb,实现从上一次断点继续执行爬虫系统\n 2.comments_info_db : 保存请求评论url的信息\n 3.news_id_db : 保存请求新闻文章id和总新闻数,仅一条数据\n 4.date : 2018/5/31\n\n\n-------------------------------------------------\n\"\"\"\nimport re\nimport sys\nimport json\nimport time\nimport random\nimport queue\nimport requests\nimport threading\nimport 
pymongo\n\n\nclass TencentComments(object):\n def __init__(self):\n self.q = queue.Queue()\n self.lock = threading.Lock()\n client = pymongo.MongoClient('localhost', 27017)\n db = client['tencent']\n self.comments_info_db = db['comments_info_db']\n self.news_id_db = db['news_id_db']\n\n\n def reqheader(self):\n ua_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',\n 'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',\n 'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2 ',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16'\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)'\n ]\n headers = {\n 'User-Agent': random.choice(ua_list),\n 'Accept': '*/*',\n 'Connection': 'keep-alive',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Host': 'coral.qq.com',\n 'Cookie':'pgv_info=ssid=s1526278171; pgv_pvid=8133316984; pgv_pvi=7224922112; pgv_si=s9208759296; qqmusic_fromtag=66; ptisp=ctc; ptui_loginuin=654580465; pt2gguin=o0654580465; RK=pHB1vNC1a1; ptcz=9128a13267300bdaf1279912bb516f7f3c6032cd5df1c3ad70cb53d1544cbc97; o_cookie=654580465; uin=o0654580465; pac_uid=1_654580465; g_tk=c90e7f3a4e31c8ecded6a0082387b0d69affd2d6; tvfe_boss_uuid=6657370b2c683020; mobileUV=1_163a0acc265_a0285; skey=@vBiymAZEN; webwx_data_ticket=gSeri+s5xmynBNRBQm/rrLHA; ts_last=coral.qq.com/2150468744; ts_uid=67245812; ad_play_index=40'\n }\n return headers\n\n\n def get(self,url):\n headers = self.reqheader()\n index = 0\n while index<3:\n try:\n res = requests.get(url,headers=headers)\n st = res.status_code\n if st>=200 and st<=304:\n return res.text\n except Exception as e:\n index = index + 1\n\n\n def show_percent(self,title,total,ok):\n per = 100 - 100*(total-ok)/total\n if per>100:\n per = 100\n print('[正在处理]正在爬取新闻< %s >的评论...[%.2f %%]' % (title,per)+'\\r',end='')\n\n\n\n def get_new_id(self,lastid,current_total):\n while True:\n now_time = int(time.time())\n url = 'http://coral.qq.com/home/comment?lastid={}&reqnum=50&callback=mainComment&_={}'.format(lastid,now_time)\n time.sleep(0.5)\n content = self.get(url)\n if not content:\n print('[请求报错]无法请求该url链接资源!!!')\n sys.exit(0)\n mainComment = re.findall('\\((.*)\\)',content)[0]\n jsonInfo = json.loads(mainComment)\n data = jsonInfo['data']\n lastid = data['last']\n if lastid:\n for target in 
data['targetlist']:\n title = target['title']\n targetid= target['targetid']\n info = {\n 'title':title,\n 'tid':targetid\n }\n self.comments_info_db.insert(info)\n current_total = current_total+len(data['targetlist'])\n print('[已完成]共有< %s >篇新闻,已处理 < %d >\\r' % (data['total'],current_total),end='')\n self.news_id_db.remove({})\n self.news_id_db.insert({'lastid': lastid, 'current_total': current_total})\n else:\n print('[已完成]没有新闻数据了!')\n break\n\n\n def save_comments_text(self,title,targetId):\n lastid = '0'\n comment_num = 0\n self.ok = False\n while True:\n now_time = int(time.time())\n url = 'http://coral.qq.com/article/{}/comment/v2?callback=_article{}commentv2&' \\\n 'orinum=10&oriorder=o&pageflag=1&cursor={}&scorecursor=0&orirepnum=2&' \\\n 'reporder=o&reppageflag=1&source=1&_={}'.format(targetId, targetId, lastid, now_time)\n time.sleep(0.5)\n content = self.get(url)\n if not content:\n continue\n try:\n comment = re.findall('\\((.*)\\)', content)[0]\n jsonInfo = json.loads(comment)\n data = jsonInfo['data']\n lastid = data['last']\n if lastid:\n oritotal = int(data['oritotal'])\n for target in data['oriCommList']:\n with open('tencentComments.txt', 'a+') as f:\n f.write(target['content'] + '\\n')\n comment_num = comment_num + len(data['oriCommList'])\n self.show_percent(title, oritotal, comment_num)\n else:\n break\n except Exception as e:\n print('[解析报错]', e)\n break\n\n\n def news_id_process(self):\n if self.news_id_db.find_one({}):\n print('[开始]腾讯新闻评论爬虫系统 - again -')\n new_info = self.news_id_db.find_one({})\n lastid = new_info['lastid']\n current_total = new_info['current_total']\n else:\n print('[开始]腾讯新闻评论爬虫系统 - start -')\n lastid = '0'\n current_total = 0\n self.get_new_id(lastid,current_total)\n\n\n def handler_comments(self):\n while True:\n self.lock.acquire()\n if not self.comments_info_db.find_one({}):\n self.lock.release()\n break\n info = self.comments_info_db.find_one({})\n self.save_comments_text(info['title'],info['tid'])\n print('[已完成]删除< %s >新闻的评论id!!!' % info['title'])\n self.comments_info_db.remove({'tid':info['tid']})\n self.lock.release()\n\n\n def main(self):\n exist_Flag, threads = 1, []\n while exist_Flag < 6:\n try:\n self.news_id_process()\n for i in range(0, 10):\n t = threading.Thread(target=self.handler_comments)\n threads.append(t)\n t.start()\n for t in threads:\n t.join()\n break\n except Exception as e:\n print('[服务等待]等待5分钟' + (exist_Flag * 3) * '.' 
+ '\\r', end='')\n time.sleep(60)\n exist_Flag = exist_Flag + 1\n if exist_Flag == 6:\n print('[爬虫系统报错]请联系开发人员!!!!!!')\n else:\n print('[结束]腾讯新闻评论爬虫系统 - end -')\n\n\nif __name__ == '__main__':\n tencent = TencentComments()\n tencent.main()\n\n\n\n","sub_path":"scrapyExercise/TencentComments.py","file_name":"TencentComments.py","file_ext":"py","file_size_in_byte":8735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"519540682","text":"import collections\n\n\nclass NodesComparison:\n def __init__(self, head_node_1, head_node_2):\n self.head_node_1 = head_node_1\n self.head_node_2 = head_node_2\n self.store_get_all_nodes_type = []\n self.store_get_tables_for_join = []\n self.list_joins = ['Nested Loop', 'Hash Join', 'Merge Join']\n self.unique_tables_for_both_query = []\n\n \"\"\"\n recursive function to get all the node based on the node types\n \"\"\"\n def get_all_node_type(self, node, request_node_type):\n if node.node_type in request_node_type:\n self.store_get_all_nodes_type.append(node)\n for child in node.children:\n self.get_all_node_type(child, request_node_type)\n\n \"\"\"\n recursive function to get the tables used for join\n access self.store_get_tables_for_join for the list of tables used for join\n \"\"\"\n def get_tables_for_join(self, node):\n if node.relation_name != None:\n self.store_get_tables_for_join.append(node)\n\n if node.children != None:\n for child_node in node.children:\n self.get_tables_for_join(child_node)\n\n ## get tables node for a join\n def get_tables_for_join(self, node):\n if node.relation_name != None:\n self.store_get_tables_for_join.append(node)\n\n if node.children != None:\n for child_node in node.children:\n self.get_tables_for_join(child_node)\n\n # populate the table attribute for those node type has a join.\n # populate with the nodes.\n def populate_tables_to_join_nodes(self, node):\n # if node.node_type is in one of the joins\n if node.node_type in self.list_joins:\n self.store_get_tables_for_join.clear()\n self.get_tables_for_join(node)\n node.tables = self.store_get_tables_for_join.copy()\n\n for child in node.children:\n self.populate_tables_to_join_nodes(child)\n\n def populate_nodes_with_level(self,node,starting_level):\n if node.node_type != None:\n node.level = starting_level\n for child in node.children:\n self.populate_nodes_with_level(child ,starting_level+1)\n\n # unused\n def get_node_and_tables(self, list_node_type):\n nodes_and_tables = []\n for node in list_node_type:\n self.store_get_tables_for_join.clear()\n self.get_tables_for_join(node)\n tables = self.store_get_tables_for_join.copy()\n nodes_and_tables.append([node, tables])\n\n return nodes_and_tables\n\n def compare_joins(self):\n compareString = \"\"\n # clear attribute of this class\n self.store_get_all_nodes_type.clear()\n self.store_get_tables_for_join.clear()\n\n # We populate the 'tables' attribute for those node has a join type (Merge Join, Nested Loop, Hash Join).\n # It will populate with Node object, and not the table name instead.\n # e.g Hash Join uses relation table of Customer and Order. 
Hence, in the Hash Join the 'tables' attribute keeps the Node objects\n        # of Customer and Order in an array.\n        self.populate_tables_to_join_nodes(self.head_node_1)\n        self.populate_tables_to_join_nodes(self.head_node_2)\n\n        # We populate the 'level' attribute for all the nodes in both query 1 and query 2.\n        self.populate_nodes_with_level(self.head_node_1, 1) # starting level is 1 - the highest level of the tree has level 1.\n        self.populate_nodes_with_level(self.head_node_2, 1)\n\n        # get all nodes that have a join type for Query 1\n        self.get_all_node_type(self.head_node_1, self.list_joins)\n        all_join_nodes_in_query_1 = self.store_get_all_nodes_type.copy()\n\n        # We clear this so we can run the function, get_all_node_type, again for Query 2.\n        self.store_get_all_nodes_type.clear()\n\n        # get all nodes that have a join type for Query 2\n        self.get_all_node_type(self.head_node_2, self.list_joins)\n        all_join_nodes_in_query_2 = self.store_get_all_nodes_type.copy()\n\n        # We reverse the lists so the biggest node (in terms of no. of table joins - usually the topmost join node)\n        # is at the back of the list.\n        all_join_nodes_in_query_1.reverse()\n        all_join_nodes_in_query_2.reverse()\n\n        all_join_node_full_difference = []\n        all_join_nodes_in_query_2_duplicate = all_join_nodes_in_query_2.copy()\n        for node2 in all_join_nodes_in_query_2:\n            found = False\n            for node1 in all_join_nodes_in_query_1:\n                if self.is_there_similiarities(node1, node2):\n                    # check what the difference between these two join nodes is,\n                    # i.e. whether they use the same tables and the same types of scan.\n                    compareString += self.difference(node1, node2)\n\n                    # store all the tables in the array so that the next loop will NOT print duplicate changes.\n                    self.store_tables(node1)\n                    self.store_tables(node2)\n\n                    # Remove the nodes involved here from the lists.\n                    all_join_nodes_in_query_1.remove(node1)\n                    all_join_nodes_in_query_2_duplicate.remove(node2)\n                    found = True\n                    break\n\n            # Did not find any join node in Query 1 that is similar.\n            # Hence, we deem this join node (and the tables it joins) totally different from Query 1.\n            if not found:\n                all_join_node_full_difference.append(node2)\n\n\n        # Whatever is left in query 1 is not used in Query 2.
 Hence, we just display the changes out.\n        for node in all_join_nodes_in_query_1:\n            string_table = \"\"\n            for table in node.tables:\n                string_table = string_table + table.relation_name + \" \"\n            string_table = string_table.strip()\n            compareString += \"The \" + node.node_type + \" node with tables of \" + string_table + \" in Query 1 is not used in Query 2.\" + \"\\n\"\n\n        # Nodes that are not similar at all were pushed to all_join_node_full_difference.\n        # We can say these nodes do not appear in the previous query (query 1) at all.\n        for node in all_join_node_full_difference:\n            string_table = \"\"\n            for table in node.tables:\n                string_table = string_table + table.relation_name + \" \"\n            string_table = string_table.strip()\n            compareString += \"There is a new \" + node.node_type + \" node with tables of \" + string_table + \" in Query 2\" + \"\\n\"\n\n        return compareString\n\n    def store_tables(self, node):\n        for node in node.tables:\n            if node.relation_name not in self.unique_tables_for_both_query:\n                self.unique_tables_for_both_query.append(node.relation_name)\n\n\n    def is_there_similiarities(self, node1, node2):\n        # Returns True as soon as the two join nodes share a relation name.\n        for table1 in node1.tables:\n            for table2 in node2.tables:\n                if table1.relation_name == table2.relation_name:\n                    return True\n        return False\n\n    # check whether both join nodes use the same/different no. of tables,\n    # and check whether the tables that are the same use the same scan (index, bitmap, seq scan)\n    def difference(self, node1, node2):\n        diffString = \"\"\n        previous_node = node1\n        current_node = node2\n        if node1.node_type != node2.node_type:\n            diffString += \"The \" + node1.node_type + \" (Level \" + str(node1.level) + \") in Query 1 has evolved to \" + node2.node_type + \" (Level \" + str(node2.level) + \") in Query 2\" + \"\\n\"\n\n        dependency_nodes_1 = node1.tables.copy() # query 1\n        dependency_nodes_2 = node2.tables.copy() # query 2\n\n        table_pairs_nodes = []\n        # Let's do an n^2 comparison (not always n^2, as we remove items in the inner loop)\n        # to create table pairs for Query 1 and Query 2.\n        # e.g. [[A],[A]] -> The first array contains the relation found in Query 1, the second array the relation found in Query 2.\n        # Both must have the same relation, which is A in this example. Note that we keep the Node object instead of the relation name.\n        # e.g. [[A],[]] -> This pair can happen too. It means A is found in Query 1, but not in Query 2.\n        # However, the pair [[],[A]] will not happen. Although it is valid to say that A is only found in Query 2,\n        # we decided to take what is left over in dependency_nodes_2.\n        for node1 in dependency_nodes_1:\n            found = False\n            for node2 in dependency_nodes_2:\n                # print(\"Accessing \" + node1.relation_name + \"=\" + node2.relation_name)\n                if (node1.relation_name == node2.relation_name):\n                    table_pairs_nodes = self.insert_table_pairs(node1, node2, table_pairs_nodes)\n                    dependency_nodes_2.remove(node2)\n                    found = True\n                    # break so we neither pair one node twice nor keep iterating over a list we just mutated\n                    break\n\n            # if it is not found, then we add a table pair with the 2nd array being empty. 
(not found in Q2)\n if found == False:\n table_pairs_nodes = self.insert_table_pairs(node1, None, table_pairs_nodes)\n\n # remove all matching scan type of the tables for both query\n table_pairs = table_pairs_nodes.copy()\n for pair in table_pairs:\n if pair[0][0].relation_name not in self.unique_tables_for_both_query:\n if len(pair[0]) > 0 and len(pair[1]) > 0:\n for node1 in pair[0]:\n for node2 in pair[1]:\n # If both has the same node_type\n # e.g Seq Scan == Seq Scan\n if (node1.node_type == node2.node_type):\n pair[0].remove(node1)\n pair[1].remove(node2)\n\n # remove empty pairs\n i = 0\n while i < len(table_pairs):\n if len(table_pairs[i][0]) == 0 and len(table_pairs[i][1]) == 0:\n del table_pairs[i]\n else:\n i+=1\n\n # print the difference in scan type for the same tables.\n for pair in table_pairs:\n if pair[0][0].relation_name not in self.unique_tables_for_both_query:\n for node1 in pair[0]:\n for node2 in pair[1]:\n # print(\"With the evolved \"+current_node.node_type+\", the type of scan has evolve from \"+node1.node_type +\" to \"+node2.node_type +\" for the table, \"+node1.relation_name)\n diffString += \"With the evolved \"+current_node.node_type+ \", the type of scan has evolve from \" + node1.node_type + \" to \" + node2.node_type + \" for the table, \" + node1.relation_name + \"\\n\"\n pair[0].remove(node1)\n pair[1].remove(node2)\n\n # remove those empty pairs E.g [[],[]]\n i = 0\n while i < len(table_pairs):\n if len(table_pairs[i][0]) == 0 and len(table_pairs[i][1]) == 0:\n del table_pairs[i]\n else:\n i += 1\n\n tables_used_in_q1_only = False\n tables_used_in_q2_only = False\n\n string_table_used_in_q1_but_not_in_q2 = \"\"\n for pair in table_pairs:\n if len(pair[1]) == 0:\n if pair[0][0].relation_name not in self.unique_tables_for_both_query:\n tables_used_in_q1_only = True\n string_table_used_in_q1_but_not_in_q2 = string_table_used_in_q1_but_not_in_q2 + pair[0][0].relation_name + \" \"\n\n string_table_used_in_q2_but_not_in_q1 = \"\"\n\n for node in dependency_nodes_2:\n if node.relation_name not in self.unique_tables_for_both_query:\n tables_used_in_q2_only = True\n string_table_used_in_q2_but_not_in_q1 = string_table_used_in_q2_but_not_in_q1 + node.relation_name + \" \"\n\n string_table_used_in_q2_but_not_in_q1 = string_table_used_in_q2_but_not_in_q1.strip()\n string_table_used_in_q1_but_not_in_q2 = string_table_used_in_q1_but_not_in_q2.strip()\n\n\n # if there are tables in used in q1 and tables used in q2. We know that there tables in q1 is not used in q2.\n if tables_used_in_q1_only and tables_used_in_q2_only:\n diffString += \"It seems like the tables (\"+string_table_used_in_q1_but_not_in_q2 + \\\n \") for \"+previous_node.node_type+\" (Level \"+str(previous_node.level) + \\\n \") in Query 1 is replaced with tables (\"+string_table_used_in_q2_but_not_in_q1 + \") for \"+current_node.node_type+\" (Level \"+str(current_node.level)+\") in Query 2\" + \"\\n\"\n elif(not tables_used_in_q1_only and tables_used_in_q2_only):\n # If there is no tables in q1, and there is table in q2. We can say that there are new tables used in Q2\n diffString += \"It seems like that there is new tables (\"+string_table_used_in_q2_but_not_in_q1+\") used in Q2\" + \"\\n\"\n elif(tables_used_in_q1_only and not tables_used_in_q2_only):\n # If there is tables in q1 but there is no table in q2. 
We can say that those tables in Q1 is not in Q2\n diffString += \"It seems like the \"+current_node.node_type+\" (Level \"+str(current_node.level)+\") for Query 2 did not use the tables (\"+string_table_used_in_q1_but_not_in_q2+\") used by the \"+previous_node.node_type+\" (Level \"+str(previous_node.level)+\") in Query 1 at all.\"+\"\\n\"\n\n return diffString\n\n\n def insert_table_pairs(self, node1, node2, table_pairs):\n found = False\n for pair in table_pairs:\n # if found, append into the pair\n if node1.relation_name in pair[0]:\n found = True\n pair[0].append(node1)\n if node2 != None:\n pair[1].append(node2)\n\n # if not found, create a new pair.\n if found == False:\n if node2 != None:\n table_pairs.append([[node1], [node2]])\n else:\n table_pairs.append([[node1], []])\n\n return table_pairs\n\n","sub_path":"NodesComparison.py","file_name":"NodesComparison.py","file_ext":"py","file_size_in_byte":14416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"557396597","text":"# -*- coding:utf-8 -*-\n\"\"\"\n===================================================================\nProvide coordinate file class which do operations on these files.\n===================================================================\nWritten by PytLab , November 2014\nUpdated by PytLab , August 2015\n\n==============================================================\n\n\"\"\"\nimport numpy as np\n\nfrom vaspy import VasPy, CarfileValueError\nfrom functions import *\n\n\nclass AtomCo(VasPy):\n \"Base class to be inherited by atomco classes.\"\n def __init__(self, filename):\n VasPy.__init__(self, filename)\n\n def __repr__(self):\n return self.get_content()\n\n def __str__(self):\n return self.__repr__()\n\n def __getattribute__(self, attr):\n '''\n 确保atomco_dict能够及时根据data值的变化更新.\n '''\n if attr == 'atomco_dict':\n return self.get_atomco_dict(self.data)\n else:\n return object.__getattribute__(self, attr)\n\n def verify(self):\n if len(self.data) != self.ntot:\n raise CarfileValueError('Atom numbers mismatch!')\n\n def get_atomco_dict(self, data):\n \"根据已获取的data和atoms, atoms_num, 获取atomco_dict\"\n # [1, 1, 1, 16] -> [0, 1, 2, 3, 19]\n idx_list = [sum(self.atoms_num[:i]) for i in xrange(1, len(self.atoms)+1)]\n idx_list = [0] + idx_list\n data_list = data.tolist()\n atomco_dict = {}\n for atom, idx, next_idx in zip(self.atoms, idx_list[:-1], idx_list[1:]):\n atomco_dict.setdefault(atom, data_list[idx: next_idx])\n\n self.atomco_dict = atomco_dict\n\n return atomco_dict\n\n\nclass XyzFile(AtomCo):\n \"\"\"\n Create a .xyz file class.\n\n Example:\n\n >>> a = XyzFile(filename='ts.xyz')\n\n Class attributes descriptions\n =======================================================================\n Attribute Description\n ============ =======================================================\n filename string, name of the file the direct coordiante data\n stored in\n ntot int, the number of total atom number\n step int, STEP number in OUT.ANI file\n atoms list of strings, atom types\n natoms list of tuples, same shape with atoms.\n (atom name, atom number)\n atom number of atoms in atoms\n atomco_dict dict, {atom name: coordinates}\n data np.array, coordinates of atoms, dtype=float64\n ============ =======================================================\n \"\"\"\n def __init__(self, filename):\n AtomCo.__init__(self, filename)\n self.load()\n self.verify()\n\n # 加载文件内容\n def load(self):\n with open(self.filename, 'r') as f:\n content_list = f.readlines()\n ntot = 
int(content_list[0].strip()) # total atom number\n step = int(str2list(content_list[1])[-1]) # iter step number\n\n #get atom coordinate and number info\n data_list = [str2list(line) for line in content_list[2:]]\n data_array = np.array(data_list) # dtype=np.string\n atoms_list = list(data_array[:, 0]) # 1st column\n data = np.float64(data_array[:, 1:]) # rest columns\n\n #get atom number for each atom\n atoms = []\n for atom in atoms_list:\n if atom not in atoms:\n atoms.append(atom)\n atoms_num = [atoms_list.count(atom) for atom in atoms]\n natoms = zip(atoms, atoms_num)\n\n #set class attrs\n self.ntot = ntot\n self.step = step\n self.atoms = atoms\n self.atoms_num = atoms_num\n self.natoms = natoms\n self.data = data\n\n #get atomco_dict\n self.get_atomco_dict(data)\n\n return\n\n def coordinate_transfrom(self, axes=np.array([[1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0]])):\n \"相对坐标和实坐标转换\"\n \"Use Ax=b to do coordinate transform. direct to cartesian\"\n b = np.matrix(self.data.T)\n A = np.matrix(axes).T\n x = A.I*b\n\n return np.array(x.T)\n\n def get_content(self):\n \"获取最新文件内容字符串\"\n ntot = \"%12d\\n\" % self.ntot\n step = \"STEP =%9d\\n\" % self.step\n data = atomdict2str(self.atomco_dict, self.atoms)\n content = ntot + step + data\n\n return content\n\n def tofile(self, filename='atomco.xyz'):\n \"XyzFile object to .xyz file.\"\n content = self.get_content()\n\n with open(filename, 'w') as f:\n f.write(content)\n\n return\n\n\nclass PosCar(AtomCo):\n def __init__(self, filename='POSCAR'):\n \"\"\"\n Class to generate POSCAR or CONTCAR-like objects.\n\n Example:\n\n >>> a = PosCar(filename='POSCAR')\n\n Class attributes descriptions\n =======================================================================\n Attribute Description\n ============ =======================================================\n filename string, name of the file the direct coordiante data\n stored in\n axes_coeff float, Scale Factor of axes\n axes np.array, axes of POSCAR\n atoms list of strings, atom types\n ntot int, the number of total atom number\n natoms list of int, same shape with atoms\n atom number of atoms in atoms\n tf list of list, T&F info of atoms\n data np.array, coordinates of atoms, dtype=float64\n ============ =======================================================\n \"\"\"\n AtomCo.__init__(self, filename)\n #load all data in file\n self.load()\n self.verify()\n\n def load(self):\n \"获取文件数据信息\"\n \"Load all information in POSCAR.\"\n with open(self.filename, 'r') as f:\n content_list = f.readlines()\n #get scale factor\n axes_coeff = float(content_list[1])\n #axes\n axes = [str2list(axis) for axis in content_list[2:5]]\n #atom info\n atoms = str2list(content_list[5])\n atoms_num = str2list(content_list[6]) # atom number\n #data\n data, tf = [], [] # data and T or F info\n for line_str in content_list[9:]:\n line_list = str2list(line_str)\n data.append(line_list[:3])\n if len(line_list) > 3:\n tf.append(line_list[3:])\n #data type convertion\n axes = np.float64(np.array(axes)) # to float\n atoms_num = [int(i) for i in atoms_num]\n data = np.float64(np.array(data))\n\n #set class attrs\n self.axes_coeff = axes_coeff\n self.axes = axes\n self.atoms = atoms\n self.atoms_num = atoms_num\n self.ntot = sum(atoms_num)\n self.natoms = zip(atoms, atoms_num)\n self.data = data\n self.tf = tf\n\n #get atomco_dict\n self.get_atomco_dict(data)\n\n return\n\n def get_content(self):\n \"根据对象数据获取文件内容字符串\"\n content = 'Created by VASPy\\n'\n axe_coeff = \" %.9f\\n\" % self.axes_coeff\n #axes\n 
axes_list = self.axes.tolist()\n axes = ''\n for axis in axes_list:\n axes += \"%14.8f%14.8f%14.8f\\n\" % tuple(axis)\n #atom info\n atoms, atoms_num = zip(*self.natoms)\n atoms = (\"%5s\"*len(atoms)+\"\\n\") % atoms\n atoms_num = (\"%5d\"*len(atoms_num)+\"\\n\") % atoms_num\n #string\n info = \"Selective Dynamics\\nDirect\\n\"\n #data and tf\n data_tf = ''\n for data, tf in zip(self.data.tolist(), self.tf):\n data_tf += (\"%18.12f\"*3+\"%5s\"*3+\"\\n\") % tuple(data+tf)\n #merge all strings\n content += axe_coeff+axes+atoms+atoms_num+info+data_tf\n\n return content\n\n def tofile(self, filename='POSCAR_c'):\n \"生成文件\"\n \"PosCar object to POSCAR or CONTCAR.\"\n content = self.get_content()\n\n with open(filename, 'w') as f:\n f.write(content)\n\n return\n","sub_path":"vaspy/atomco.py","file_name":"atomco.py","file_ext":"py","file_size_in_byte":8319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"465540928","text":"import sys\nimport pygame\nfrom pygame.locals import *\nimport board\nimport math\n\ndef update_board(screen, board):\n SEP_WIDTH = 14\n SQ_WIDTH = 107\n WHITE_SPACE_X = 38\n WHITE_SPACE_Y = 37\n COLORS = ([255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0],\n [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],\n [0, 0, 255], [85, 0, 255], [170, 0, 255], [255, 0, 255],\n [255, 255, 255], [170, 170, 170], [85, 85, 85], [0, 0, 0],)\n screen.fill(BLACK)\n screen.blit(background_board, background_boardrect)\n\n for i in range(0, len(game_board.matrix)):\n if game_board.matrix[i] != 0:\n power_2 = math.log(game_board.matrix[i], 2)\n pygame.draw.rect(\n screen, \n COLORS[int(power_2) % 15], \n [ #Rectangle\n (((i % 4) * SQ_WIDTH) + ((i % 4) * SEP_WIDTH)) + WHITE_SPACE_X, \n (((i // 4) * SQ_WIDTH) + ((i // 4) * SEP_WIDTH)) + WHITE_SPACE_Y, \n SQ_WIDTH, \n SQ_WIDTH\n ],#Rectangle Border\n 0)\n textsurface = myfont.render(str(game_board.matrix[i]), True, (255, 255, 255))\n screen.blit(textsurface, ((((i % 4) * SQ_WIDTH) + ((i % 4) * SEP_WIDTH)) + WHITE_SPACE_X + 45, \n (((i // 4) * SQ_WIDTH) + ((i // 4) * SEP_WIDTH)) + WHITE_SPACE_Y + 30))\n pygame.display.flip()\n\n#initialize classes\npygame.init()\n\n#Window Size\nsize = width, height = 548, 549\n\n#Color\nBLACK = 0, 0, 0\nINIT_SQUARE_COLOR = 0, 0, 0\n#Decrease order 0, 1, 2\n\n#Set the screen size\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption('2048-AI')\npygame.display.set_icon(pygame.image.load(\"C:/Users/Alex/Documents/GitHub/2048-AI/game_environment/background_board.png\"))\n\n#Set the font\nmyfont = pygame.font.SysFont('Comic Sans MS', 30)\n\n#Get the background image from a picture\nbackground_board = pygame.image.load(\"C:/Users/Alex/Documents/GitHub/2048-AI/game_environment/background_board.png\")\n#Create the object as a moving object\nbackground_boardrect = background_board.get_rect()\n\n#Create the board\nvalid = False\nis_over = False\ngame_board = board.Board()\nprevious_board = board.Board()\n\n#Spawn a number\ngame_board.spawn_number()\n\nwhile not is_over:\n if(game_board.is_full()):\n is_over = True\n continue\n #Copy the matrix to make a previous board with the newly spawned number\n board.copy_length_16_matrix(game_board.matrix, previous_board.matrix)\n\n #Determine if the move is valid\n #Make this a separate thread to allow the program to run faster\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n move = \"left\"\n elif event.key 
== pygame.K_RIGHT:\n move = \"right\"\n elif event.key == pygame.K_DOWN:\n move = \"down\"\n elif event.key == pygame.K_UP:\n move = \"up\"\n else:\n move = \"\"\n board.determine_move(move, game_board)\n #Print board to screen \n update_board(screen, game_board)\n\n #Determine if the resulting move did anything\n if(previous_board.matrix != game_board.matrix):\n #Copy the matrix to make a previous board with the newly completed move\n board.copy_length_16_matrix(game_board.matrix, previous_board.matrix)\n #Spawn a number\n game_board.spawn_number()\n #Print board to screen\n update_board(screen, game_board)\n\nprint(\"Score:\", max(game_board.matrix)) \nprint(\"Game Over\")\nsys.exit()","sub_path":"game_environment/game_visual.py","file_name":"game_visual.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"162523690","text":"class BoardO:\n board = None\n pieces = 0\n def create_board(self, n):\n board = []\n for i in range(n):\n board.append([1,] * n)\n self.board = board\n \n @classmethod\n def copy_board(cls, b):\n new_board = BoardO()\n new_board.board = []\n for row in b.board:\n new_board.board.append(list(row))\n new_board.pieces = b.pieces\n return new_board\n\n def find_empty_spot(self, start_y=0, start_x=0):\n lenb = len(self.board)\n for i in range(start_y, lenb):\n for j in range(start_x, lenb):\n if self.board[i][j] == 1:\n return i,j\n return -1, -1\n\n def place_piece(self, y, x):\n if self.board[y][x] != 1:\n return False\n else:\n self.board[y][x] = 'Q'\n self.pieces += 1\n\n lenb = len(self.board)\n # Straight boxes\n for i in range(lenb):\n if self.board[y][i] == 1:\n self.board[y][i] = 0\n if self.board[i][x] == 1:\n self.board[i][x] = 0\n\n # diagonal boxes\n for i in range(lenb):\n if x+i >=lenb or y+i >= lenb:\n break\n if self.board[y+i][x+i] == 1:\n self.board[y+i][x+i] = 0\n for i in range(lenb):\n if x - i <0 or y-i <0:\n break\n if self.board[y-i][x-i] == 1:\n self.board[y-i][x-i] = 0\n for i in range(lenb):\n if x+i >= lenb or y-i <0:\n break\n if self.board[y-i][x+i] == 1:\n self.board[y-i][x+i] = 0\n for i in range(lenb):\n if x - i <0 or y+i >= lenb:\n break\n if self.board[y+i][x-i] == 1:\n self.board[y+i][x-i] = 0\n def print_board(self):\n lenb = len(self.board)\n for i in range(lenb):\n print(self.board[i])\n \n def get_conv_board(self):\n lenb = len(self.board)\n b = []\n for i in range(lenb):\n row = \"\"\n for j in range(lenb):\n piece = self.board[i][j]\n if piece == 'Q':\n row += \"Q\"\n else:\n row += \".\"\n b.append(row)\n return b\n \nclass Solution:\n def solveNQueens(self, n: int):\n if n ==0:\n return [[]]\n boards = {}\n for i in range(1, n+1):\n boards[i] = []\n\n for i in range(n):\n b = BoardO()\n b.create_board(n)\n b.place_piece(0, i)\n boards[1].append(b)\n\n for i in range(1, n):\n boards_list = boards[i]\n for board in boards_list:\n cur_y = i\n cur_x = 0\n \n while True:\n y, x = board.find_empty_spot(cur_y,cur_x)\n if y == -1:\n break\n elif cur_y != y:\n break\n else:\n b = BoardO.copy_board(board)\n cur_x = x + 1\n b.place_piece(y,x)\n boards[i+1].append(b)\n\n blist = []\n for b in boards[n]:\n print()\n b.print_board()\n blist.append(b.get_conv_board())\n return blist\n\ns = Solution()\nprint(s.solveNQueens(0))","sub_path":"leetcode/100/51_n_queens.py","file_name":"51_n_queens.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
+{"seq_id":"140503501","text":"import glob\n# Add to wes to the sys path\nimport sys\nimport os\nwesDir = os.path.realpath(os.path.join(__file__, \"..\", \"..\"))\nsys.path.append(wesDir)\nfrom wes.framework_plugins.common import Framework\n\n\nclass CustomFramework(Framework):\n def __init__(self, workingDir, processors):\n self.workingDir = workingDir\n self.webContextDir = self._find_java_web_context()\n self.processor = processors['java']\n\n def identify(self):\n \"\"\"\n This method attempts to identify all files within the web context folder with the jsp extension and not under\n the WEB-INF directory. This method returns True if there are files and false if there aren't any.\n :return: Boolean\n \"\"\"\n globPath = os.path.join(self.workingDir, self.webContextDir, '**', '*.jsp')\n files = glob.glob(globPath, recursive=True)\n\n files = list(filter(lambda x: os.path.isfile(x) and 'WEB-INF' not in x, files))\n\n if len(files) > 0:\n return True\n else:\n return False\n\n def find_endpoints(self):\n \"\"\"\n This is just a wrapper method around the find_public_jsps to make if consistent across all plugins.\n :return: Dictionary from self.find_public_jsps()\n \"\"\"\n return self.find_public_jsps()\n\n def find_public_jsps(self):\n \"\"\"\n This method attempts to identify all files within the web context folder with the jsp extension and not under\n the WEB-INF directory. It then adds parses the params out of the jsp and adds those to the dictionary.\n :return: A Dictionary with the endpoints\n \"\"\"\n # Find all of the java files\n globPath = os.path.join(self.workingDir, self.webContextDir, '**', '*.jsp')\n projectFiles = glob.glob(globPath, recursive=True)\n projectFiles = list(filter(lambda x: os.path.isfile(x) and 'WEB-INF' not in x, projectFiles))\n\n endpoints = []\n\n for jsp in projectFiles:\n filepath = self.processor.strip_work_dir(jsp)\n params = self.processor.get_jsp_params(jsp.split(self.webContextDir)[-1])\n params = list(map(lambda x: {'name': x, 'filepath': filepath}, params))\n endpoints.append({\n 'filepath': filepath,\n 'endpoints': set([jsp.split(self.webContextDir)[1]]),\n 'params': params if params else [],\n 'methods': set(['GET']),\n 'templates': set([filepath]) if filepath else set()\n })\n\n return endpoints\n\n def _find_java_web_context(self):\n \"\"\"\n Finds the web context directory for the java project. 
It does this by looking for directories that contain\n the 'WEB-INF' directory.\n :return: A string with the directory\n \"\"\"\n globPath = os.path.join(self.workingDir, '**')\n results = glob.glob(globPath, recursive=True)\n webContextDir = None\n for r in results:\n if 'WEB-INF' in r:\n webContextDir = r\n if not webContextDir:\n return \"web/\"\n\n webContextDir = webContextDir.split('WEB-INF')[0].replace(self.workingDir, '').lstrip('/')\n\n return webContextDir\n","sub_path":"wes/framework_plugins/plugin_public_jsps.py","file_name":"plugin_public_jsps.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"108846790","text":"#形态学操作,通常是二值图\nimport cv2\nimport numpy as np\n\nimg = cv2.imread('dige.png')\ncv2.imshow('img',img)\n\n#腐蚀操作\nkernel = np.ones((5,5),np.uint8)#定义一个核(5X5),规定迭代次数为1\nerosion = cv2.erode(img,kernel,iterations=1)#进行腐蚀操作\n#cv2.imshow('erosion',erosion)\n\npie = cv2.imread('pie.png')\n#cv2.imshow('pie',pie)\n\nkernel1 = np.ones((30,30),np.uint8)\nerosion_1 = cv2.erode(pie,kernel1,iterations=1)\nerosion_2 = cv2.erode(pie,kernel1,iterations=2)\nerosion_3 = cv2.erode(pie,kernel1,iterations=3)\nres = np.hstack((erosion_1,erosion_2,erosion_3))\n#cv2.imshow('res',res)\n\n#膨胀操作 与腐蚀操作互为逆运算\ndige_dilate = cv2.dilate(erosion,kernel,iterations=1)\n#cv2.imshow('dilate',dige_dilate)\n\n#开运算和闭运算\n#开运算:先腐蚀后膨胀\nopening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernel)#第二个参数指定开还是闭,可以看单词就知道\n#cv2.imshow('opening',opening)\n\n#闭运算:先膨胀后腐蚀\nclosing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel)\n#cv2.imshow('closing',closing)\n\n#梯度运算\n#梯度=膨胀-腐蚀\npie1 = cv2.imread('pie.png')\nkernel2 = np.ones((7,7),np.uint8)\ndilate = cv2.dilate(pie1,kernel2,iterations=5)\nerosion4 = cv2.erode(pie1,kernel2,iterations=5)\nres1 = np.hstack((dilate,erosion4))\n#cv2.imshow('res1',res1)\n#梯度运算\ngradient = cv2.morphologyEx(pie1,cv2.MORPH_GRADIENT,kernel2)\n#cv2.imshow('gradient',gradient)\n\n#礼帽和黑帽\n#礼帽 = 原始输入 - 开运算结果\n#黑帽 = 闭运算 - 原始输入\n\n#礼帽\nimg2 = cv2.imread('dige.png')\ntophat = cv2.morphologyEx(img2,cv2.MORPH_TOPHAT,kernel)\ncv2.imshow('tophat',tophat)\n\n#黑帽\nblackhat = cv2.morphologyEx(img2,cv2.MORPH_BLACKHAT,kernel)\ncv2.imshow('blackhat',blackhat)\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"opencv/Class/class3/class3.py","file_name":"class3.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"436359842","text":"\nfrom ibl_pipeline.ingest import alyxraw, data, reference, acquisition, QueryBuffer\nfrom ibl_pipeline.ingest import get_raw_field as grf\nimport uuid\nfrom tqdm import tqdm\n\n\nif __name__ == '__main__':\n # ingest dataset entries\n key_source = (alyxraw.AlyxRaw & 'model=\"data.dataset\"').proj(\n dataset_uuid=\"uuid\") - data.DataSet\n\n data_set = QueryBuffer(data.DataSet)\n\n for key in tqdm(key_source.fetch('KEY'), position=0):\n key_ds = key.copy()\n key['uuid'] = key['dataset_uuid']\n\n session = grf(key, 'session')\n if not len(acquisition.Session &\n dict(session_uuid=uuid.UUID(session))):\n print('Session {} is not in the table acquisition.Session'.format(\n session))\n print('dataset_uuid: {}'.format(str(key['uuid'])))\n continue\n\n key_ds['subject_uuid'], key_ds['session_start_time'] = \\\n (acquisition.Session &\n dict(session_uuid=uuid.UUID(session))).fetch1(\n 'subject_uuid', 'session_start_time')\n\n 
key_ds['dataset_name'] = grf(key, 'name')\n\n dt = grf(key, 'dataset_type')\n key_ds['dataset_type_name'] = \\\n (data.DataSetType & dict(dataset_type_uuid=uuid.UUID(dt))).fetch1(\n 'dataset_type_name')\n\n user = grf(key, 'created_by')\n\n if user != 'None':\n try:\n key_ds['dataset_created_by'] = \\\n (reference.LabMember & dict(user_uuid=uuid.UUID(user))).fetch1(\n 'user_name')\n except:\n print(user)\n else:\n key_ds['dataset_created_by'] = None\n\n format = grf(key, 'data_format')\n key_ds['format_name'] = \\\n (data.DataFormat & dict(format_uuid=uuid.UUID(format))).fetch1(\n 'format_name')\n\n key_ds['created_datetime'] = grf(key, 'created_datetime')\n\n software = grf(key, 'generating_software')\n if software != 'None':\n key_ds['generating_software'] = software\n else:\n key_ds['generating_software'] = None\n\n directory = grf(key, 'provenance_directory')\n if directory != 'None':\n key_ds['provenance_directory'] = directory\n else:\n key_ds['provenance_directory'] = None\n\n md5 = grf(key, 'md5')\n if md5 != 'None':\n key_ds['md5'] = md5\n else:\n key_ds['md5'] = None\n\n file_size = grf(key, 'file_size')\n if file_size != 'None':\n key_ds['file_size'] = file_size\n else:\n key_ds['file_size'] = None\n\n data_set.add_to_queue1(key_ds)\n\n if data_set.flush_insert(\n skip_duplicates=True,\n allow_direct_insert=True, chunksz=100):\n print('Inserted 100 dataset tuples')\n\n if data_set.flush_insert(skip_duplicates=True, allow_direct_insert=True):\n print('Inserted all remaining dataset tuples')\n\n\n # ingest file record entries\n records = alyxraw.AlyxRaw & 'model=\"data.filerecord\"'\n repos = (data.DataRepository & 'repo_name LIKE \"flatiron%\"').fetch(\n 'repo_uuid')\n records_flatiron = alyxraw.AlyxRaw.Field & records & \\\n 'fname = \"data_repository\"' & [{'fvalue': str(repo)} for repo in repos]\n record_exists = alyxraw.AlyxRaw.Field & records & \\\n 'fname = \"exists\"' & 'fvalue=\"True\"'\n key_source = (alyxraw.AlyxRaw & record_exists & records_flatiron).proj(\n record_uuid='uuid') - data.FileRecord\n\n file_record = QueryBuffer(data.FileRecord)\n\n for key in tqdm(key_source.fetch('KEY'), position=0):\n key_fr = key.copy()\n key['uuid'] = key['record_uuid']\n key_fr['exists'] = True\n\n dataset = grf(key, 'dataset')\n if not len(data.DataSet & dict(dataset_uuid=uuid.UUID(dataset))):\n print('Dataset {} is not in the table data.DataSet')\n print('Record_uuid: {}'.format(str(key['uuid'])))\n continue\n\n key_fr['subject_uuid'], key_fr['session_start_time'], \\\n key_fr['dataset_name'] = \\\n (data.DataSet & dict(dataset_uuid=uuid.UUID(dataset))).fetch1(\n 'subject_uuid', 'session_start_time', 'dataset_name')\n\n repo = grf(key, 'data_repository')\n key_fr['repo_name'] = \\\n (data.DataRepository & dict(repo_uuid=uuid.UUID(repo))).fetch1(\n 'repo_name')\n\n key_fr['relative_path'] = grf(key, 'relative_path')\n\n file_record.add_to_queue1(key_fr)\n\n if file_record.flush_insert(\n skip_duplicates=True, allow_direct_insert=True, chunksz=1000):\n print('Inserted 1000 raw field tuples')\n\n if file_record.flush_insert(skip_duplicates=True, allow_direct_insert=True):\n print('Inserted all remaining file record tuples')\n","sub_path":"scripts/ingest_data_tables.py","file_name":"ingest_data_tables.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"650362680","text":"import os\n\nimport numpy as np\nimport cv2\n\nfrom imutils.video import VideoStream\nimport imutils\n\nimport 
argparse\n\nCANVAS_SIZE = (337, 600)\n\nFINAL_LINE_COLOR = (255, 255, 255)\nWORKING_LINE_COLOR = (127, 127, 127)\n\nclass PolygonDrawer(object):\n def __init__(self, window_name):\n self.window_name = window_name # Name for our window\n\n self.done = False # Flag signalling we're done\n self.current = (0, 0) # Current position, so we can draw the line-in-progress\n self.points = [] # List of points defining our polygon\n\n\n def on_mouse(self, event, x, y, buttons, user_param):\n # Mouse callback that gets called for every mouse event (i.e. moving, clicking, etc.)\n\n if self.done: # Nothing more to do\n return\n\n if event == cv2.EVENT_MOUSEMOVE:\n # We want to be able to draw the line-in-progress, so update current mouse position\n self.current = (x, y)\n elif event == cv2.EVENT_LBUTTONDOWN:\n # Left click means adding a point at current position to the list of points\n print(\"Adding point #%d with position(%d,%d)\" % (len(self.points), x, y))\n self.points.append((x, y))\n elif event == cv2.EVENT_RBUTTONDOWN:\n # Right click means we're done\n print(\"Completing polygon with %d points.\" % len(self.points))\n self.done = True\n\n\n def run(self):\n vs = VideoStream(src=0).start()\n\n # Let's create our working window and set a mouse callback to handle events\n cv2.namedWindow(self.window_name, flags=cv2.WINDOW_AUTOSIZE)\n cv2.imshow(self.window_name, np.zeros(CANVAS_SIZE, np.uint8))\n cv2.waitKey(1)\n cv2.setMouseCallback(self.window_name, self.on_mouse)\n\n while(not self.done):\n # This is our drawing loop, we just continuously draw new images\n # and show them in the named window\n\n canvas = vs.read()\n canvas = imutils.resize(canvas, width=CANVAS_SIZE[1])\n (h, w) = canvas.shape[:2]\n\n if (len(self.points) > 0):\n # Draw all the current polygon segments\n cv2.polylines(canvas, np.array([self.points]), False, FINAL_LINE_COLOR, 1)\n # And also show what the current segment would look like\n cv2.line(canvas, self.points[-1], self.current, WORKING_LINE_COLOR)\n # Update the window\n cv2.imshow(self.window_name, canvas)\n # And wait 50ms before next iteration (this will pump window messages meanwhile)\n if cv2.waitKey(50) == 27: # ESC hit\n self.done = True\n\n # User finised entering the polygon points, so let's make the final drawing\n canvas = np.zeros((h, w), np.uint8)\n # of a filled polygon\n if (len(self.points) > 0):\n cv2.fillPoly(canvas, np.array([self.points]), FINAL_LINE_COLOR)\n # And show it\n cv2.imshow(self.window_name, canvas)\n # Waiting for the user to press any key\n cv2.waitKey()\n\n cv2.destroyWindow(self.window_name)\n return canvas\n\nif __name__ == \"__main__\":\n # construct the argument parse and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-d\", default=\"zone_files\", help=\"folder to save zone files\")\n ap.add_argument(\"-n\", default=\"zoneq\", help=\"name of zone\")\n args = vars(ap.parse_args())\n\n save_dir = args['d']\n filename = args['n']\n\n pd = PolygonDrawer(\"Draw Zone\")\n image = pd.run()\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n cv2.imwrite(os.path.join(save_dir, filename + '.png'), image)\n\n print(\"Polygon = %s\" % pd.points)\n with open(os.path.join(save_dir, filename + '.txt'), 'w') as f:\n for point in pd.points:\n f.write(','.join([str(p) for p in point]) + '\\n')\n","sub_path":"draw_zone.py","file_name":"draw_zone.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"558280301","text":"from 
django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render\n\nfrom validator.forms import ResultsSortingForm\nfrom validator.models import ValidationRun\n\n\ndef published_results(request):\n page = request.GET.get('page', 1)\n\n # get sorting key and order\n sorting_form, order = ResultsSortingForm.get_sorting(request)\n\n published = (\n ValidationRun.objects.filter(doi__isnull=False)\n .exclude(doi__exact='')\n .order_by(order)\n )\n\n paginator = Paginator(published, 10)\n try:\n paginated_runs = paginator.page(page)\n except PageNotAnInteger:\n paginated_runs = paginator.page(1)\n except EmptyPage:\n paginated_runs = paginator.page(paginator.num_pages)\n\n context = {\n 'validations': paginated_runs,\n 'sorting_form': sorting_form,\n }\n return render(request, 'validator/published_results.html', context)\n","sub_path":"validator/views/published_results.py","file_name":"published_results.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"27075667","text":"#!/usr/bin/env python\n#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n\n\"\"\"\n@File:letcode_23_合并K个排序链表.py\n@Data:2019/7/24\n@param:\n@return:\n\"\"\"\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n# 分治算法\n\"\"\"\n算法的主要��路是先分再合\n\"\"\"\nclass Solution:\n # 合\n def merge(self, left: ListNode, right: ListNode) -> ListNode:\n l = ListNode(-1)\n start = l\n while left and right:\n if left.val <= right.val:\n l.next = left\n left = left.next\n else:\n l.next = right\n right = right.next\n l = l.next\n l.next = left if left else right\n return start.next\n # 分\n def div(self, lists):\n if len(lists) <= 1:\n return lists[0]\n mid = len(lists) // 2\n # 左侧元素\n left = self.div(lists[:mid])\n right = self.div(lists[mid:])\n return self.merge(left, right)\n\n def mergeKLists(self, lists: list) -> ListNode:\n self.this_len = len(lists)\n if self.this_len == 0:\n return None\n\n return self.div(lists)\n\n# 暴力法\nclass Solutions:\n def mergeKLists(self, lists: list) -> ListNode:\n vals = []\n for node in lists:\n while node:\n vals.append(node.val)\n node = node.next\n start = l = ListNode(-1)\n for i in sorted(vals):\n l.next = ListNode(i)\n l = l.next\n return start.next\n\n# 堆排序\n\"\"\"\n本题中使用了python的heapq 堆队列模块。\n其中包括:\n 1、heapq.heappush(heap, item)\n 将 item 的值加入 heap 中,保持堆的不变性。\n 2、heapq.heappop(heap)\n 弹出并返回 heap 的最小的元素,保持堆的不变性。如果堆为空,抛出 IndexError 。\n 使用 heap[0] ,可以只访问最小的元素而不弹出它。\n 3、heapq.heappushpop(heap, item)\n 将 item 放入堆中,然后弹出并返回 heap 的最小元素。\n 该组合操作比先调用 heappush() 再调用 heappop() 运行起来更有效率。\n 4、heapq.heapify(x)\n 将list x 转换成堆,原地,线性时间内。\n\"\"\"\nimport heapq\nclass Solutionss:\n def mergeKLists(self, lists: list) -> ListNode:\n vals = []\n for i in range(len(lists)):\n # 将第一个数据放入堆中\n if lists[i]:\n heapq.heappush(vals, (lists[i].val, i))\n lists[i] = lists[i].next\n start = l = ListNode(-1)\n\n while len(vals) > 0:\n # 此处弹出堆中最小的元素。因链表是有序的,所以不考虑后面比第一个元素大的情况\n (val, index) = heapq.heappop(vals)\n l.next = ListNode(val)\n l = l.next\n\n if lists[index]:\n heapq.heappush(vals, (lists[index].val, index))\n lists[index] = lists[index].next\n return start.next\n\n","sub_path":"letcode_23_合并K个排序链表.py","file_name":"letcode_23_合并K个排序链表.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"140683744","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: 
Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/bridge.py\n# Compiled at: 2016-11-30 17:21:00\n# Size of source mod 2**32: 6645 bytes\nimport random\nfrom bridge_util import Card, has_suit, find_max, winner, Bid, Contract\nfrom bridge_util import Suit, suit_real, rank_str, Bids, val, over_bonus\nfrom bridge_util import under_bonus\nfrom bridge_hum import Hum\nfrom bridge_ai import AI\n\nclass Trick:\n\n def __init__(self, leader, show_dum):\n self.leader = leader\n self.lead_suit = None\n self.cards = [None] * 4\n for self.cur_player in list(range(leader, 4)) + list(range(leader)):\n if self.cur_player == dummy:\n if show_dum:\n for b in brains:\n b.show_dum(hands[dummy], dummy)\n\n thinker = (self.cur_player + 2) % 4\n else:\n thinker = self.cur_player\n while 1:\n play = brains[thinker].ask(self, self.cur_player)\n if play in hands[self.cur_player]:\n if self.cur_player == leader:\n self.lead_suit = play.suit\n break\n if self.lead_suit == play.suit or not has_suit(hands[self.cur_player], self.lead_suit):\n break\n\n hands[self.cur_player].remove(play)\n self.cards[self.cur_player] = play\n for b in brains:\n b.show_card(self, play)\n\n self.winner = winner(self, trump)[0]\n trick_score[(self.winner % 2)] += 1\n for b in brains:\n b.show_trick(self)\n\n\ndef deal():\n \"\"\"Shuffle and deal hands. The RNG must be initialized first.\n \"\"\"\n global hands\n deck = list(range(52))\n random.shuffle(deck)\n hands = [set(Card(Suit(n % 4), n % 13) for n in deck[i * 13:i * 13 + 13]) for i in range(4)]\n\n\ndef honors(trump):\n \"\"\"Check if either side scores for honors. If N/S does then the\n return value is positive, if E/W then negative, else 0.\n \"\"\"\n if trump.is_suit():\n for i, h in enumerate(hands):\n n = len([c for c in h if c.suit == trump and c.rank > 7])\n if n > 3:\n if i % 2:\n if n == 4:\n return -10\n else:\n return -15\n else:\n if n == 4:\n return 10\n else:\n return 15\n\n return 0\n else:\n for i, h in enumerate(hands):\n n = len([c for c in h if c.rank == 12])\n if n == 4:\n if i % 2:\n return -15\n else:\n return 15\n\n return 0\n\n\ndef print_hand(hand):\n for s in suit_real():\n ch = [rank_str(c.rank) for c in hand if c.suit is s]\n print(s, end=' ')\n for i in ch:\n print(i, end='')\n\n print()\n\n print()\n\n\ndef play_hand():\n \"\"\"Play one hand. 
The RNG must be initialized first.\n \"\"\"\n global declarer\n global dummy\n global trick_score\n global trump\n trick_score = [\n 0, 0]\n con = Contract()\n deal()\n for i in range(4):\n brains[i].show_deal(hands[i], i, dealer)\n\n i = dealer\n passes = -1\n declarer = {}\n for s in Suit:\n declarer[s] = [\n None, None]\n\n cur_dec = None\n last_bid = 0\n while passes < 3:\n while True:\n b = brains[i].ask_bid(con, last_bid)\n try:\n con.apply(b, (i - last_bid) % 2)\n except ValueError:\n pass\n else:\n break\n\n for br in brains:\n br.show_bid(i, b)\n\n if b is Bids.Pass:\n passes += 1\n else:\n last_bid = i\n passes = 0\n try:\n fake_suit = (\n b.suit, i % 2)\n except AttributeError:\n pass\n else:\n if declarer[fake_suit[0]][fake_suit[1]] is None:\n declarer[fake_suit[0]][fake_suit[1]] = i\n cur_dec = declarer[fake_suit[0]][fake_suit[1]]\n if i == 3:\n i = 0\n else:\n i += 1\n\n for br in brains:\n if cur_dec is None:\n br.show_misdeal()\n else:\n declarer = cur_dec\n br.show_con(con, declarer)\n\n if cur_dec is not None:\n trump = con.suit\n leader = (declarer + 1) % 4\n dummy = (leader + 1) % 4\n honor_score = honors(trump)\n for i in range(13):\n leader = Trick(leader, i == 0).winner\n for b in brains:\n b.show_score(trick_score)\n\n for b in brains:\n b.show_hand()\n\n over = trick_score[(declarer % 2)] - 6 - con.n\n if over >= 0:\n game_score[(declarer % 2)] += val(con)\n rub_score[(declarer % 2)] += over_bonus(over, con, vul[(declarer % 2)])\n else:\n rub_score[((declarer + 1) % 2)] += under_bonus(-over, con, vul[(declarer % 2)])\n if honor_score > 0:\n rub_score[0] += honor_score\n else:\n rub_score[1] -= honor_score\n\n\ndef game_over():\n if rub_score[0] > rub_score[1]:\n brains[0].congratulate(rub_score)\n else:\n if rub_score[0] < rub_score[1]:\n brains[0].console(rub_score)\n else:\n brains[0].sister(rub_score)\n quit()\n\n\ndef main():\n random.seed()\n brains = [Hum(), AI(), AI(), AI()]\n dealer = random.randrange(4)\n game_score = [0, 0]\n rub_score = [0, 0]\n vul = [False, False]\n while True:\n play_hand()\n for b in brains:\n b.show_game_score(game_score, rub_score)\n\n if game_score[0] >= 10:\n for i in range(2):\n rub_score[i] += game_score[i]\n\n if vul[0]:\n rub_score[0] += 50 if vul[1] else 70\n game_over()\n else:\n vul[0] = True\n game_score[0] = 0\n game_score[1] = 0\n for b in brains:\n b.show_rubber_score(rub_score, vul)\n\n elif game_score[1] >= 10:\n for i in range(2):\n rub_score[i] += game_score[i]\n\n if vul[1]:\n rub_score[1] += 50 if vul[0] else 70\n game_over()\n else:\n vul[1] = True\n game_score[0] = 0\n game_score[1] = 0\n for b in brains:\n b.show_rubber_score(rub_score, vul)\n\n if dealer < 3:\n dealer += 1\n else:\n dealer = 0","sub_path":"pycfiles/PyWhist-0.1-py3.5/bridge.cpython-35.py","file_name":"bridge.cpython-35.py","file_ext":"py","file_size_in_byte":6759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"362017476","text":"import asyncio\nimport cmd\nimport marshal\nimport os\nimport sqlite3\nimport traceback\nfrom collections import deque\nfrom concurrent.futures import ThreadPoolExecutor\nfrom itertools import chain\nfrom multiprocessing import (set_start_method,\n Semaphore as ProcSemaphore,\n Pool\n )\nfrom random import sample, randint\nfrom threading import (Thread,\n Semaphore as ThreadSemaphore,\n Event as ThreadEvent)\nfrom time import perf_counter, time, sleep\n\nimport aiohttp\nimport lxml.html as ET\nimport psutil\nimport uvloop\nfrom multidict import CIMultiDict\nfrom 
yarl import URL\n\nHEADERS = CIMultiDict({\"user-agent\": \"Mozilla/5.0 (X11; Fedora; Linux x86_64) AppleWebKit/537.36 \" +\n \"(KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36\"})\n\nEXCLUDE = (\"/wp-login.php\", \"/wp-admin\")\nINITIAL_DELAY = (0, 60)\nDELAY = (30, 45)\n\n#########################\n# Pool-global\nuser_down_semaphore = None\nuser_error_semaphore = None\nsave_semaphore = None\n\n#########################\n# Fork-local\nloop = None # type: asyncio.AbstactEventLoop\nloop_set_semaphore = None\nloop_barrier = None\nloop_thread = None\nuser_id = 0\nprocess_id = 0\nstopped = False\n\ntp_executor = None # type: ThreadPoolExecutor\nstats_file = None\n\nfetch_stats = []\nuser_stats = []\nerror_stats = []\n\nurl_cache = {}\nurl_str_cache = {}\n\n\ndef intern_url(url):\n if url in url_cache:\n return url_cache[url]\n\n url_cache[url] = url\n return url\n\n\ndef intern_url_str(url: URL):\n url = url.human_repr()\n if url in url_str_cache:\n return url_str_cache[url]\n\n url_str_cache[url] = url\n return url\n\n\nasync def write_stats(loop, data):\n global stats_file, tp_executor\n\n buf = marshal.dumps(data)\n loop.run_in_executor(tp_executor, stats_file.write, buf)\n\n\nclass TimingRequestClass(aiohttp.ClientRequest):\n async def write_bytes(self, writer, conn):\n old_drain = writer.drain\n\n async def drain(last=False):\n try:\n return await old_drain(last)\n finally:\n self.response._request_end = perf_counter()\n\n writer.drain = drain\n start = perf_counter()\n try:\n return await super().write_bytes(writer, conn)\n finally:\n self.response._request_start = start\n\n\nclass TimingResponseClass(aiohttp.ClientResponse):\n def __init__(self, method, url, *,\n writer=None, continue100=None, timer=None):\n\n self._request_start = None\n self._request_end = None\n self._response_start = None\n self._response_end = None\n super().__init__(method, url, writer=writer, continue100=continue100, timer=timer)\n\n async def start(self, connection, read_until_eof=False):\n proto = connection.protocol\n old_data_received = proto.data_received\n\n def data_received(data):\n if self._response_start is None:\n self._response_start = perf_counter()\n\n try:\n return old_data_received(data)\n finally:\n self._response_end = perf_counter()\n\n proto.data_received = data_received\n return await super().start(connection, read_until_eof=read_until_eof)\n\n\nasync def fetch(session: aiohttp.ClientSession, url, from_url, transaction_id, user_id, process_id, main=True):\n rr_stats = None\n start_time = time()\n start = perf_counter()\n\n log_fetch = True\n try:\n try:\n async with session.get(url) as response: # type: aiohttp.ClientResponse\n try:\n if response.content_type.startswith(\"text/\"):\n return await response.text()\n\n return await response.read()\n\n finally:\n rr_stats = (response._request_start, response._request_end,\n response._response_start, response._response_end)\n\n except aiohttp.ClientError as e:\n log_fetch = False\n err = traceback.format_exception_only(type(e), e)\n error_stats.append((process_id, user_id, transaction_id,\n intern_url_str(url), intern_url_str(from_url), main,\n start_time, \"\\n\".join(err)))\n user_error_semaphore.release()\n raise e\n finally:\n if log_fetch:\n end = perf_counter()\n fetch_stats.append((process_id, user_id, transaction_id,\n intern_url_str(url), intern_url_str(from_url), main,\n response.status, response.reason, response.content_length, response.content_type,\n start_time, end - start, - rr_stats[1] + rr_stats[2]))\n\n\nclass User:\n def 
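The load tester above hooks aiohttp 2.x internals (`write_bytes`, `drain`, `data_received`) to timestamp the request and response phases separately. Those hooks touch private attributes and will not survive aiohttp upgrades; when only total wall time per request is needed, timing the call site with `perf_counter` is version-agnostic. A minimal sketch (the URL is a placeholder):

```python
import asyncio
from time import perf_counter

import aiohttp  # assumes a reasonably current aiohttp


async def timed_get(session: aiohttp.ClientSession, url: str):
    """Fetch url and return (body, elapsed_seconds)."""
    start = perf_counter()
    async with session.get(url) as response:
        body = await response.read()
    return body, perf_counter() - start


async def main():
    async with aiohttp.ClientSession() as session:
        body, elapsed = await timed_get(session, "https://example.com/")
        print(len(body), f"{elapsed:.3f}s")


if __name__ == "__main__":
    asyncio.run(main())  # asyncio.run requires Python 3.7+
```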
__init__(self, user_id, loop, start_url):\n self.loop = loop\n self.user_id = user_id\n self.session = aiohttp.ClientSession(loop=loop, headers=HEADERS,\n response_class=TimingResponseClass,\n request_class=TimingRequestClass)\n self.start_url = intern_url(URL(start_url))\n self.current_url = self.start_url\n self.traversed = set()\n\n loop_barrier.is_set()\n\n async def start(self):\n loop = self.loop\n start_url = self.start_url\n current_url = self.current_url\n from_url = current_url\n traversed = self.traversed\n pending_set = set()\n pending = deque()\n session = self.session\n transaction_id = -1\n user_id = self.user_id\n\n await asyncio.sleep(randint(*INITIAL_DELAY), loop=loop)\n while current_url or pending:\n loop_barrier.is_set()\n if stopped:\n return\n\n if not current_url:\n current_url, from_url = pending.popleft()\n pending_set.remove(current_url)\n\n transaction_id += 1\n traversed.add(current_url)\n\n html = await fetch(session, current_url, from_url, transaction_id, user_id, process_id)\n if not isinstance(html, str):\n current_url = None\n from_url = None\n continue\n\n html = ET.document_fromstring(html)\n\n all_anchors = html.findall(\".//a[@href]\")\n all_res = chain(html.findall(\".//script[@src]\"),\n html.findall(\".//img[@src]\"),\n html.findall(\".//link[@href]\"))\n all_res_urls = (start_url.join(URL(e.attrib[\"src\"] if \"src\" in e.attrib else e.attrib[\"href\"]))\n for e in all_res)\n resources = set(u.with_fragment(\"\") for u in all_res_urls if u.origin() == start_url)\n anchors = set(u.with_fragment(\"\") for u in (start_url.join(URL(u.attrib[\"href\"]))\n for u in all_anchors if u.attrib[\"href\"] != \"#\")\n if u.origin() == start_url)\n\n for a in sample(anchors, len(anchors)):\n if a not in traversed and a not in pending_set:\n if not [a for excluded in EXCLUDE if a.path.startswith(excluded)]:\n pending.append((a, current_url))\n pending_set.add(a)\n\n await asyncio.gather(*[fetch(session, r, current_url, transaction_id, user_id, process_id, False)\n for r in sample(resources, len(resources))],\n loop=loop)\n current_url = None\n from_url = None\n html = None\n all_anchors = None\n all_res = None\n all_res_urls = None\n resources = None\n anchors = None\n await asyncio.sleep(randint(*DELAY), loop=loop)\n\n async def close(self):\n self.session.close()\n\n\nasync def _start_user(loop, start_domain):\n global user_id, user_down_semaphore, fetch_stats, user_stats, error_stats\n\n user = User(user_id, loop, start_domain)\n user_id += 1\n\n user_start = time()\n try:\n try:\n await user.start()\n finally:\n await user.close()\n user_end = time()\n user_stats.append((process_id, user_id, user_start, user_end))\n user_down_semaphore.release()\n finally:\n if len(fetch_stats) > 500:\n await write_stats(loop, (1, fetch_stats))\n fetch_stats = []\n\n if len(user_stats) > 2000:\n await write_stats(loop, (2, user_stats))\n user_stats = []\n\n if len(error_stats) > 500:\n await write_stats(loop, (3, error_stats))\n error_stats = []\n\n\ndef start_user(start_domain):\n global loop, user_semaphore\n\n asyncio.run_coroutine_threadsafe(_start_user(loop, start_domain), loop)\n sleep(.5) # True not to grab the next task\n\n\ndef save_data(db_name):\n fetch_count = 0\n user_count = 0\n error_count = 0\n save_semaphore.acquire()\n try:\n try:\n conn = sqlite3.connect(db_name, isolation_level=\"EXCLUSIVE\")\n conn.execute(\"PRAGMA journal_mode=wal\")\n conn.execute(\"PRAGMA busy_timeout=7200000\")\n # conn.execute(\"PRAGMA journal_mode=OFF\")\n # conn.execute(\"PRAGMA 
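`User.start` above joins every `href`/`src` against the start URL with `yarl`, strips fragments, and keeps only same-origin targets before queueing them. The same filtering in isolation, applied to a hypothetical HTML snippet:

```python
import lxml.html as ET
from yarl import URL


def same_origin_links(html: str, base: URL) -> set:
    """Absolute, fragment-free anchor targets sharing base's scheme/host/port."""
    doc = ET.document_fromstring(html)
    links = set()
    for a in doc.findall(".//a[@href]"):
        href = a.attrib["href"]
        if href == "#":
            continue
        u = base.join(URL(href)).with_fragment("")
        if u.origin() == base.origin():  # drops external links
            links.add(u)
    return links


print(same_origin_links('<a href="/about#team">x</a> <a href="https://other.example/">y</a>',
                        URL("https://example.com/")))
# keeps only https://example.com/about
```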
synchrounous=OFF\")\n\n with open(\"stats_%d\" % process_id, \"rb\") as f:\n try:\n while True:\n type, data = marshal.load(f)\n if data:\n conn.execute(\"BEGIN TRANSACTION\")\n if type == 1:\n conn.executemany(\"INSERT INTO fetch_stats VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)\", data)\n fetch_count += len(data)\n elif type == 2:\n conn.executemany(\"INSERT INTO user_stats VALUES (?,?,?,?)\", data)\n user_count += len(data)\n elif type == 3:\n conn.executemany(\"INSERT INTO error_stats VALUES (?,?,?,?,?,?,?,?)\", data)\n\n conn.execute(\"COMMIT\")\n except EOFError:\n pass\n\n if os.path.exists(\"stats_%d\" % process_id):\n os.unlink(\"stats_%d\" % process_id)\n finally:\n conn.close()\n\n return process_id, fetch_count, user_count, error_count\n finally:\n save_semaphore.release()\n\n\ndef start_test():\n global stats_file, tp_executor, stopped\n\n stats_file = open(\"stats_%d\" % process_id, \"wb\")\n tp_executor = ThreadPoolExecutor()\n stopped = False\n\n loop_barrier.set()\n sleep(.3)\n\n\ndef stop_test():\n global stopped\n stopped = True\n\n loop_barrier.set()\n sleep(.3)\n\n\ndef end_test():\n global stats_file, tp_executor, fetch_stats, user_stats, error_stats\n\n loop_barrier.is_set()\n\n asyncio.run_coroutine_threadsafe(write_stats(loop, (1, fetch_stats)), loop).result()\n fetch_stats = []\n\n asyncio.run_coroutine_threadsafe(write_stats(loop, (2, user_stats)), loop).result()\n user_stats = []\n\n asyncio.run_coroutine_threadsafe(write_stats(loop, (3, error_stats)), loop).result()\n error_stats = []\n\n tp_executor.shutdown()\n tp_executor = None\n stats_file.close()\n sleep(.3)\n\n\ndef init_proc():\n global loop_thread, loop_set_semaphore, process_id, loop_barrier\n\n def start_loop():\n global loop, stats_file, loop_set_semaphore\n try:\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError:\n loop = asyncio.new_event_loop()\n finally:\n loop_set_semaphore.release()\n\n loop.run_forever()\n\n loop_set_semaphore = ThreadSemaphore(0)\n loop_barrier = ThreadEvent()\n process_id = os.getpid()\n loop_thread = Thread(target=start_loop, daemon=True)\n loop_thread.start()\n loop_set_semaphore.acquire()\n print(\"Initialized Worker (pid %d)\" % process_id)\n\n\nclass LoadTester(cmd.Cmd):\n def preloop(self):\n self.start_domain = None\n self.started = False\n\n self.target_users = 0\n self.current_users = 0\n self.errors = 0\n\n self.governor_thread = None\n # This serves as a memory barrier\n self.governor_event = ThreadEvent()\n\n self.proc_count = psutil.cpu_count(logical=False) - 1\n\n print(\"Load Tester\")\n print(\"-----------\")\n print(\"Starting (pid %d) with %d workers\" % (os.getpid(), self.proc_count))\n self.pool = Pool(processes=self.proc_count, initializer=init_proc)\n sleep(.2)\n\n def postloop(self):\n self.pool.close()\n\n def do_start(self, arg):\n \"\"\"Start the test for the URL: start \"\"\"\n self.governor_event.is_set()\n if self.started or self.current_users > 0:\n print(\"*** Already started or still running\")\n self.onecmd(\"status\")\n return\n\n if arg is None or not isinstance(arg, str) or not arg:\n print(\"*** Error: wrong URL\")\n self.onecmd(\"help start\")\n return\n\n self.start_domain = arg\n self.started = True\n self.errors = 0\n\n results = set()\n for i in range(self.proc_count):\n results.add(self.pool.apply_async(start_test))\n for result in results:\n result.get()\n\n self.governor_thread = Thread(target=self.governor)\n self.governor_thread.start()\n\n def do_add(self, arg):\n 
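`write_stats` and `save_data` above stream statistics by appending one `marshal.dumps` blob per batch to a per-process file, then replaying the file with `marshal.load` until it raises `EOFError`. The pattern in miniature:

```python
import marshal

# Append records one dumps() at a time, exactly as write_stats does.
with open("stats_demo", "wb") as f:
    for record in [(1, [("a", 0.5)]), (2, [("b", 1.5)])]:
        f.write(marshal.dumps(record))

# Replay them; marshal.load raises EOFError at the end of the stream.
records = []
with open("stats_demo", "rb") as f:
    try:
        while True:
            records.append(marshal.load(f))
    except EOFError:
        pass

print(records)  # [(1, [('a', 0.5)]), (2, [('b', 1.5)])]
```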
\"\"\"Add N users, N > 0: add 10\"\"\"\n if not self.started:\n print(\"*** Error: not started!\")\n return\n try:\n arg = int(arg)\n if arg < 1:\n raise ValueError(\"must be greater than 0\")\n except ValueError as e:\n print(\"*** Error:\", e)\n self.onecmd(\"help add\")\n return\n\n self.target_users += arg\n\n def do_remove(self, arg):\n \"\"\"Remove N users, N > 0: remove 10\"\"\"\n if not self.started:\n print(\"*** Error: not started!\")\n return\n try:\n arg = int(arg)\n if arg < 1:\n raise ValueError(\"must be greater than 0\")\n except ValueError as e:\n print(\"*** Error:\", e)\n self.onecmd(\"help remove\")\n return\n\n if arg > self.target_users:\n self.target_users = 0\n else:\n self.target_users -= arg\n self.governor_event.set()\n\n def do_stop(self, arg):\n \"\"\"Stop the test\"\"\"\n if not self.started:\n print(\"*** Error: not started!\")\n return\n\n self.started = False\n self.target_users = 0\n self.governor_event.set()\n\n results = set()\n for i in range(self.proc_count):\n results.add(self.pool.apply_async(stop_test))\n\n for result in results:\n result.get()\n\n def do_status(self, arg):\n self.governor_event.is_set()\n print(\"Started:\\t%r\\nURL:\\t\\t%s\\nUsers:\\t\\t%d/%d (current/target)\\nErrors:\\t\\t%d\\nCPU:\\t\\t%r\\nMEM:\\t\\t%r\\n\" % (\n self.started,\n self.start_domain,\n self.current_users,\n self.target_users,\n self.errors,\n psutil.cpu_percent(percpu=True),\n psutil.virtual_memory()\n ))\n\n def do_quit(self, arg):\n \"\"\"Exit without saving any data\"\"\"\n self.pool.terminate()\n return True\n\n def do_abort(self, arg):\n \"\"\"Abort all running users\"\"\"\n self.started = False\n self.target_users = 0\n self.current_users = 0\n self.governor_event.set()\n\n def do_save(self, arg):\n \"\"\"Save collected statistics: save \"\"\"\n\n self.governor_event.is_set()\n\n if self.started:\n print(\"*** Error: not stopped!\")\n return\n if self.current_users > 0:\n print(\"*** Error: still stopping!\")\n return\n\n if not arg:\n print(\"*** Error: must specify a name\")\n self.onecmd(\"help save\")\n return\n\n if os.path.exists(arg):\n os.unlink(arg)\n\n conn = sqlite3.connect(arg, isolation_level=None)\n try:\n conn.execute(\"PRAGMA journal_mode=wal\")\n\n conn.execute(\"\"\"CREATE TABLE fetch_stats(\n process_id INTEGER,\n user_id INTEGER, \n transaction_id INTEGER,\n url TEXT, \n from_url TEXT,\n main INTEGER,\n status INTEGER,\n reason TEXT,\n content_length INTEGER,\n content_type TEXT,\n start_time REAL,\n total_duration REAL,\n request_duration REAL\n );\"\"\")\n\n conn.execute(\"\"\"CREATE TABLE user_stats(\n process_id INTEGER,\n user_id INTEGER, \n start_time REAL,\n end_time REAL\n );\"\"\")\n\n conn.execute(\"\"\"CREATE TABLE error_stats(\n process_id INTEGER,\n user_id INTEGER, \n transaction_id INTEGER,\n url TEXT,\n from_url TEXT,\n main INTEGER,\n start_time REAL,\n err TEXT\n );\"\"\")\n finally:\n conn.close()\n\n results = set()\n for i in range(self.proc_count):\n results.add(self.pool.apply_async(save_data, (arg,)))\n\n save_semaphore.release()\n\n while results:\n for result in list(results):\n if result.ready():\n save_result = result.get()\n print(\"Worker (pid %d): fetches %d, users %d, errors %d\" % save_result)\n results.remove(result)\n sleep(.3)\n\n conn = sqlite3.connect(arg, isolation_level=None)\n try:\n conn.execute(\"PRAGMA journal_mode=wal\")\n\n conn.execute(\"CREATE INDEX fs_start_time ON fetch_stats (start_time);\")\n conn.execute(\"CREATE INDEX fs_transaction ON fetch_stats (transaction_id, main, url);\")\n 
conn.execute(\"CREATE INDEX us_start_time ON user_stats (start_time, end_time);\")\n finally:\n conn.close()\n\n def governor(self):\n ev = self.governor_event\n pool = self.pool\n ev.is_set()\n while self.started or self.current_users > 0:\n while user_down_semaphore.acquire(False):\n self.current_users -= 1\n\n user_errors = 0\n while user_error_semaphore.acquire(False):\n user_errors += 1\n\n if user_errors:\n self.errors += user_errors\n ev.set()\n\n while self.started and self.current_users < self.target_users:\n pool.apply_async(start_user, (self.start_domain,))\n self.current_users += 1\n\n sleep(0.3)\n ev.is_set()\n\n results = set()\n for i in range(self.proc_count):\n results.add(self.pool.apply_async(end_test))\n\n for result in results:\n result.get()\n\n\ndef main():\n global user_down_semaphore, user_error_semaphore, save_semaphore\n set_start_method(\"fork\")\n\n user_down_semaphore = ProcSemaphore(0)\n user_error_semaphore = ProcSemaphore(0)\n save_semaphore = ProcSemaphore(0)\n\n LoadTester().cmdloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/main/python/traiana/lt.py","file_name":"lt.py","file_ext":"py","file_size_in_byte":19620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"192641182","text":"import statistics as stats\nimport numpy as np\n\n\ndef media(lista):\n u = stats.mean(lista)\n return u\n\n\ndef normalizacion(x, u, maxi, mini): # <--- Funcion que normaliza los datos.\n normalizacion = (x - u) / (maxi - mini)\n return normalizacion\n\n\nfile = open(\"ex1data2.txt\", 'r') # <--- abrimos el archivo y lo leemos.\ndatos = file.readlines() # <--- leemos el archivo y readlines() devuelve una lista que contiene las líneas.\n\nlista = []\nlistOperacion = []\nlista60 = []\nlista40 = []\nauxi = []\nvalores = []\nnorma = []\ncont = 1\n\nfor i in range(len(datos)): # <-- añadimos los datos del archivo en una lista, desaparecemos '\\n' con .strip y\n lista.append(datos[i].strip().split(\",\")) # convertimos cada linea en una lista con split().\nfile.close() # <-- Cerramos el archivo usando .close()\n\nfor i in lista: # <--- contamos la cantidad de datos dentro de lista.\n r = cont\n # print(r, i)\n cont += 1\n\n#np.random.shuffle(lista) # <--- revolvemos las listas dentro de lista usando .shuffle()\n\ntam = int((cont - 1) * 0.80) # <-- Tomamos el equivalente al 60% de los datos\n\nfor j in range(0, tam): # <--- guarda el 60% de los datos en lista60\n lista60.append(lista[j])\n\nfor m in range(len(lista[0])):\n for n in range(len(lista60)):\n listOperacion.append(float(lista60[n][m])) # <-- se incertan uno por uno los valores dentro de listOperacion.\n\n\nauxi = np.array(listOperacion).reshape(len(lista[0]), tam) #<-- crea una matriz en base a la listOperacion\n\nfor i in range(len(lista[0])):\n minimo = min(auxi[i]) #<-- Valor minimo de la fila i\n maximo = max(auxi[i]) #<-- Valor maximo de la fila i\n medi = media(auxi[i]) #<-- Valor de la media de la fila i\n valores.append([medi, minimo, maximo]) # <-- Guarda los valores de la Media, el valor Minimo y el Maximo.\n\nfor i in range(len(lista[0])):\n for j in range(len(lista60)):\n x = auxi[i][j]\n u = valores[i][0]\n mini = valores[i][1]\n maxi = valores[i][2]\n norma.append(x) #< -- Datos no normalizados\n #norma.append(normalizacion(x, u, maxi, mini))\n\nfor i in range(len(lista60)):\n norma.insert(0, 1) # <--- insertamos x0 que es igual a 1\n\nlistNorma = np.array(norma).reshape(4, len(lista60)) #construye una lista de 4 filas por el numero de 
columnas de la lista60.\n\nf = open(\"60.txt\", \"w\")\n\nfor i in listNorma:\n f.write(str(i) + '\\n')\nf.close()\n\n# ------------------------GRADIENTE---------------------------------#\nx = listNorma # <----- 60% #\n\ndelta = [0, 0, 0]\n\nfor N in range(10000):\n print(\"------------------------------------------\")\n print(delta)\n print(\"------------------------------------------\")\n alfa = 0.0000000001\n m = 37\n listaSumatoria = []\n\n for i in range(3):\n sumatoria = 0\n for j in range(37):\n h = delta[0] * x[0][j] + delta[1] * x[1][j] + delta[2] * x[2][j]\n y = x[3][j]\n sumatoria = sumatoria + (h - y) * x[i][j]\n\n listaSumatoria.append(sumatoria)\n\n # print(delta)\n # print(\"------------\")\n for p in range(3):\n tempo = delta[p] - alfa * (1 / m) * listaSumatoria[p]\n delta[p] = tempo\n\n# --------------------------FIN-----------------------------------#\n\"\"\"\nlistOperacion = []\nauxi = []\nlistNorma = []\nnorma = []\nhs = []\ncont = 0\n\nfor i in range(28, 47):\n lista40.append(lista[i])\n\nfor m in range(len(lista40[0])):\n for n in range(len(lista40)):\n listOperacion.append(float(lista40[n][m]))\n\nauxi = np.array(listOperacion).reshape(len(lista40[0]), 19)\n\nfor i in range(len(auxi)):\n for j in range(len(auxi[0])):\n x = auxi[i][j]\n u = valores[i][0]\n mini = valores[i][1]\n maxi = valores[i][2]\n norma.append(x) #< -- Datos no normalizados\n #norma.append(normalizacion(x, u, maxi, mini))\n\nfor i in range(19):\n norma.insert(0, 1)\n\nlistNorma = np.array(norma).reshape(4, 19)\n\nf = open(\"40.txt\", \"w\")\n\nfor i in listNorma:\n f.write(str(i) + '\\n')\n\nf.close()\n\nx = listNorma\nfor j in range(len(lista40)):\n h = delta[0] * x[0][j] + delta[1] * x[1][j] + delta[2] * x[2][j]\n hs.append(h)\n\nsumaError = 0\nfor j in range(len(lista40)):\n sumaError = sumaError + (abs((x[3][j] - hs[j]) / x[3][j]))\nn = len(lista40)\n\nMAPE = (sumaError / n) * 100\n\nprint(\"H: \", delta)\nprint(\"MAPE: \", MAPE, \"%\")\n\"\"\"","sub_path":"TallerRegresionLineal.py","file_name":"TallerRegresionLineal.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"246247061","text":"import utils\nimport urllib\nimport urlparse\nimport urllib2\nimport io\nimport httplib\nimport mimetypes\nimport json\n\nfrom foxdox_session import FoxdoxSession\n\n\nclass FoxdoxClient:\n def __init__(self):\n self._developerId = 'zp1xwCn976mX4kd5j2VM'\n self._applicationId = 'bPEcEdEYqg68ryWBcct2'\n self._appVersion = '0.1'\n self._language = 'en'\n self._default_result = {'Status': 0, 'Error': 0, 'StatusMsg': ''}\n self.session = FoxdoxSession()\n\n def auth_requesttoken(self, username, password):\n url = 'https://api.foxdox.de/auth/requesttoken'\n values = {'username': username, 'password': password}\n\n try:\n result = self._post_request(url, values, None)\n\n self.session.user_sid = result['SID']\n self.session.token = result['Token']\n self.session.root_folder = self.folder_rootfolder()\n self.session.current_folder = self.folder_details(\n folder_id=utils.get_safe_value_from_dict(self.session.root_folder, 'Id', default=''))\n self.session.folders = self.folder_listfolders()\n self.session.documents = self.folder_listdocuments()\n\n except Exception:\n result = self._default_result\n\n return result\n\n def auth_deletetoken(self):\n url = 'https://api.foxdox.de/auth/deletetoken'\n values = {}\n\n try:\n result = self._post_request(url, values, self.session.token)\n self.session.reset()\n except:\n result = 
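The regression script above defines mean normalization, (x - u) / (max - min), but currently appends the raw values (the normalizing call is commented out). The transform on a toy feature column, for reference; the numbers are illustrative only:

```python
import numpy as np

col = np.array([2104.0, 1600.0, 2400.0, 1416.0, 3000.0])
u, lo, hi = col.mean(), col.min(), col.max()
normalized = (col - u) / (hi - lo)
print(normalized)  # zero-mean values, roughly within [-1, 1]
```

Rescaling this way keeps the gradient-descent step size comparable across features, which is why the script stores (mean, min, max) per row: the same transform can then be replayed on the held-out split.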
self._default_result\n\n return result\n\n def folder_rootfolder(self):\n url = 'https://api.foxdox.de/folder/rootfolder'\n values = {}\n\n try:\n result = self._post_request(url, values, self.session.token)\n except Exception as ex:\n result = self._default_result\n\n return result\n\n def folder_foldertree(self):\n url = 'https://api.foxdox.de/folder/foldertree'\n values = {'rootfoldername': 'Root', 'flat': 'true'}\n\n try:\n result = self._post_request(url, values, self.session.token)\n except:\n result = self._default_result\n\n return result\n\n def folder_listfolders(self):\n url = 'https://api.foxdox.de/folder/listfolders'\n values = {'folderid': str(utils.get_safe_value_from_dict(self.session.current_folder, 'Id'))}\n\n try:\n result = self._post_request(url, values, self.session.token)\n except:\n result = self._default_result\n\n return result\n\n def folder_listdocuments(self):\n url = 'https://api.foxdox.de/folder/listdocuments'\n values = {'folderid': utils.get_safe_value_from_dict(self.session.current_folder, 'Id')}\n\n try:\n result = self._post_request(url, values, self.session.token)\n except Exception as ex:\n result = self._default_result\n\n return result\n\n def folder_details(self, folder_id=None):\n url = 'https://api.foxdox.de/folder/details'\n fid = utils.get_safe_value_from_dict(self.session.current_folder, 'Id') if folder_id is None else folder_id\n values = {'folderid': fid}\n\n try:\n result = self._post_request(url, values, self.session.token)\n except Exception as ex:\n result = self._default_result\n\n return result\n\n def changefolder(self, folder_name):\n if folder_name == '..':\n if self.session.current_folder['ParentFolderId'] == str(utils.EMPTY_UUID):\n return True\n else:\n self.session.current_folder = self.folder_details(folder_id=self.session.current_folder['ParentFolderId'])\n self.session.folders = self.folder_listfolders()\n self.session.documents = self.folder_listdocuments()\n return True\n elif folder_name == '/':\n self.session.current_folder = self.folder_details(folder_id=self.session.root_folder['Id'])\n self.session.folders = self.folder_listfolders()\n self.session.documents = self.folder_listdocuments()\n return True\n else:\n folders = self.folder_listfolders()\n for folder in folders['Items']:\n if folder['Name'] == folder_name:\n self.session.current_folder = self.folder_details(folder_id=folder['Id'])\n self.session.folders = self.folder_listfolders()\n self.session.documents = self.folder_listdocuments()\n return True\n\n return False\n\n def _post_request(self, url, values, token):\n headers = {\n 'X-DEVID': self._developerId,\n 'X-APPID': self._applicationId,\n 'X-APPVER': self._appVersion,\n 'X-LANG': self._language,\n 'X-TOKEN': token\n }\n\n data = urllib.urlencode(values)\n req = urllib2.Request(url, data.encode('utf-8'), headers)\n res = urllib2.urlopen(req)\n result = json.loads(res.read().decode('utf-8'))\n\n return result\n\n def _get_binary(self, url, values, token):\n headers = {\n 'X-DEVID': self._developerId,\n 'X-APPID': self._applicationId,\n 'X-APPVER': self._appVersion,\n 'X-LANG': self._language,\n 'X-TOKEN': token\n }\n\n data = urllib.urlencode(values)\n req = urllib2.Request(url + '?' 
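`FoxdoxClient` above targets Python 2 (`urllib2`, `urlparse`, `httplib`). For reference, `_post_request` maps onto the Python 3 standard library as below; this is a sketch of the one helper, not a port of the whole class:

```python
import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen


def post_request(url, values, headers):
    """Form-encoded POST returning parsed JSON (Python 3 stdlib)."""
    data = urlencode(values).encode("utf-8")
    req = Request(url, data=data, headers=headers)
    with urlopen(req) as res:
        return json.loads(res.read().decode("utf-8"))
```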
+ data, data=None, headers=headers)\n res = urllib2.urlopen(req)\n result = res.read()\n\n return result\n\n def _get_secure_binary(self, url, values, key, token):\n headers = {\n 'X-DEVID': self._developerId,\n 'X-APPID': self._applicationId,\n 'X-APPVER': self._appVersion,\n 'X-LANG': self._language,\n 'X-KEYPASS': key,\n 'X-TOKEN': token\n }\n\n data = urllib.urlencode(values)\n req = urllib2.Request(url + '?' + data, data=None, headers=headers)\n res = urllib2.urlopen(req)\n result = res.read()\n\n return result\n\n def _post_binary(self, url, file_name, values, token):\n headers = {\n 'X-DEVID': self._developerId,\n 'X-APPID': self._applicationId,\n 'X-APPVER': self._appVersion,\n 'X-LANG': self._language,\n 'X-TOKEN': token\n }\n\n url_parts = urlparse.urlparse(url)\n host = url_parts.netloc\n selector = url_parts.path\n result = FoxdoxClient.post_multipart(host, selector, values.items(),\n [('file', file_name, open(file_name, 'rb'))], headers, 'FOXDOXpy1337')\n return result\n\n @staticmethod\n def post_multipart(host, selector, fields, files, headers, boundary):\n headers['Content-type'] = 'multipart/form-data; boundary=' + boundary\n body = FoxdoxClient.encode_multipart_formdata(fields, files, boundary)\n client = httplib.HTTPSConnection(host)\n client.request('POST', selector, body, headers)\n res = client.getresponse()\n return json.loads(res.read().decode('utf-8'))\n\n @staticmethod\n def encode_multipart_formdata(fields, files, boundary):\n body = io.BytesIO()\n mime_boundary = boundary\n\n for (key, value) in fields:\n title = 'Content-Disposition: form-data; name=\"%s\"\\r\\n' % key\n body.write(str('--' + mime_boundary + '\\r\\n').encode('utf-8'))\n body.write(str(title).encode('utf-8'))\n body.write(b'\\r\\n')\n body.write(str(value).encode('utf-8') + b'\\r\\n')\n\n for (key, filename, value) in files:\n title = 'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"\\r\\n' % (key, filename)\n ctype = 'Content-Type: %s\\r\\n' % FoxdoxClient.get_content_type(filename)\n body.write(str('--' + mime_boundary + '\\r\\n').encode('utf-8'))\n body.write(str(title).encode('utf-8'))\n body.write(str(ctype).encode('utf-8'))\n body.write(b'\\r\\n')\n body.write(value.read())\n body.write(b'\\r\\n')\n\n value.close()\n\n body.write(str('--' + mime_boundary + '--\\r\\n').encode('utf-8'))\n body.write(b'\\r\\n')\n\n return body.getvalue()\n\n @staticmethod\n def get_content_type(filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n","sub_path":"foxdox_client.py","file_name":"foxdox_client.py","file_ext":"py","file_size_in_byte":8413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"561626512","text":"from django.contrib.auth.models import User\nfrom classcast.classcast_search_fetch.models import question\nfrom django.db import models\n\n\nclass Challenge(models.Model):\n ChallengeStatus = (\n ('REQUESTED', 'requested'),\n ('STARTED', 'started'),\n\n )\n challenge_id = models.AutoField(primary_key = True)\n chl_by = models.ForeignKey(User, null = True, related_name = 'challenge_by')\n chl_to = models.ForeignKey(User, null = True, related_name = 'challenge_to')\n accepted = models.BooleanField(default = False)\n rejected = models.BooleanField(default = False)\n chl_class = models.IntegerField(null = True)\n sub = models.CharField(max_length = 50, null = True)\n chapter = models.CharField(max_length = 50, null = True)\n status = models.CharField(max_length = 10, choices = ChallengeStatus, \\\n 
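`encode_multipart_formdata` above builds the multipart body by hand; each `files` entry must be a `(key, filename, file_object)` triple whose object supports `.read()` and `.close()`. A self-contained call using an in-memory file instead of a path (the field name is illustrative; the boundary matches the one `_post_binary` hardcodes):

```python
import io

body = FoxdoxClient.encode_multipart_formdata(
    fields=[("folderid", "1234")],                      # hypothetical form field
    files=[("file", "hello.txt", io.BytesIO(b"hi"))],   # in-memory stand-in for open(path, 'rb')
    boundary="FOXDOXpy1337",
)
print(body.decode("utf-8", errors="replace"))
```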
default = 'requested')\n requested_at = models.DateTimeField(default = None)\n started_at_by = models.DateTimeField(null = True, blank=True)\n started_at_to = models.DateTimeField(null = True, blank=True)\n ended_at_by = models.DateTimeField(null = True, blank=True)\n ended_at_to = models.DateTimeField(null = True, blank=True)\n question_count = models.IntegerField(default = 10)\n\n class Meta:\n db_table = 'classcast_challenge'\n\n def __str__(self):\n return u'%s %s %s' % (str(self.challenge_id), str(self.chl_by.username), str(self.chl_to.username))\n\nclass ChallengeQuestion(models.Model):\n challenge_id = models.ForeignKey(Challenge)\n question_id = models.ForeignKey(question)\n opponent_submitted = models.BooleanField(default = False)\n defendant_submitted = models.BooleanField(default = False)\n opponent_correctly_attempted = models.BooleanField(default = False)\n defendant_correctly_attempted = models.BooleanField(default = False)\n \n class Meta:\n db_table = 'classcast_challenge_question'\n unique_together = ('challenge_id','question_id')\n\n def __str__(self):\n return u'%s %s' % (str(self.challenge_id), str(self.question_id))\n\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"197513955","text":"#!/usr/bin/env python\n# coding: utf-8\n#\nfrom wxbot import *\nimport rospy\nimport sys\nimport os\nimport time\nimport tuling\nfrom voice_move.msg import pose_info\nfrom std_msgs.msg import String,Int32\nreload(sys)\nsys.setdefaultencoding('utf8')\nhello_str = \"hello world \"\nrospy.loginfo(hello_str)\n\npub = rospy.Publisher('/voice_system/tts_topic', String, queue_size=10)\npub_pose = rospy.Publisher('/voice_move/topic_name', pose_info, queue_size=10)\nrospy.init_node('wxchat')\naa=pose_info()\necho_flag=0\n\nclass MyWXBot(WXBot):\n def handle_msg_all(self, msg):\n if msg['msg_type_id'] == 4 and msg['content']['type'] == 0:\n #self.send_msg_by_uid(u'【扎哥机器人自动回复】:您好,请留言', msg['user']['id'])\n wx_user=msg['user']\n wx_user_name=wx_user['name']\n wx_content=msg['content']\n wx_user_content=wx_content['data']\n self.send_msg(u'zhage', u'来自 : %s 的消息: %s' % (wx_user_name,wx_user_content))\n rospy.loginfo(wx_user_content[0:2])\n \n\n if wx_user_name == u'zhage' or wx_user_name == u'老师' or wx_user_name == u'学长' or wx_user_name == u'tx':\n #回话功能:\n \n if wx_user_content[0:2] == u'回话':\n hello_str=wx_user_content[3:]\n pub.publish(hello_str)\n rospy.loginfo(hello_str)\n\n if wx_user_content[0:2] == u'聊天':\n hello_str=wx_user_content[3:]\n tuling_send=tuling.tuling_get(hello_str)\n #self.send_msg(msg['user']['id'], u'[图灵妹妹]:%s' % tuling_send )\n self.send_msg_by_uid(u'[图灵妹妹]:%s' % tuling_send,msg['user']['id'])\n\n if wx_user_content[0:2] == u'面聊':\n hello_str=wx_user_content[3:]\n tuling_send=tuling.tuling_get(hello_str)\n pub.publish(tuling_send)\n \n #坐标定位功能:\n elif wx_user_content[0:2] == u'坐标':\n a=float(wx_user_content[3:5])\n b=float(wx_user_content[7:9])\n aa.pose_x=a\n aa.pose_y=b\n pub_pose.publish(aa)\n \n #有声拍照功能:\n elif wx_user_content[0:2] == u'拍照':\n hello_str=\"启动拍照\"\n pub.publish(hello_str)\n os.system('rosrun camera camera.py')\n hello_str=\"拍照成功,正在发送到微信\"\n pub.publish(hello_str)\n self.send_img_msg_by_uid(\"/home/xiaoqiang/Videos/1.jpeg\",msg['user']['id'])\n\n #无声拍照功能:\n elif wx_user_content[0:4] == u'无声拍照':\n os.system('rosrun camera camera.py')\n self.send_img_msg_by_uid(\"/home/xiaoqiang/Videos/1.jpeg\",msg['user']['id'])\n\n 
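A hedged usage sketch for the `Challenge` models above; `alice`, `bob`, and `some_question` stand for existing `User` and `question` rows. Note that `requested_at` is a non-null field with `default=None`, so callers must supply it (or the field should arguably use `auto_now_add=True`):

```python
from django.utils import timezone

challenge = Challenge.objects.create(
    chl_by=alice,                 # hypothetical User instances
    chl_to=bob,
    chl_class=10,
    sub="math",
    chapter="algebra",
    requested_at=timezone.now(),  # required: NOT NULL with default=None
)
ChallengeQuestion.objects.create(challenge_id=challenge, question_id=some_question)

pending = Challenge.objects.filter(chl_to=bob, accepted=False, rejected=False)
```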
# Relay function:\n            elif wx_user_content[0:2] == u'传话':\n                echo_flag=1\n            \n\n            \n\n\n        if msg['content']['type'] == 4:\n            print('dskjfh')\n            voice=msg['content']\n            rospy.loginfo(voice)\n            wx_voice =voice['voice']\n            if wx_user_name == u'zhage':  # NOTE: wx_user_name is only bound in the text branch above\n                print('dskjfh')\n                os.system('play %s' % wx_voice)  # os.system takes a single command string\n\n\n\n        \n        #self.send_img_msg_by_uid(\"img/1.png\", msg['user']['id'])\n        #self.send_file_msg_by_uid(\"img/1.png\", msg['user']['id'])\n\n\n\n\ndef main():\n    bot = MyWXBot()\n    bot.DEBUG = True\n    bot.conf['qr'] = 'png'\n    bot.run()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"zhage.py","file_name":"zhage.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"257326546","text":"import math\nimport random\nfrom StringBinaryConverter import *\n# USAGE:\n# to_binary(string)\n# to_string(binary)\n# Encoder32(string)\n# Decoder32(binary)\n\n\n# from the Jacobian method on CrypoSec_L7.1.pdf page 23\n# /x\\\n# |-|\n# \\n/\ndef Jacobian(x,n):\n    if x == 1:\n        return 1\n    if x == 0:\n        if n == 1:\n            return 1\n        return 0\n    if x == -1:\n        if n % 2 == 0:\n            return 1\n        return -1\n    if x == 2:\n        if n % 8 in [1,7]:\n            return 1\n        if n % 8 in [3,5]:\n            return -1 \n    if x >= n:\n        return Jacobian(x%n, n)\n    if x % 2 == 0:\n        return Jacobian(2, n)*Jacobian(x//2, n)\n    if x % 4 == 3 and n % 4 == 3:\n        return -1 * Jacobian( n, x)\n    else:\n        return Jacobian(n, x )  \n    # NOTE: the block below is unreachable; kept from an earlier variant of the recursion.\n    #if ((n**2 -1)/8 % 2) == 0:\n        #return Jacobian(x/2,n)\n    #return -1 * Jacobian(x/2,n)    \n    if (((x-1) * (n-1) / 4 ) % 2) == 0:\n        return Jacobian(n % x, x)\n    return -1 * Jacobian (n % x, x)\n    \ndef HasGCD(n1, n2):\n    # Euclidean algorithm; True when the inputs share a factor greater than 1.\n    # (The original compared the gcd against 0, which made this always False.)\n    while n2 != 0:\n        swap = n1 % n2\n        n1 = n2\n        n2 = swap\n    return n1 > 1\n\n\ndef moduloExponent( base, power, modulo):\n    return pow(base,power,modulo)\n\n## use solovay-strassen to test whether the number is a prime or not\ndef primeTest(number):\n    for i in range (32):\n        rand = random.randint(1,number-1)\n        if HasGCD(rand,number):\n            return False;\n        ## CrypoSec_L7.1.pdf, page 25\n        if not Jacobian(rand,number) % number == moduloExponent(rand, (number-1)//2, number):\n            return False;\n    return True\n\n\n\ndef generatePrime():\n    while(True):\n        ## generation of a 32bit number\n        rand = random.randint(2 ** 30, 2** 31)\n        if rand % 2 == 0:\n            continue;\n        if not primeTest(rand):\n            continue;\n        if not primeTest(rand * 2 + 1):\n            continue;\n        return rand * 2 + 1\n    \n    \n#imported from internet, credit to: \n#http://modular.math.washington.edu/edu/2007/spring/ent/ent-html/node31.html\ndef find_primitive_root( p ):\n    if p == 2:\n        return 1\n    p1 = 2\n    p2 = (p-1) // p1\n    while( 1 ):\n        g = random.randint( 2, p-1 )\n        if not (moduloExponent( g, (p-1)//p1, p ) == 1):\n            if not moduloExponent( g, (p-1)//p2, p ) == 1:\n                return g\n    \n\n\n## generate 32 bit key pair\n## on return: [0]: public key [1]: private key\ndef KeyGen(): \n    p = generatePrime()\n    g = moduloExponent(find_primitive_root(p), 2 , p)\n    x = random.randint(1, (p-1) // 2)\n    h = moduloExponent(g,x,p)\n    #[public,private]\n    return [[p,g,h],[p,g,x]]\n    \n\n## param: publicKeySet: the public key set in sequence p,g,h\ndef Encrypt32bit(publicKeySet,msg):\n    ## separate the keyset\n    p = publicKeySet[0]\n    g = publicKeySet[1]\n    h = publicKeySet[2]\n    \n    message = Encoder32(msg)\n    \n    ## for debugging\n    if __name__ == \"__main__\":  \n        print(msg+\":\")\n        print(message)\n    \n    \n    encrypt_pair = []\n    for i in range(0,len(message),8):\n        thisNum = int(message[i:i+8])\n        y = random.randint(1, p - 2)  # ephemeral key must be nonzero, else d equals the block and leaks it\n        c = moduloExponent( g, y, p)\n        d = 
(thisNum*moduloExponent( h, y, p)) % p\n encrypt_pair.append( [c, d] )\n encryptedStr = \"\"\n for thisPair in encrypt_pair:\n encryptedStr += str(thisPair[0]) + ' ' + str(thisPair[1]) + ' '\n return encryptedStr \n \n\n## param: privateKeySet: the private key set in sequence p,g,x\ndef Decrypt32bit(privateKeySet ,msg):\n \n p = privateKeySet[0]\n g = privateKeySet[1]\n x = privateKeySet[2]\n \n ret = \"\"\n msgList = msg.split()\n for i in range(0, len(msgList), 2):\n c = int(msgList[i])\n d = int(msgList[i+1])\n s = moduloExponent( c, x, p)\n text = (d*moduloExponent( s,p-2, p)) % p\n text = str(text)\n while(len(text)< 8):\n text = \"0\"+text\n ret += text \n retDec = Decoder32(ret)\n return retDec\n \n \n \nif __name__ == \"__main__\":\n \n print(KeyGen())\n print(KeyGen())\n \n #keySet = KeyGen()\n ### p,g,h\n #publicKey = keySet[0]\n ### p,g,x\n #privateKey = keySet[1]\n \n \n #plainText = \"AAAA\" \n #cipherText = (Encrypt32bit(publicKey,plainText));\n #print(cipherText)\n #recoveredText = (Decrypt32bit(privateKey,cipherText));\n #print(recoveredText) \n","sub_path":"WhiteHat/Codes/ElGamal.py","file_name":"ElGamal.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"271002152","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\n\nfrom flask import json\nfrom six import BytesIO\n\nfrom swagger_server.models.error_model_schema import ErrorModelSchema # noqa: E501\nfrom swagger_server.models.light_source_material_schema import LightSourceMaterialSchema # noqa: E501\nfrom swagger_server.models.succesfully_created_schema import SuccesfullyCreatedSchema # noqa: E501\nfrom swagger_server.test import BaseTestCase\n\n\nclass TestLightSourceMaterialController(BaseTestCase):\n \"\"\"LightSourceMaterialController integration test stubs\"\"\"\n\n def test_material_light_source_post(self):\n \"\"\"Test case for material_light_source_post\n\n Create a new light_source material object\n \"\"\"\n light_source_material = LightSourceMaterialSchema()\n response = self.client.open(\n '/api/material/light_source',\n method='POST',\n data=json.dumps(light_source_material),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_material_light_source_uuid_put(self):\n \"\"\"Test case for material_light_source_uuid_put\n\n Modify an existing light_source material file\n \"\"\"\n light_source_material = LightSourceMaterialSchema()\n response = self.client.open(\n '/api/material/light_source/{uuid}'.format(uuid='uuid_example'),\n method='PUT',\n data=json.dumps(light_source_material),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n\nif __name__ == '__main__':\n import unittest\n unittest.main()\n","sub_path":"swagger_server/test/test_light_source_material_controller.py","file_name":"test_light_source_material_controller.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"460100031","text":"\"\"\"Quick plotting tools go here\n\nNote: older code from Rupert used pylab, which is now discouraged. 
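The commented-out `__main__` block in the ElGamal record sketches the intended round trip; spelled out below. It requires the `StringBinaryConverter` module the record imports (not included here), and decryption works because `pow(s, p - 2, p)` is the modular inverse of `s` for prime `p` (Fermat's little theorem):

```python
public_key, private_key = KeyGen()               # [p, g, h] and [p, g, x]
cipher_text = Encrypt32bit(public_key, "AAAA")   # needs Encoder32 from StringBinaryConverter
plain_text = Decrypt32bit(private_key, cipher_text)
assert plain_text == "AAAA"
```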
KD changed to pyplot on 7-10-19 but has\nnot tested all older aspects of the code to ensure proper switch from pylab (though should be the same)\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm, SymLogNorm\nimport matplotlib.ticker as ticker\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits import axes_grid1\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nfrom matplotlib.collections import LineCollection\nimport warnings\n\nfrom medis.params import tp, sp, ap\nfrom medis.utils import dprint\nimport medis.optics as opx\nfrom medis.twilight_colormaps import sunlight\n\n# MEDIUM_SIZE = 17\n# plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes\n\nfrom matplotlib import rcParams\n# rcParams['text.usetex'] = False\nrcParams['font.family'] = 'DejaVu Sans'\n# rcParams['mathtext.fontset'] = 'custom'\n# rcParams['mathtext.fontset'] = 'stix'\n# rcParams['mathtext.rm'] = 'Bitstream Vera Sans'\n# rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'\n\n######################################################################################\n# Preset Plots\n######################################################################################\n###########################\n# Single White Light Image\n###########################\ndef quick2D(image, dx=None, title=None, logZ=False, vlim=(None,None),\n colormap='YlGnBu_r', zlabel='Intensity', show=True):\n \"\"\"\n Looks at a 2D array, has bunch of handles for plot.imshow\n\n :param image: 2D array to plot (data)\n :param dx: sampling of the image in m. Hardcoded to convert to um on axis\n :param title: string--must be set or will error!\n :param logZ: flag to set logscale plotting on z-axis\n :param vlim: tuple of limits on the colorbar axis, otherwise default matplotlib (pass in logscale limits if logZ=True)\n :param colormap: specify colormap as string\n :param zlabel: string label of the colorbar axis\n :param show: if true, shows the image now and blocks the sim, else false waits until the next plt.show() call\n :return:\n \"\"\"\n # Create figure & adjust subplot number, layout, size, whitespace\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Title\n while title is None:\n warnings.warn(\"Plots without titles: Don't Do It!\")\n title = input(\"Please Enter Title: \")\n\n # X,Y lables\n if dx is not None:\n # if dx < 1e-4:\n # dx *= 1e6\n # scale = np.round(\n # np.linspace(-dx * sp.maskd_size / 2, dx * sp.maskd_size / 2, 5)).astype(int)\n # tic_spacing = np.linspace(0, sp.maskd_size, 5)\n # tic_spacing[0] = tic_spacing[0] + 1 # hack for edge effects\n # tic_spacing[-1] = tic_spacing[-1] -1 # hack for edge effects\n tic_spacing, tic_labels, axlabel = scale_lD(dx, tp.fn_fp)\n plt.xticks(tic_spacing, tic_labels)\n plt.yticks(tic_spacing, tic_labels)\n plt.xlabel(axlabel)\n\n # Setting z-axis by mean\n # if vlim == (None, None):\n if vlim == (-1, -1): # Hack this to have it not be default. 
Only use it deliberately.\n        nstd = 2\n        std = np.std(image)\n        mean = np.mean(image)\n        vlim = mean - nstd * std, mean + nstd * std\n\n    # Setting Logscale\n    if colormap == 'sunlight':\n        colormap = sunlight\n    if not logZ:\n        norm = None\n    elif vlim[0] is None or vlim[0] > 0:\n        norm = LogNorm()\n    else:\n        norm = SymLogNorm(linthresh=1e-7)  # linthresh is required by matplotlib; matches the one-liner below\n    # norm = None if not logZ else LogNorm() if vlim[0] > 0 else SymLogNorm(1e-7))\n\n    # Plot the damn thing\n    cax = ax.imshow(image, interpolation='none', origin='lower', vmin=vlim[0], vmax=vlim[1],\n                    norm=norm, cmap=colormap)\n\n    # Plotting\n    plt.title(title, fontweight='bold', fontsize=16)\n    cb = plt.colorbar(cax)\n    cb.set_label(zlabel)\n    if show:\n        plt.show(block=False)\n\n###############################\n# Spectra from multi-axis data\n###############################\ndef grid(fields, title='body spectra', logZ=False, show=True, nstd=1, vlim=(None, None), cmap='inferno'):\n    \"\"\"\n    General purpose plotter for multi-dimensional input tensors from 2D up to 6D. The tensor will be converted to 4D\n    and plotted as a grid of 2D images\n\n    :param fields:\n    :param title:\n    :param logZ:\n    :param show:\n    :return:\n    \"\"\"\n    if cmap == 'sunlight':\n        cmap = sunlight\n    fields = np.array(fields)  # just in case it's a list\n    assert fields.ndim > 2\n    if np.iscomplexobj(fields.flat[0]):\n        fields = np.abs(fields)**2  # convert to intensity if complex\n    while len(fields.shape) > 4:\n        try:\n            boring_ind = fields.shape.index(1)\n            fields = np.mean(fields, axis=boring_ind)\n        except ValueError:\n            fields = fields[0]  # if there is no singleton axis, slice out the first dimension\n    while len(fields.shape) < 4:\n        fields = fields[:,np.newaxis]\n\n    slices = np.int_(np.ceil(np.array(fields.shape)[:2]/5))\n    fields = fields[::slices[0],::slices[1]]\n    print(f'fields being sliced by {slices} making new fields size {fields.shape}')\n    nwave, nobj, x, y = fields.shape\n\n    try:\n        std = np.std(fields)\n        mean = np.mean(fields)\n    except ValueError:\n        std = np.std(fields[0])\n        mean = np.mean(fields[0])\n\n    if vlim == (None, None):\n        vmin, vmax = mean - nstd*std, mean + nstd*std\n    else:\n        vmin, vmax = vlim\n\n    fig = plt.figure(figsize=(16, 9))\n    fig.suptitle(title)\n    norm = None if not logZ else (LogNorm() if vmin > 0 else SymLogNorm(1e-7))\n\n    imgrid = ImageGrid(fig, 111,  # as in plt.subplot(111)\n                     nrows_ncols=(nobj, nwave),\n                     axes_pad=0.15,\n                     share_all=True,\n                     cbar_location=\"right\",\n                     cbar_mode=\"single\",\n                     cbar_size=\"7%\",\n                     cbar_pad=0.15,\n                     )\n    for i, ax in enumerate(imgrid):\n        x, y = i % nwave, i // nwave\n        im = ax.imshow(fields[x, y], norm=norm, vmin=vmin, vmax=vmax, cmap=cmap)\n\n    ax.cax.colorbar(im)\n    ax.cax.toggle_label(True)\n\n    plt.tight_layout()\n    plt.subplots_adjust(right=0.9)\n\n    if show:\n        plt.show(block=True)\n\n#####################################\n# Spectral Plot from Spectral Cube\n#####################################\ndef view_spectra(datacube, title=None, show=True, logZ=False, use_axis=True, vlim=(None,None), subplt_cols=3,\n                 dx=None):\n    \"\"\"\n    view plot of intensity in each wavelength bin at a single (last) timestep\n\n    :param datacube: 3D spectral cube (n_wavelengths, nx, ny) at single timestep\n    :param title: string, must be set or will error!\n    :param show: if True, show the figure immediately; useful when plotting in a loop\n    :param logZ: turn logscale plotting for Z axis on or off\n    :param use_axis: turn on/off using axis ticks, colorbar, etc\n    :param vlim: tuple of colorbar axis limits (min,max)\n    :param subplt_cols: number of subplots per row\n    :param dx: sampling of the image in m. 
Hardcoded to convert to um\n :return:\n \"\"\"\n # Create figure & adjust subplot number, layout, size, whitespace\n fig = plt.figure()\n n_colors = len(datacube)\n n_rows = int(np.ceil(n_colors / float(subplt_cols))+1)\n plt.axis('off')\n gs = gridspec.GridSpec(n_rows, subplt_cols, wspace=0.08, top=0.9)\n\n # Title\n if title is None:\n warnings.warn(\"Plots without titles: Don't Do It!\")\n title = input(\"Please Enter Title: \")\n pass\n fig.suptitle(title, fontweight='bold', fontsize=16)\n\n # Wavelength Strings for Subplot Titles\n w_string = np.array(np.linspace(ap.wvl_range[0] * 1e9, ap.wvl_range[1] * 1e9, ap.n_wvl_final, dtype=int), dtype=str)\n\n for w in range(n_colors):\n ax = fig.add_subplot(gs[w])\n ax.set_title(r'$\\lambda$ = ' + f\"{w_string[w]} nm\")\n\n slice = opx.extract_center(datacube[w])\n\n # X,Y lables\n if dx is not None:\n dx[w] = dx[w] * 1e6 # [convert to um]\n # dprint(f\"sampling = {sampl[w]}\")\n tic_spacing = np.linspace(0, slice.shape[0], 5) # 5 (number of ticks) is set by hand, arbitrarily chosen\n tic_lables = np.round(\n np.linspace(-dx[w] * sp.maskd_size / 2, dx[w] * sp.maskd_size / 2, 5)).astype(int) # nsteps must be same as tic_spacing\n tic_spacing[0] = tic_spacing[0] + 1 # hack for edge effects\n tic_spacing[-1] = tic_spacing[-1] - 1 # hack for edge effects\n plt.xticks(tic_spacing, tic_lables, fontsize=6)\n plt.yticks(tic_spacing, tic_lables, fontsize=6)\n # plt.xlabel('[um]', fontsize=8)\n # plt.ylabel('[um]', fontsize=8)\n\n # Z-axis scale\n if logZ:\n if np.min(slice) < 0:\n im = ax.imshow(slice, interpolation='none', origin='lower',\n vmin=vlim[0], vmax=vlim[1],\n norm=SymLogNorm(linthresh=1e-5),\n cmap=\"YlGnBu_r\")\n clabel = \"Log Normalized Intensity\"\n else:\n im = ax.imshow(slice, interpolation='none', origin='lower',\n vmin=vlim[0], vmax=vlim[1], norm=LogNorm(),\n cmap=\"YlGnBu_r\")\n clabel = \"Log Normalized Intensity\"\n else:\n im = ax.imshow(slice,\n interpolation='none', origin='lower', vmin=vlim[0], vmax=vlim[1], cmap=\"YlGnBu_r\")\n clabel = \"Normalized Intensity\"\n\n if use_axis == 'anno':\n ax.annotate_axis(im, ax, datacube.shape[1])\n if use_axis is None:\n plt.axis('off')\n\n if use_axis:\n warnings.simplefilter(\"ignore\", category=UserWarning)\n gs.tight_layout(fig, pad=1.08, rect=(0, 0, 1, 0.85)) # rect = (left, bottom, right, top)\n # fig.tight_layout(pad=50)\n cbar_ax = fig.add_axes([0.55, 0.3, 0.2, 0.05]) # Add axes for colorbar @ position [left,bottom,width,height]\n cb = fig.colorbar(im, cax=cbar_ax, orientation='horizontal') #\n cb.set_label(clabel)\n\n if show is True:\n plt.show(block=False)\n\n################################\n# Timeseries from Temporal Cube\n################################\ndef view_timeseries(img_tseries, cdi, title=None, logZ=False, vlim =(None,None),\n dx=None, subplt_cols=3, box={'use':False, 'threshold':1e-7}):\n \"\"\"\n view white light images in the timeseries\n\n :param img_tseries: intensity timeseries [n_tsteps, nx,ny]\n :param cdi: struct that contains the CDI params (from CDI.py)\n :param title: string, must be set or will error!\n :param logZ: turn logscale plotting for Z-axis on or off\n :param use_axis: turn on/off using axis ticks, colorbar, etc\n :param vlim: tuple of colorbar axis limits (min,max)\n :param subplt_cols: number of subplots per row\n :param dx: sampling of the image in m. 
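A minimal smoke test for the plotters in this record. `quick2D` runs standalone with `dx=None`; `view_spectra` additionally reads the MEDIS params (`ap.wvl_range`, `ap.n_wvl_final`, `sp.maskd_size`) for its titles and ticks, so those must be configured first. The random data is purely illustrative:

```python
import numpy as np

img = np.random.rand(128, 128)
quick2D(img, title="random smoke test")           # dx=None skips the lambda/D tick labels

cube = np.abs(np.random.randn(4, 128, 128)) ** 2  # fake (n_wvl, nx, ny) intensity cube
view_spectra(cube, title="random spectral cube", logZ=True, subplt_cols=2)
```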
Hardcoded to convert to um\n :param box: if True, draw box around CDI region\n :return:\n \"\"\"\n # Recreate CDI phase stream for plot titles\n if cdi.use_cdi:\n phases = cdi.phase_series\n\n # Create figure & adjust subplot number, layout, size, whitespace\n n_tsteps = len(img_tseries)\n n_rows = int(np.ceil(n_tsteps / float(subplt_cols)))\n\n fig, subplot = plt.subplots(n_rows, subplt_cols, figsize=(12, 10))\n fig.subplots_adjust(bottom=0.1, top=0.85, left=0.01, hspace=.4, wspace=0.02, right=0.9,) # wspace=0.2, right=0.95, left=0.05,\n\n # Title\n if title is None:\n warnings.warn(\"Plots without titles: Don't Do It!\")\n title = input(\"Please Enter Title: \")\n pass\n fig.suptitle(title, fontweight='bold', fontsize=16)\n\n for ax, t in zip(subplot.flatten(), range(n_rows*subplt_cols)): # range(n_tsteps)\n if t > n_tsteps-1:\n ax.axis('off') # hides axis\n pass\n else:\n # Axis (Subplot) title\n if cdi.use_cdi and not np.isnan(phases[t]):\n ax.set_title(f\"probe \" r'$\\theta$' + f\"={phases[t] / np.pi:.2f}\" + r'$\\pi$')\n else:\n ax.set_title(f\"t={t * sp.sample_time}\")\n\n if logZ:\n if vlim[0] is not None and vlim[0] <= 0:\n im = ax.imshow(img_tseries[t], interpolation='none', origin='lower',\n vmin=vlim[0], vmax=vlim[1],\n norm=SymLogNorm(linthresh=1e-5), cmap=\"YlGnBu_r\")\n clabel = \"Log Normalized Intensity\"\n else:\n im = ax.imshow(img_tseries[t], interpolation='none', origin='lower',\n vmin=vlim[0], vmax=vlim[1],\n norm=LogNorm(), cmap=\"YlGnBu_r\")\n clabel = \"Log Normalized Intensity\"\n else:\n im = ax.imshow(img_tseries[t], interpolation='none', origin='lower',\n vmin=vlim[0], vmax=vlim[1],\n cmap=\"YlGnBu_r\")\n clabel = \"Normalized Intensity\"\n\n # XY axis Labels\n tic_spacing, tic_labels, xylabel = scale_lD(dx, tp.fn_fp)\n ax.set_xticks(tic_spacing)\n ax.set_xticklabels(tic_labels)\n ax.set_yticks(tic_spacing)\n ax.set_yticklabels(tic_labels)\n ax.set_xlabel(xylabel)\n\n if box['use']:\n from medis.CDI import get_fp_mask\n fp_mask, edges, _, _, _, _ = get_fp_mask(cdi, thresh=box['threshold'])\n cl = LineCollection(edges, colors='r')\n ax.add_collection(cl)\n\n warnings.simplefilter(\"ignore\", category=UserWarning)\n cbar_ax = fig.add_axes([0.86, 0.1, 0.04, 0.8]) # Add axes for colorbar @ position [left,bottom,width,height]\n cb = fig.colorbar(im, cax=cbar_ax, orientation='vertical') #\n cb.set_label(clabel, fontsize=14)\n cb.ax.tick_params(labelsize=10)\n\n########################################################################\n# White Light Image at Different Optical Planes of Telescope Simulator\n########################################################################\ndef plot_planes(cpx_seq, title=None, logZ=[False], use_axis=True, vlim=[None, None], subplt_cols=3,\n dx=None, first=False):\n \"\"\"\n view plot of intensity in each wavelength bin at a single (last) timestep\n will pull out the plane(s) of sp.save_list at last tstep of cpx_sequence, convert to intensity, and sum over\n wavelength and object\n\n Currently, the atmosphere and enterance pupil are plotted in units of phase vs intensity. 
I think we should change\n this later for user-specification\n\n :param cpx_seq:\n :param title: string, must be set or will error!\n :param logZ: turn logscale plotting for z-axis on or off\n :param use_axis: turn on/off using axis ticks, colorbar, etc\n :param vlim: tuple of colorbar axis limits (min,max)\n :param subplt_cols: number of subplots per row\n :param dx: sampling of the image at each saved plane\n :return:\n \"\"\"\n # Create figure & adjust subplot number, layout, size, whitespace\n fig = plt.figure()\n n_planes = len(sp.save_list)\n n_rows = int(np.ceil(n_planes / float(subplt_cols)) )\n plt.axis('off')\n gs = gridspec.GridSpec(n_rows, subplt_cols, wspace=0.08)\n\n # Main Title\n if title is None:\n warnings.warn(\"Plots without titles: Don't Do It!\")\n title = input(\"Please Enter Title: \")\n pass\n fig.suptitle(title, fontweight='bold', fontsize=16)\n\n # Small Hack to repeat axis if defaults used\n np.array(vlim)\n if len(logZ) == 1:\n logZ = np.repeat(logZ,len(sp.save_list))\n if len(vlim) == 2:\n vlim = [vlim,]*len(sp.save_list)\n\n # Select first or last plane to plot (useful if CDI probes are being applied)\n if not first:\n f = -1\n else:\n f = 0 # select first or last timestep\n\n for p in range(n_planes):\n ax = fig.add_subplot(gs[p])\n\n ###################\n # Retreiving Data\n ##################\n # Standard-Way\n # [timestep, plane, wavelength, object, x, y]\n # converts to intensity of last timestep, THEN sums over wavelength, then sums over object\n plot_plane = sp.save_list[p]\n plane = opx.extract_plane(cpx_seq, plot_plane)\n # Distinguish plotting z-axis in phase units or intensity units\n if plot_plane == \"atmosphere\" or plot_plane == \"entrance_pupil\":\n plane = np.sum(np.angle(plane[f]), axis=(0,1))\n plane = opx.extract_center(plane, new_size=sp.grid_size*sp.beam_ratio+6)\n logZ[p] = False\n vlim[p] = [None, None]\n phs = \" phase\"\n elif plot_plane == \"woofer\" or plot_plane == \"tweeter\" or plot_plane == \"DM\":\n # only show the star phase map since phase at other bodies just offsets to shift focal plane position\n plane = np.sum(np.angle(plane[f]), axis=(0)) # only sum over object\n plane = plane[0] # plot the shortest wavelength\n plane = opx.extract_center(plane, new_size=sp.grid_size*sp.beam_ratio+6) # zoom in on DM\n logZ[p] = False\n vlim[p] = [-np.pi, np.pi]\n phs = ' phase'\n elif plot_plane == \"SubaruPupil\":\n plane = np.sum(opx.cpx_to_intensity(plane[f]), axis=(0, 1))\n plane = opx.extract_center(plane, new_size=sp.grid_size*sp.beam_ratio+6)\n phs = ''\n else:\n plane = np.sum(opx.cpx_to_intensity(plane[f]), axis=(0, 1))\n phs = ''\n\n ### Retreiving Data- Custom selection of plane ###\n # plot_plane = sp.save_list[w]\n # plane = opx.extract_plane(cpx_seq, plot_plane)\n # # plane = opx.cpx_to_intensity(plane[-1])\n # plane = opx.extract_center(np.angle(plane[0,1,1])) # wavelengths, objects\n\n # X,Y lables\n if dx is not None:\n # Converting Sampling Units to Readable numbers\n if dx[p, 0] < 1e-5:\n # dx[p, :] *= 1e6 # [convert to um]\n # axlabel = 'um'\n tic_spacing, tic_labels, axlabel = scale_lD(dx[p,:], tp.fn_fp,size=plane.shape[0]) # Assumes that this is the focal plane!\n else:\n if dx[p, 0] < 1e-3:\n dx[p, :] *= 1e3 # [convert to mm]\n axlabel = 'mm'\n elif 1e-2 > dx[p, 0] > 1e-3:\n dx[p, :] *= 1e2 # [convert to cm]\n axlabel = 'cm'\n else:\n axlabel = 'm'\n tic_spacing = np.linspace(0, plane.shape[0], 5) # 5 (# of ticks) is just set by hand, arbitrarily chosen\n tic_labels = np.round(\n np.linspace(-dx[p,0] * 
plane.shape[0] / 2, dx[p,0] * plane.shape[0] / 2, 5)).astype(int) # nsteps must be same as tic_spacing\n tic_spacing[0] = tic_spacing[0] + 1 # hack for edge effects\n tic_spacing[-1] = tic_spacing[-1] - 1 # hack for edge effects\n plt.xticks(tic_spacing, tic_labels, fontsize=6)\n plt.yticks(tic_spacing, tic_labels, fontsize=6)\n plt.ylabel(axlabel, fontsize=8)\n\n # Z-axis scale\n if phs == ' phase':\n cmap = sunlight\n else:\n cmap = \"YlGnBu_r\"\n ax.set_title(f\"{sp.save_list[p]}\" + phs)\n\n if logZ[p]:\n if vlim[p][0] is not None and vlim[p][0] <= 0:\n im = ax.imshow(plane, interpolation='none', origin='lower', vmin=vlim[p][0], vmax=vlim[p][1],\n norm=SymLogNorm(linthresh=1e-5),\n cmap=cmap)\n add_colorbar(im)\n # clabel = \"Log Normalized Intensity\"\n else:\n im = ax.imshow(plane, interpolation='none', origin='lower', vmin=vlim[p][0], vmax=vlim[p][1],\n norm=LogNorm(), cmap=cmap) #(1e-6,1e-3)\n add_colorbar(im)\n # clabel = \"Log Normalized Intensity\"\n # cb.set_label(clabel)\n else:\n im = ax.imshow(plane, interpolation='none', origin='lower', vmin=vlim[p][0], vmax=vlim[p][1],\n cmap=cmap) # \"twilight\"\n add_colorbar(im)\n # clabel = \"Normalized Intensity\"\n # cb.set_label(clabel)\n\n if use_axis:\n warnings.simplefilter(\"ignore\", category=UserWarning)\n gs.tight_layout(fig, pad=1.08, rect=(0, 0.02, 1, 0.9)) # rect = (left, bottom, right, top)\n # fig.tight_layout(pad=50)\n\n # plt.show(block=True)\n\n##########################################################\n# Plot Functions\n##########################################################\n\ndef add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs):\n \"\"\"Add a vertical color bar to an image plot.\"\"\"\n divider = axes_grid1.make_axes_locatable(im.axes)\n width = axes_grid1.axes_size.AxesY(im.axes, aspect=1./aspect)\n pad = axes_grid1.axes_size.Fraction(pad_fraction, width)\n current_ax = plt.gca()\n cax = divider.append_axes(\"right\", size=width, pad=pad)\n plt.sca(current_ax)\n return im.axes.figure.colorbar(im, cax=cax, **kwargs)\n\n\ndef scale_lD(samp, fn, size=sp.maskd_size):\n \"\"\"\n scales the focal plane into lambda/D units. Can use proper.prop_get_fratio to get the f_ratio that proper calculates\n at the focal plane. 
First convert the sampling in m/pix to rad/pix, then scale by the center wavelength lambda/D\n [rad].\n\n :param samp: sampling of the wavefront in m/pix\n :param fn: f# (focal ratio) of the beam in the focal plane\n :return:\n \"\"\"\n wvls = np.linspace(ap.wvl_range[0], ap.wvl_range[1], ap.n_wvl_init)\n cent = np.int(np.floor(ap.n_wvl_final / 2))\n\n if not samp.shape:\n pass # sampling is a single value\n else:\n samp = samp[cent] # sampling at the center wavelength\n\n # Convert to Angular Sampling Units via platescale\n fl = fn * tp.entrance_d\n rad_scale = samp / fl\n\n cw = wvls[cent] # center wavelength\n res = cw / tp.entrance_d\n\n tic_spacing = np.linspace(0, size, 5) # 5 (number of ticks) is set by hand, arbitrarily chosen\n tic_labels = np.round(np.linspace(-rad_scale * size / 2 , rad_scale * size / 2 , 5)/res) # nsteps must be same as tic_spacing\n tic_spacing[0] = tic_spacing[0] + 1 # hack for edge effects\n tic_spacing[-1] = tic_spacing[-1] - 1 # hack for edge effects\n\n axlabel = (r'$\\lambda$' + f'/D')\n\n return tic_spacing, tic_labels, axlabel\n\n\n","sub_path":"medis/plot_tools.py","file_name":"plot_tools.py","file_ext":"py","file_size_in_byte":22676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"114254592","text":"from __future__ import division\nimport pandas as pd\n\nfile1 = \"./Data/run.csv\"\nfile2 = \"./Data/experiment.csv\"\n\nd2 = open(file2, 'r')\nc2 = d2.readlines()\n\ndict2 = {}\nfor i, line in enumerate(c2):\n if i != 0:\n id, name = line.strip().split(',')\n dict2[id] = name\n\nd1 = open(file1, 'r')\nc1 = d1.readlines()\n\ndict1 = {}\nlines = []\nfor i, line in enumerate(c1):\n temp = line.strip().split(',')\n if i != 0:\n temp[1] = dict2[temp[1]]\n lines.append(temp)\n\nimport csv\n\nwith open(\"./Processed/transform1.csv\", \"wb\") as f:\n writer = csv.writer(f)\n writer.writerows(lines)\n\n\n\n","sub_path":"CherryPick/collect_0.py","file_name":"collect_0.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"408361201","text":"import numpy as np\nimport os\nfrom random import shuffle\nimport csv\nimport random\n\nimport matplotlib.pyplot as plt\n\n\n\n \n## https://www.kaggle.com/carolzhangdc/imdb-5000-movie-dataset\n\n######################### Edge feature ########################## \n# home_path = '/home/minje/ranking' \nwith open(os.path.join('data/movie/movie_metadata.csv'), 'r') as f:\n\treader = csv.reader(f)\n\traw_dataset = list(reader)\ndata_length = len(raw_dataset)\n\nlabel_list = raw_dataset[0]\nkey_idx = label_list.index('plot_keywords')\nimdb_idx = label_list.index('imdb_score')\n\nall_movie_rating = []\nfor i, data in enumerate(raw_dataset):\n\tkeywords = data[key_idx]\n\timdb_score = data[imdb_idx]\n\tif len(keywords) > 0:\n\t\tkeywords = keywords.split(\"|\")\n\t\tif len(keywords) == 5:\n\t\t\tall_movie_rating.append([keywords, imdb_score])\n\nprint(len(all_movie_rating))\n\nall_keywords = []\ndegree = []\nfor idx, movie_1 in enumerate(all_movie_rating):\n\tif idx % (10**2) == 0:\n\t\tprint('Processed: ' + str(idx) + '/' + str(len(all_movie_rating)))\n\t\n\tfor kwd in movie_1[0]:\n\t\tif kwd in all_keywords:\n\t\t\tkwd_idx = all_keywords.index(kwd)\n\t\t\tdegree[kwd_idx] += 1\n\t\telse:\n\t\t\tall_keywords.append(kwd)\n\t\t\tkwd_idx = len(all_keywords) - 1\n\t\t\tdegree.append(1)\n\nprint(np.mean(np.array(degree)))\nprint(len(degree))\n\n\nall_movie_rating_trn = 
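The scale_lD helper above converts focal-plane sampling into lambda/D units: the angular sampling is samp / fl with fl = f# * D, and one resolution element is cw / D at the center wavelength. A minimal standalone sketch of that arithmetic, with made-up optics values (the aperture, f-number, wavelength and sampling below are assumptions, not values from this repository):

import numpy as np

entrance_d = 5.0   # [m] entrance aperture (assumed)
fn = 15.0          # [-] focal ratio of the beam (assumed)
cw = 1.6e-6        # [m] center wavelength (assumed)
samp = 1e-6        # [m/pix] focal-plane sampling (assumed)

fl = fn * entrance_d   # [m] effective focal length
rad_scale = samp / fl  # [rad/pix] angular sampling via the platescale
res = cw / entrance_d  # [rad] one lambda/D resolution element

size = 128  # [pix] image width
# tick labels in lambda/D, mirroring the np.linspace(...) / res pattern above
tic_labels = np.round(np.linspace(-rad_scale * size / 2, rad_scale * size / 2, 5) / res)
print(tic_labels)  # -> [-3. -1.  0.  1.  3.]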
all_movie_rating[:int(0.8*len(all_movie_rating))]\n# all_movie_rating_trn = all_movie_rating[int(0.8*len(all_movie_rating))-25:int(0.8*len(all_movie_rating))]\n# all_movie_rating_tst = all_movie_rating[int(0.8*len(all_movie_rating)):]\nall_movie_rating_tst = all_movie_rating[int(0.8*len(all_movie_rating)):int(0.8*len(all_movie_rating))+50]\n# all_movie_rating_tst = all_movie_rating[:50]\nAll_match_data_trn = []\nAll_match_data_tst = []\nfor idx, movie_1 in enumerate(all_movie_rating_trn[:-1]):\n\tif idx % (10**2) == 0:\n\t\tprint('Processed: ' + str(idx) + '/' + str(len(all_movie_rating_trn)))\n\n\tmovie_2_list = random.sample(range(len(all_movie_rating_trn))[idx+1:], np.min([15, len(all_movie_rating_trn) - idx - 1]))\n\t# np.random.choice(len(all_movie_rating_trn), 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])\n\t# print(movie_2_list)\n\t# [:20]\n\tfor movie_2_idx in movie_2_list:\n\t\tmovie_2 = all_movie_rating_trn[movie_2_idx]\n\t\tmovie_1_data = []\n\t\tmovie_2_data = []\n\n\t\t# if len(movie_1[0]) == and len(movie_2[0]) == 3:\n\t\tfor kwd in movie_1[0]:\n\t\t\tkwd_idx = all_keywords.index(kwd)\n\t\t\tdegree[kwd_idx] += 1\n\t\t\tmovie_1_data.append(kwd_idx)\n\n\t\tfor kwd in movie_2[0]:\n\t\t\tkwd_idx = all_keywords.index(kwd)\n\t\t\tdegree[kwd_idx] += 1\n\t\t\tmovie_2_data.append(kwd_idx)\n\t\n\n\t\tif movie_1[1] > movie_2[1]:\n\t\t\tedge_data = '0' \n\t\telif movie_1[1] < movie_2[1]:\n\t\t\tedge_data = '1'\n\t\telif movie_1[1] == movie_2[1]:\n\t\t\tedge_data = '1/2'\n\t\t\t\n\t\tif edge_data == '0':\n\t\t\tmatch_data_1 = [movie_1_data, movie_2_data, '1']\n\t\t\tmatch_data_2 = [movie_2_data, movie_1_data, '0']\n\t\telif edge_data == '1':\n\t\t\tmatch_data_1 = [movie_1_data, movie_2_data, '0']\n\t\t\tmatch_data_2 = [movie_2_data, movie_1_data, '1']\n\t\telif edge_data == '1/2':\n\t\t\tbern = np.random.binomial(1, 0.5, 1)\n\t\t\tif bern == 1:\n\t\t\t\tmatch_data_1 = [movie_1_data, movie_2_data, '1']\n\t\t\t\tmatch_data_2 = [movie_2_data, movie_1_data, '0']\n\t\t\telse:\n\t\t\t\tmatch_data_1 = [movie_1_data, movie_2_data, '0']\n\t\t\t\tmatch_data_2 = [movie_2_data, movie_1_data, '1']\n\t\telse:\n\t\t\tprint(edge_data)\n\t\t\tprint(\"ERROR\")\n\t\t\n\t\tflattened_1 = [val for sublist in match_data_1 for val in sublist]\n\t\tflattened_1 = [int(x) for x in flattened_1]\n\t\tflattened_2 = [val for sublist in match_data_2 for val in sublist]\n\t\tflattened_2 = [int(x) for x in flattened_2]\n\n\t\tAll_match_data_trn.append(flattened_1)\n\t\tAll_match_data_trn.append(flattened_2)\n\n\n\n\ntst_movie_rating = []\n# all_movie_rating_tst = all_movie_rating_trn\nfor idx, movie_1 in enumerate(all_movie_rating_tst):\n\tif idx % (10**2) == 0:\n\t\tprint('Processed: ' + str(idx) + '/' + str(len(all_movie_rating_tst)))\n\n\t# print(movie_1[1])\n\ttst_movie_rating.append(float(movie_1[1]))\n\tmovie_2_list = all_movie_rating_tst\n\t# movie_2_list = random.sample(range(len(all_movie_rating_tst))[idx+1:], np.min([15, len(all_movie_rating_tst) - idx - 1]))\n\t# print(movie_2_list)\n\t# [:20]\n\tfor movie_2_idx in range(len(movie_2_list)):\n\t# for movie_2_idx in movie_2_list[:-1]:\n\t\tmovie_2 = all_movie_rating_tst[movie_2_idx]\n\t\tmovie_1_data = []\n\t\tmovie_2_data = []\n\n\t\t# if len(movie_1[0]) == and len(movie_2[0]) == 3:\n\t\tfor kwd in movie_1[0]:\n\t\t\tkwd_idx = all_keywords.index(kwd)\n\t\t\tdegree[kwd_idx] += 1\n\t\t\tmovie_1_data.append(kwd_idx)\n\n\t\tfor kwd in movie_2[0]:\n\t\t\tkwd_idx = all_keywords.index(kwd)\n\t\t\tdegree[kwd_idx] += 1\n\t\t\tmovie_2_data.append(kwd_idx)\n\t\n\t\tif 
movie_1[1] > movie_2[1]:\n\t\t\tedge_data = '0' \n\t\telif movie_1[1] < movie_2[1]:\n\t\t\tedge_data = '1'\n\t\telif movie_1[1] == movie_2[1]:\n\t\t\tedge_data = '1/2'\n\t\t# print(movie_1[1], movie_2[1], edge_data)\n\n\t\tif edge_data == '0':\n\t\t\tmatch_data_1 = [movie_1_data, movie_2_data, '1']\n\t\t\tmatch_data_2 = [movie_2_data, movie_1_data, '0']\n\t\telif edge_data == '1':\n\t\t\tmatch_data_1 = [movie_1_data, movie_2_data, '0']\n\t\t\tmatch_data_2 = [movie_2_data, movie_1_data, '1']\n\t\telif edge_data == '1/2':\n\t\t\tbern = np.random.binomial(1, 0.5, 1)\n\t\t\tif bern == 1:\n\t\t\t\tmatch_data_1 = [movie_1_data, movie_2_data, '1']\n\t\t\t\tmatch_data_2 = [movie_2_data, movie_1_data, '0']\n\t\t\telse:\n\t\t\t\tmatch_data_1 = [movie_1_data, movie_2_data, '0']\n\t\t\t\tmatch_data_2 = [movie_2_data, movie_1_data, '1']\n\t\telse:\n\t\t\tprint(edge_data)\n\t\t\tprint(\"ERROR\")\n\t\t\n\t\tflattened_1 = [val for sublist in match_data_1 for val in sublist]\n\t\tflattened_1 = [int(x) for x in flattened_1]\n\t\tflattened_2 = [val for sublist in match_data_2 for val in sublist]\n\t\tflattened_2 = [int(x) for x in flattened_2]\n\n\t\tAll_match_data_tst.append(flattened_1)\n\t\tAll_match_data_tst.append(flattened_2)\t \n\n# np.random.shuffle(All_match_data)\n# print(np.array(All_match_data).shape)\n\n\nfeature = []\nn_items = len(all_keywords)\nfeature.append(range(n_items))\nfeature.append(range(n_items))\nfeature.append(range(n_items))\nfeature = list(map(list, zip(*feature)))\nGT = range(n_items)\n\nnp.savetxt(os.path.join('data/movie', 'movie.nodes'), feature)\nnp.savetxt(os.path.join('data/movie', 'movie.GT'), GT)\nnp.savetxt(os.path.join('data/movie', 'movie.edges'), All_match_data_trn)\nnp.savetxt(os.path.join('data/movie', 'movie.edges_test'), All_match_data_tst)\nnp.savetxt(os.path.join('data/movie', 'movie.GT_test'), tst_movie_rating)\n\n# np.savetxt(os.path.join(home_path, 'data/movie', 'movie.edges'), All_match_data[:int(0.8*len(All_match_data))])\n# np.savetxt(os.path.join(home_path, 'data/movie', 'movie.edges_test'), All_match_data[int(0.8*len(All_match_data))+1:])\n\n#################### Rank Centrality feature #################### \n# n_iteration = 10000\n# n_items = int(np.max(new_edges) + 1)\n# feature = []\n\n# score = np.ones(n_items)\n# Markov_chain = np.zeros((n_items, n_items))\n# adjacency = np.zeros((n_items, n_items))\n# for i, datum in enumerate(new_edges):\n# idx1, idx2 = int(datum[0]), int(datum[1])\n# Markov_chain[idx1][idx2] += datum[2]\n# Markov_chain[idx2][idx1] += 1 - datum[2]\n# adjacency[idx1][idx2] += 1\n# adjacency[idx2][idx1] += 1\n# degree = np.sum(adjacency, axis = 0)\n# dmax = max(degree)\n# count = np.sum(Markov_chain, axis = 1)\n# score2 = count / degree\n# Markov_chain = Markov_chain / dmax \n# for i in range(n_items):\n# Markov_chain[i][i] = 1 - np.sum(Markov_chain[:, i])\n# for i in range(n_iteration):\n# # if i % 1000 ==0:\n# # print(str(i) + '/' + str(n_iteration))\n# score = Markov_chain.dot(score)\n\n# dataset = new_edges\n# n_iteration = 1000\n# epsilon = 1e-10\n# score3 = np.ones(n_items) / n_items\n# n_win = np.zeros(n_items)\n# adjacency = np.zeros((n_items, n_items))\n# for i, datum in enumerate(dataset):\n# idx1, idx2, winloss = int(datum[0]), int(datum[1]), int(datum[2])\n# if winloss == 1:\n# n_win[idx1] += 1\n# elif winloss == 0:\n# n_win[idx2] += 1 \n\n# adjacency[idx1][idx2] += 1\n# adjacency[idx2][idx1] += 1\n\n# adjacency = np.array(adjacency, dtype = np.float)\n# for iter_idx in range(n_iteration):\n# score_matrix = 
np.repeat([score3], n_items, axis = 0)\n# score_matrix = score_matrix + np.transpose(score_matrix)\n\n# recipro_score = adjacency / score_matrix\n# recipro_score = np.sum(recipro_score, axis = 1)\n# score3 = (n_win + epsilon*np.ones(n_items)) / recipro_score\n\n# # for i in range(n_items):\n# # score3[i] = (n_win[i] + epsilon) / recipro_score[i]\n\n# score = score / max(score)\n# score2 = score2 / max(score2)\n# score3 = score3 / max(score3)\n\n# feature.append(range(n_items))\n# feature.append(score)\n# feature.append(score2)\n# feature.append(score3)\n# feature.append(range(n_items))\n\n# feature = list(map(list, zip(*feature)))\n# #################################################################\n# np.savetxt(os.path.join('data/GIFGIF', 'GIFGIF.nodes'), feature)\n","sub_path":"data/movie/Gen_movie_data.py","file_name":"Gen_movie_data.py","file_ext":"py","file_size_in_byte":8792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"524799622","text":"import pygame as pg\r\nimport random\r\nimport math\r\nfrom os import *\r\nfrom settings import *\r\n\r\nclass Mob(pg.sprite.Sprite):\r\n def __init__(self, game):\r\n pg.sprite.Sprite.__init__(self)\r\n self.game = game\r\n self.clase = \"Felltwin\"\r\n self.movement_dict = {\r\n \"up\": [0, -1],\r\n \"down\": [0, 1],\r\n \"right\": [1, 0],\r\n \"left\": [-1, 0]\r\n }\r\n\r\n self.current_frame = 0\r\n self.last_folder = \"down\"\r\n self.last_update = 0\r\n self.img_dir = path.join(path.join(game_folder, \"img\\Enemies\"), self.clase)\r\n self.images = self.get_images(path.join(self.img_dir, \"Movement\\down\"))\r\n self.image = self.images[self.current_frame]\r\n self.image.set_colorkey(WHITE)\r\n self.rect = self.image.get_rect()\r\n self.rect.center = (WIDTH / 2, 0)\r\n\r\n def update(self):\r\n self.speed = [0, 0]\r\n self.foldername = \"\"\r\n self.detect()\r\n\r\n self.rect.y += self.speed[1]\r\n self.rect.x += self.speed[0]\r\n # if self.rect.bottom > HEIGHT:\r\n # self.rect.right = WIDTH\r\n # if self.rect.left < 0:\r\n # self.rect.left = 0\r\n\r\n def detect(self):\r\n if math.sqrt((self.rect.centerx - self.game.player.rect.centerx) ** 2 + (self.rect.centery - self.game.player.rect.centery) ** 2) < 500:\r\n self.follow()\r\n\r\n def follow(self):\r\n self.choque = False\r\n difference = 80\r\n if self.rect.left <= self.game.player.rect.right - difference and self.rect.right >= self.game.player.rect.left + difference:\r\n if self.rect.bottom == self.game.player.rect.top + difference:\r\n self.choque = True\r\n self.speed = [0, 0]\r\n self.attack()\r\n elif self.rect.top == self.game.player.rect.bottom - difference:\r\n self.choque = True\r\n self.speed = [0, 0]\r\n self.attack()\r\n\r\n if self.rect.top <= self.game.player.rect.bottom - difference and self.rect.bottom >= self.game.player.rect.top + difference:\r\n if self.rect.left == self.game.player.rect.right - difference:\r\n self.choque = True\r\n self.speed = [0, 0]\r\n self.attack()\r\n elif self.rect.right == self.game.player.rect.left + difference:\r\n self.choque = True\r\n self.speed = [0, 0]\r\n self.attack()\r\n\r\n if self.rect.centerx > self.game.player.rect.centerx and not self.choque:\r\n if self.rect.centery > self.game.player.rect.centery:\r\n self.speed = [-1, -1]\r\n self.move(\"upleft\")\r\n elif self.rect.centery < self.game.player.rect.centery:\r\n self.speed = [-1, 1]\r\n self.move(\"downleft\")\r\n else:\r\n self.speed = [-1, 0]\r\n self.move(\"left\")\r\n elif self.rect.centerx < 
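The two loops above reduce every movie pair to a pair of mirrored comparison rows, flipping a fair coin when the IMDb scores tie. A condensed sketch of just that labeling rule (make_pair is a hypothetical name; note that the source compares imdb_score values while they are still strings, so a faithful numeric version casts to float first):

import numpy as np

def make_pair(items_a, items_b, score_a, score_b):
    """Return mirrored rows [a..., b..., label] and [b..., a..., 1 - label]."""
    if score_a > score_b:
        win = 1                                # a beats b
    elif score_a < score_b:
        win = 0                                # b beats a
    else:
        win = int(np.random.binomial(1, 0.5))  # tie: fair coin, as in the source
    return [items_a + items_b + [win], items_b + items_a + [1 - win]]

rows = make_pair([3, 7, 9], [1, 4, 8], float('8.1'), float('7.9'))
# -> [[3, 7, 9, 1, 4, 8, 1], [1, 4, 8, 3, 7, 9, 0]]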
self.game.player.rect.centerx and not self.choque:\r\n if self.rect.centery > self.game.player.rect.centery:\r\n self.speed = [1, -1]\r\n self.move(\"upright\")\r\n elif self.rect.centery < self.game.player.rect.centery:\r\n self.speed = [1, 1]\r\n self.move(\"downright\")\r\n else:\r\n self.speed = [1, 0]\r\n self.move(\"right\")\r\n elif not self.choque:\r\n if self.rect.centery > self.game.player.rect.centery:\r\n self.speed = [0, -1]\r\n self.move(\"up\")\r\n elif self.rect.centery < self.game.player.rect.centery:\r\n self.speed = [0, 1]\r\n self.move(\"down\")\r\n\r\n def attack(self):\r\n pass\r\n '''\r\n attack_dir = path.join(self.img_dir, \"Attacks\")\r\n isattacking = False\r\n actionkeyspressed = self.keys.get_keynames(self.keys.action_keyspressed)\r\n for keyname in actionkeyspressed:\r\n self.speed = [0, 0]\r\n if len(actionkeyspressed) > 0:\r\n folder = path.join(attack_dir, self.last_folder)\r\n isattacking = True\r\n self.action(folder)\r\n\r\n return isattacking\r\n '''\r\n\r\n def standing(self, folder):\r\n stand_dir = \"\"\r\n stand_dir = path.join(self.img_dir, \"Standing\")\r\n stand_dir = path.join(stand_dir, folder)\r\n self.action(stand_dir)\r\n\r\n def folder_check(self, folder):\r\n spc = \" \"\r\n index = folder.index(spc) if spc in folder else None\r\n if index != None:\r\n s1 = folder[0:index]\r\n s2 = folder[(index + 1):len(folder)]\r\n if s2 == \"up\" or s2 == \"down\":\r\n folder = s2 + s1\r\n else:\r\n folder = s1 + s2\r\n\r\n return folder\r\n\r\n def move(self, foldername):\r\n mov_dir = path.join(self.img_dir, \"Movement\")\r\n folder = \"\"\r\n folder = path.join(mov_dir, foldername)\r\n #ismoving = False\r\n self.action(folder)\r\n\r\n #return folder, ismoving\r\n\r\n def get_images(self, frames_dir):\r\n files_range = listdir(frames_dir)\r\n images = []\r\n for i in range(len(files_range) - 1):\r\n images.append(pg.image.load(path.join(frames_dir, str(i) + \".png\")))\r\n images[i].set_colorkey(WHITE)\r\n return images\r\n\r\n def action(self, folder):\r\n self.images = self.get_images(folder)\r\n now = pg.time.get_ticks()\r\n if now - self.last_update > 100:\r\n self.last_update = now\r\n self.current_frame = (self.current_frame + 1) % len(self.images)\r\n self.image = self.images[self.current_frame]\r\n","sub_path":"mob.py","file_name":"mob.py","file_ext":"py","file_size_in_byte":5763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"129972101","text":"import json\n\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import render\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.contrib.auth.decorators import login_required\n\nfrom mytestkh.contact.forms import ContactForm\nfrom mytestkh.contact.models import Contact\n\n\ndef index(request):\n c = {}\n c['cont'] = get_object_or_404(Contact, pk=1)\n return render(request, \"contact.html\", c, content_type=\"text/html\")\n\n@login_required\n@csrf_protect\ndef edit(request):\n cont = get_object_or_404(Contact, pk=1)\n c = {}\n\n if request.method == \"POST\":\n form = ContactForm(request.POST, request.FILES, instance=cont)\n if request.is_ajax() or request.POST.get('isajax', '0') == '1':\n c['success'] = False\n if form.is_valid():\n form.save()\n c['success'] = True\n if form.instance.photo:\n c['image_url'] = form.instance.photo.url\n c['image_width'] = form.instance.photo.width\n c['image_height'] = 
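The follow ladder in mob.py above is, in effect, a sign function on the mob-to-player offset that also picks the animation folder name. A compact equivalent sketch (step_toward is a hypothetical name, and the collision and attack branches are deliberately left out):

def step_toward(mob_x, mob_y, player_x, player_y):
    """Unit step toward the player plus the folder name used by move()."""
    sx = (player_x > mob_x) - (player_x < mob_x)  # -1, 0 or 1
    sy = (player_y > mob_y) - (player_y < mob_y)
    vertical = {-1: 'up', 1: 'down'}.get(sy, '')
    horizontal = {-1: 'left', 1: 'right'}.get(sx, '')
    return [sx, sy], (vertical + horizontal) or None

assert step_toward(100, 50, 40, 80) == ([-1, 1], 'downleft')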
form.instance.photo.height\n else:\n c['errors'] = {}\n for err in form.errors:\n c['errors'][\"%s_errors\" % (err)] = form.errors[err].as_ul()\n return HttpResponse(json.dumps(c),\n mimetype='application/javascript')\n else:\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('contact_view'))\n else:\n form = ContactForm(instance=cont)\n\n c['form'] = form\n\n return render(request, \"contact_edit.html\", c, content_type=\"text/html\")\n","sub_path":"mytestkh/contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"586165866","text":"\nimport hashlib\nimport time\nimport base64\nfrom urllib import parse, request\nimport requests\nimport json\n\n\ndef md5(s, encoding=\"utf-8\"):\n \"\"\"md5快速接口\"\"\"\n hx = hashlib.md5()\n hx.update(bytes(s,encoding=encoding))\n return hx.hexdigest()\n\ndef str_json(s):\n \"\"\"str to json\"\"\"\n return json.loads(s)\n \n\ndef get_proxy_ip(page, num, click_btn):\n \"\"\"获取代理\"\"\"\n # 时间戳\n t = int(time.time())\n # token 值\n token = md5(str(page)+str(num)+str(t))\n # 组合url api\n url = \"../proxy?page={}&num={}&token={}&t={}\".format(page,num,token,t)\n url = parse.urljoin(\"https://nyloner.cn/proxy\",url)\n \n # 请求主页 获取cookie等信息\n session = requests.session()\n session.get('https://nyloner.cn/proxy')\n # 调用 api 获取数据\n data = session.get(url).json()\n \n return parse_data(data)\n \ndef parse_data(data):\n \"\"\"解析数据\"\"\"\n if 'list' in data:\n # 密文 解密\n s = decode_str(data['list'])\n return str_json(s)\n \ndef decode_str(s):\n \"\"\"解密\"\"\"\n \n s = base64.b64decode(s)# 先进性一次b64解码\n # js中的密钥\n key = '\\x6e\\x79\\x6c\\x6f\\x6e\\x65\\x72'\n # 密钥长度\n l = len(key)\n # b64明文 储存\n code = \"\"\n \n for i,item in enumerate(s):\n n = i % l\n # ord 内置函数 返回单个字符的unicode编码 10进制\n # ord('s') ---> 115\n # chr 内置函数 ord 的逆函数\n # chr(155) ---> 's'\n code += chr(item ^ ord(key[n]))\n \n return base64.b64decode(code).decode()\n\ndef main():\n data = get_proxy_ip(1,20,'last')\n data.sort(key = lambda x:x['time'])\n for i in data:\n print(i)\n\n\nif __name__ == \"__main__\": \n main()","sub_path":"proxy/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"210324646","text":"#!/usr/bin/env python3\n\n# Copyright Software Improvement Group\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport base64\nimport datetime\nimport dataclasses\nimport html\nimport json\nimport os\nimport sys\nimport time\nimport typing\nimport urllib.parse\nimport urllib.request\nimport zipfile\n\n\nLOG_HISTORY = []\n\n\ndef log(message):\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(f\"{timestamp} {message}\")\n LOG_HISTORY.append(message)\n \n \n@dataclasses.dataclass\nclass UploadOptions:\n sourceDir: str = None\n excludePatterns: typing.List[str] = 
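decode_str in the proxy record above inverts a double-base64 plus repeating-key-XOR scheme; because XOR is its own inverse, the same per-byte pass works in both directions. A small roundtrip sketch (the key matches the one hard-coded in the source; encode_str and the sample plaintext are assumptions for illustration):

import base64

KEY = 'nyloner'  # key from the source

def xor_key(data: bytes) -> str:
    # XOR every byte with the repeating key, exactly as decode_str does
    return ''.join(chr(b ^ ord(KEY[i % len(KEY)])) for i, b in enumerate(data))

def encode_str(plain: str) -> str:
    inner = base64.b64encode(plain.encode()).decode()
    return base64.b64encode(xor_key(inner.encode()).encode()).decode()

def decode_str(s: str) -> str:
    inner = xor_key(base64.b64decode(s))
    return base64.b64decode(inner).decode()

assert decode_str(encode_str('[{"ip": "1.2.3.4"}]')) == '[{"ip": "1.2.3.4"}]'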
dataclasses.field(default_factory=lambda: [])\n includeHistory: bool = False\n pathPrefix: str = \"\"\n showContents: bool = False\n\n\nclass SigridApiClient:\n PROTOCOL_VERSION = \"v1\"\n POLL_INTERVAL = 60\n POLL_ATTEMPTS = 30\n RETRY_ATTEMPTS = 5\n\n def __init__(self, args):\n self.baseURL = args.sigridurl\n self.account = os.environ[\"SIGRID_CI_ACCOUNT\"]\n self.token = os.environ[\"SIGRID_CI_TOKEN\"]\n self.urlPartnerName = urllib.parse.quote_plus(args.partner.lower())\n self.urlCustomerName = urllib.parse.quote_plus(args.customer.lower())\n self.urlSystemName = urllib.parse.quote_plus(args.system.lower())\n self.publish = args.publish or args.publishonly\n \n def callSigridAPI(self, api, path):\n url = f\"{self.baseURL}/rest/{api}{path}\"\n request = urllib.request.Request(url, None)\n request.add_header(\"Accept\", \"application/json\")\n request.add_header(\"Authorization\", \\\n b\"Basic \" + base64.standard_b64encode(f\"{self.account}:{self.token}\".encode(\"utf8\")))\n \n response = urllib.request.urlopen(request)\n if response.status == 204:\n return {}\n responseBody = response.read().decode(\"utf8\")\n if len(responseBody) == 0:\n log(\"Received empty response\")\n return {}\n return json.loads(responseBody)\n \n def submitUpload(self, options):\n log(\"Creating upload\")\n uploadPacker = SystemUploadPacker(options)\n upload = \"sigrid-upload-\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\") + \".zip\"\n uploadPacker.prepareUpload(options.sourceDir, upload)\n \n log(\"Preparing upload\")\n uploadLocation = self.obtainUploadLocation()\n uploadUrl = uploadLocation[\"uploadUrl\"]\n analysisId = uploadLocation[\"ciRunId\"]\n log(f\"Sigrid CI analysis ID: {analysisId}\")\n log(\"Publishing upload\" if self.publish else \"Submitting upload\")\n\n if not self.uploadBinaryFile(uploadUrl, upload):\n raise Exception(\"Uploading file failed\")\n \n return analysisId\n \n def obtainUploadLocation(self):\n for attempt in range(self.RETRY_ATTEMPTS):\n try:\n return self.callSigridAPI(\"inboundresults\", self.getRequestUploadPath())\n except urllib.error.HTTPError as e:\n if e.code == 502:\n log(\"Retrying\")\n time.sleep(self.POLL_INTERVAL)\n else:\n self.processHttpError(e)\n \n log(\"Sigrid is currently unavailable\")\n sys.exit(1)\n \n def getRequestUploadPath(self):\n path = f\"/{self.urlPartnerName}/{self.urlCustomerName}/{self.urlSystemName}/ci/uploads/{self.PROTOCOL_VERSION}\"\n if self.publish:\n path += \"/publish\"\n return path\n \n def uploadBinaryFile(self, url, upload):\n with open(upload, \"rb\") as uploadRef:\n uploadRequest = urllib.request.Request(url, data=uploadRef.read())\n uploadRequest.method = \"PUT\"\n uploadRequest.add_header(\"Content-Type\", \"application/zip\")\n uploadRequest.add_header(\"Content-Length\", \"%d\" % os.path.getsize(upload))\n uploadRequest.add_header(\"x-amz-server-side-encryption\", \"AES256\")\n uploadResponse = urllib.request.urlopen(uploadRequest)\n return uploadResponse.status in [200, 201, 202]\n \n def fetchAnalysisResults(self, analysisId):\n for attempt in range(self.POLL_ATTEMPTS):\n try:\n response = self.callSigridAPI(\"analysis-results\",\n f\"/sigridci/{self.urlCustomerName}/{self.urlSystemName}/{self.PROTOCOL_VERSION}/ci/results/{analysisId}\")\n if response != {}:\n return response \n except urllib.error.HTTPError as e:\n self.processHttpError(e)\n except json.JSONDecodeError as e:\n log(\"Received incomplete analysis results\")\n \n log(\"Waiting for analysis results\")\n time.sleep(self.POLL_INTERVAL)\n \n 
log(\"Analysis failed: waiting for analysis results took too long\")\n sys.exit(1)\n \n def processHttpError(self, e):\n if e.code in [401, 403]:\n log(\"You are not authorized to access Sigrid for this system\")\n sys.exit(1)\n elif e.code == 404:\n log(\"Analysis results not yet available\")\n elif e.code >= 500:\n log(f\"Sigrid is currently not available (HTTP status {e.code})\")\n sys.exit(1)\n else: \n raise Exception(f\"Received HTTP status {e.code}\")\n \n\nclass SystemUploadPacker:\n MAX_UPLOAD_SIZE_MB = 500\n\n DEFAULT_EXCLUDES = [\n \"coverage/\",\n \"build/\",\n \"dist/\",\n \"node_modules/\",\n \"sigridci/\",\n \"sigrid-ci-output/\",\n \"target/\",\n \".idea/\",\n \".jpg\",\n \".png\"\n ]\n \n def __init__(self, options):\n self.excludePatterns = [] + (options.excludePatterns or []) + self.DEFAULT_EXCLUDES\n self.excludePatterns = [excl for excl in self.excludePatterns if excl != \"\"]\n if not options.includeHistory:\n self.excludePatterns += [\".git/\", \".gitmodules\"]\n\n self.pathPrefix = options.pathPrefix.strip(\"/\")\n self.showContents = options.showContents\n\n def prepareUpload(self, sourceDir, outputFile):\n zipFile = zipfile.ZipFile(outputFile, \"w\", zipfile.ZIP_DEFLATED)\n \n for root, dirs, files in os.walk(sourceDir):\n for file in sorted(files):\n filePath = os.path.join(root, file)\n if file != outputFile and not self.isExcluded(filePath):\n relativePath = os.path.relpath(os.path.join(root, file), sourceDir)\n uploadPath = self.getUploadFilePath(relativePath)\n if self.showContents:\n log(f\"Adding file to upload: {uploadPath}\")\n zipFile.write(filePath, uploadPath)\n \n zipFile.close()\n \n self.checkUploadContents(outputFile)\n \n def checkUploadContents(self, outputFile):\n uploadSizeBytes = os.path.getsize(outputFile)\n uploadSizeMB = max(round(uploadSizeBytes / 1024 / 1024), 1)\n log(f\"Upload size is {uploadSizeMB} MB\")\n \n if uploadSizeMB > self.MAX_UPLOAD_SIZE_MB:\n raise Exception(f\"Upload exceeds maximum size of {self.MAX_UPLOAD_SIZE_MB} MB\")\n \n if uploadSizeBytes < 50000:\n log(\"Warning: Upload is very small, source directory might not contain all source code\")\n \n def getUploadFilePath(self, relativePath):\n if self.pathPrefix == \"\":\n return relativePath\n return f\"{self.pathPrefix}/{relativePath}\"\n \n def isExcluded(self, filePath):\n normalizedPath = filePath.replace(\"\\\\\", \"/\")\n for exclude in self.excludePatterns:\n if exclude.strip() in normalizedPath:\n return True\n return False\n \n \nclass Report:\n METRICS = [\"VOLUME\", \"DUPLICATION\", \"UNIT_SIZE\", \"UNIT_COMPLEXITY\", \"UNIT_INTERFACING\", \"MODULE_COUPLING\",\n \"COMPONENT_BALANCE_PROP\", \"COMPONENT_INDEPENDENCE\", \"COMPONENT_ENTANGLEMENT\", \"MAINTAINABILITY\"]\n \n REFACTORING_CANDIDATE_METRICS = [\"DUPLICATION\", \"UNIT_SIZE\", \"UNIT_COMPLEXITY\", \"UNIT_INTERFACING\",\n \"MODULE_COUPLING\"]\n\n def generate(self, feedback, args):\n pass\n \n def formatRating(self, ratings, metric):\n if ratings.get(metric, None) == None:\n return \"N/A\"\n return \"%.1f\" % ratings[metric]\n \n def formatBaselineDate(self, feedback):\n snapshotDate = datetime.datetime.strptime(feedback[\"baseline\"], \"%Y%m%d\")\n return snapshotDate.strftime(\"%Y-%m-%d\")\n \n def isPassed(self, feedback, metric, targetRating):\n value = feedback[\"newCodeRatings\"].get(metric, None)\n return value == None or value >= targetRating\n \n def getSigridUrl(self, args):\n return \"https://sigrid-says.com/\" + urllib.parse.quote_plus(args.customer) + \"/\" + \\\n 
urllib.parse.quote_plus(args.system);\n \n def getRefactoringCandidates(self, feedback, metric):\n refactoringCandidates = feedback.get(\"refactoringCandidates\", [])\n return [rc for rc in refactoringCandidates if rc[\"metric\"] == metric]\n\n\nclass TextReport(Report):\n ANSI_BOLD = \"\\033[1m\"\n ANSI_GREEN = \"\\033[92m\"\n ANSI_YELLOW = \"\\033[33m\"\n ANSI_RED = \"\\033[91m\"\n ANSI_BLUE = \"\\033[96m\"\n LINE_WIDTH = 89\n\n def generate(self, feedback, args):\n print(\"-\" * self.LINE_WIDTH)\n print(\"Refactoring candidates\")\n print(\"-\" * self.LINE_WIDTH)\n for metric in self.REFACTORING_CANDIDATE_METRICS:\n self.printMetric(feedback, metric)\n\n print(\"\")\n print(\"-\" * self.LINE_WIDTH)\n print(\"Maintainability ratings\")\n print(\"-\" * self.LINE_WIDTH)\n print(\"System property\".ljust(40) + f\"Baseline ({self.formatBaselineDate(feedback)}) New/changed code quality\")\n for metric in self.METRICS:\n if metric == \"MAINTAINABILITY\":\n print(\"-\" * self.LINE_WIDTH)\n self.printRatingColor(metric.replace(\"_PROP\", \"\").title().replace(\"_\", \" \").ljust(40) + \\\n \"(\" + self.formatRating(feedback[\"overallRatings\"], metric) + \")\".ljust(21) + \\\n self.formatRating(feedback[\"newCodeRatings\"], metric), feedback[\"newCodeRatings\"].get(metric))\n \n def printMetric(self, feedback, metric):\n print(\"\")\n print(metric.replace(\"_PROP\", \"\").title().replace(\"_\", \" \"))\n \n refactoringCandidates = self.getRefactoringCandidates(feedback, metric)\n if len(refactoringCandidates) == 0:\n print(\" None\")\n else:\n for rc in refactoringCandidates:\n print(self.formatRefactoringCandidate(rc))\n \n def formatRefactoringCandidate(self, rc):\n category = (\"(\" + rc[\"category\"] + \")\").ljust(14)\n subject = rc[\"subject\"].replace(\"\\n\", \"\\n\" + (\" \" * 21)).replace(\"::\", \"\\n\" + (\" \" * 21))\n return f\" - {category} {subject}\"\n \n def printRatingColor(self, message, rating):\n ansiCodes = {\n self.ANSI_GREEN : rating != None and rating >= 3.5,\n self.ANSI_YELLOW : rating != None and rating >= 2.5 and rating < 3.5,\n self.ANSI_RED : rating != None and rating >= 0.0 and rating < 2.5,\n self.ANSI_BLUE : rating == None\n }\n\n prefix = \"\".join([code for code in ansiCodes if ansiCodes[code]])\n self.printColor(message, prefix)\n\n def printColor(self, message, ansiPrefix):\n print(ansiPrefix + message + \"\\033[0m\")\n \n \nclass StaticHtmlReport(Report):\n HTML_STAR_FULL = \"★\"\n HTML_STAR_EMPTY = \"☆\"\n\n def generate(self, feedback, args):\n if not os.path.exists(\"sigrid-ci-output\"):\n os.mkdir(\"sigrid-ci-output\")\n \n with open(os.path.dirname(__file__) + \"/sigridci-feedback-template.html\", encoding=\"utf-8\", mode=\"r\") as templateRef:\n template = templateRef.read()\n template = self.renderHtmlFeedback(template, feedback, args)\n\n reportFile = os.path.abspath(\"sigrid-ci-output/index.html\")\n writer = open(reportFile, encoding=\"utf-8\", mode=\"w\")\n writer.write(template)\n writer.close()\n \n print(\"\")\n print(\"You can find the full results here:\")\n print(reportFile)\n print(\"\")\n print(\"You can find more information about these results in Sigrid:\")\n print(self.getSigridUrl(args))\n print(\"\")\n \n def renderHtmlFeedback(self, template, feedback, args):\n template = template.replace(\"@@@CUSTOMER\", args.customer)\n template = template.replace(\"@@@SYSTEM\", args.system)\n template = template.replace(\"@@@TARGET\", \"%.1f\" % args.targetquality)\n template = template.replace(\"@@@LINES_OF_CODE_TOUCHED\", \"%d\" % 
feedback.get(\"newCodeLinesOfCode\", 0))\n        template = template.replace(\"@@@BASELINE_DATE\", self.formatBaselineDate(feedback))\n        template = template.replace(\"@@@SIGRID_LINK\", self.getSigridUrl(args))\n        for metric in self.METRICS:\n            template = template.replace(f\"@@@{metric}_OVERALL\", self.formatRating(feedback[\"overallRatings\"], metric))\n            template = template.replace(f\"@@@{metric}_NEW\", self.formatRating(feedback[\"newCodeRatings\"], metric))\n            template = template.replace(f\"@@@{metric}_STARS_OVERALL\", self.formatHtmlStars(feedback[\"overallRatings\"], metric))\n            template = template.replace(f\"@@@{metric}_STARS_NEW\", self.formatHtmlStars(feedback[\"newCodeRatings\"], metric))\n            passed = self.isPassed(feedback, metric, args.targetquality)\n            template = template.replace(f\"@@@{metric}_PASSED\", \"passed\" if passed else \"failed\")\n            template = template.replace(f\"@@@{metric}_REFACTORING_CANDIDATES\", self.formatRefactoringCandidates(feedback, metric))\n        return template\n    \n    def formatRefactoringCandidates(self, feedback, metric):\n        refactoringCandidates = self.getRefactoringCandidates(feedback, metric)\n        if len(refactoringCandidates) == 0:\n            return \"None\"\n        return \"\\n\".join([self.formatRefactoringCandidate(rc) for rc in refactoringCandidates])\n    \n    def formatRefactoringCandidate(self, rc):\n        subjectName = html.escape(rc[\"subject\"]).replace(\"\\n\", \"<br />\").replace(\"::\", \"<br />\")\n        category = html.escape(rc[\"category\"])\n        return f\"<span><em>({category})</em><br />{subjectName}</span>
\"\n \n def formatHtmlStars(self, ratings, metric):\n if ratings.get(metric, None) == None:\n return \"N/A\"\n stars = min(int(ratings[metric] + 0.5), 5)\n fullStars = stars * self.HTML_STAR_FULL\n emptyStars = (5 - stars) * self.HTML_STAR_EMPTY\n rating = self.formatRating(ratings, metric)\n return f\"{fullStars}{emptyStars}   \" + rating\n \n \nclass ExitCodeReport(Report): \n def generate(self, feedback, args):\n asciiArt = TextReport()\n if self.isPassed(feedback, \"MAINTAINABILITY\", args.targetquality):\n asciiArt.printColor(\"\\n** SIGRID CI RUN COMPLETE: YOU WROTE MAINTAINABLE CODE AND REACHED THE TARGET **\\n\", \\\n asciiArt.ANSI_BOLD + asciiArt.ANSI_GREEN)\n else:\n asciiArt.printColor(\"\\n** SIGRID CI RUN COMPLETE: THE CODE YOU WROTE DID NOT MEET THE TARGET FOR MAINTAINABLE CODE **\\n\", \\\n asciiArt.ANSI_BOLD + asciiArt.ANSI_YELLOW)\n # Only break the build when not publishing to Sigrid,\n # i.e. when running on a branch or pull request.\n if not args.publish:\n sys.exit(1)\n \n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--partner\", type=str, default=\"sig\")\n parser.add_argument(\"--customer\", type=str)\n parser.add_argument(\"--system\", type=str)\n parser.add_argument(\"--source\", type=str)\n parser.add_argument(\"--targetquality\", type=float, default=3.5)\n parser.add_argument(\"--publish\", action=\"store_true\")\n parser.add_argument(\"--publishonly\", action=\"store_true\")\n parser.add_argument(\"--exclude\", type=str, default=\"\")\n parser.add_argument(\"--pathprefix\", type=str, default=\"\")\n parser.add_argument(\"--showupload\", action=\"store_true\")\n parser.add_argument(\"--history\", action=\"store_true\")\n parser.add_argument(\"--sigridurl\", type=str, default=\"https://sigrid-says.com\")\n args = parser.parse_args()\n \n if args.customer == None or args.system == None or args.source == None:\n parser.print_help()\n sys.exit(1)\n \n if sys.version_info.major == 2 or sys.version_info.minor < 7:\n print(\"Sigrid CI requires Python 3.7 or higher\")\n sys.exit(1)\n \n if not \"SIGRID_CI_ACCOUNT\" in os.environ or not \"SIGRID_CI_TOKEN\" in os.environ:\n print(\"Sigrid account not found in environment variables SIGRID_CI_ACCOUNT and SIGRID_CI_TOKEN\")\n sys.exit(1)\n \n if not os.path.exists(args.source):\n print(\"Source code directory not found: \" + args.source)\n sys.exit(1)\n \n if args.publish and len(args.pathprefix) > 0:\n print(\"You cannot use both --publish and --pathprefix at the same time, refer to the documentation for details\")\n sys.exit(1)\n \n log(\"Starting Sigrid CI\")\n options = UploadOptions(args.source, args.exclude.split(\",\"), args.history, args.pathprefix, args.showupload)\n apiClient = SigridApiClient(args)\n analysisId = apiClient.submitUpload(options)\n \n if args.publishonly:\n log(\"Your project's source code has been published to Sigrid\")\n else:\n feedback = apiClient.fetchAnalysisResults(analysisId)\n \n for report in [TextReport(), StaticHtmlReport(), ExitCodeReport()]:\n report.generate(feedback, args)\n","sub_path":"sigridci/sigridci.py","file_name":"sigridci.py","file_ext":"py","file_size_in_byte":18452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"498554073","text":"# socket client\r\n\r\nimport socket\r\nip_port=('192.168.0.103',7786)\r\n\r\n# 封装协议(对象)\r\ns = socket.socket()\r\n\r\n# 向服务端建立连接\r\ns.connect(ip_port)\r\n\r\nwhile True:\r\n # 发送消息\r\n print('conected')\r\n send_data=input('>>: ').strip()\r\n## 
if len(send_data) == 0:continue # 如果发送消息为空,不去执行以下发送\r\n s.send(bytes(send_data,encoding='utf8'))\r\n if send_data == 'exit': break # 如果输入exit,则退出\r\n\r\n # 接收消息\r\n recv_data = s.recv(1024) #\r\n print(str(recv_data,encoding='utf8'))\r\n\r\n# 结束连接\r\ns.close()\r\n","sub_path":"client_original.py","file_name":"client_original.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"24961352","text":"#!/usr/bin/python\n# -*- Encoding: utf-8 -*-\n######################################################################\n# An application for viewing and annotating the gesture paths\n# recorded by the DovUniStroke with a \"ground truth key\".\n#\n# Dov Grobgeld \n# 2020-03-15 Sun\n######################################################################\n\nimport gi\ngi.require_version('GooCanvas', '2.0')\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, GooCanvas, Gdk, Pango\nimport pdb\nimport glob,os,json,shutil\n\ndef is_valid_name(keyname):\n return (keyname in 'abcdefghijklmnopqrstuvwxyz1234567890'\n or keyname.startswith('hebrew_'))\n\nclass MyWindow(Gtk.Window):\n def __init__(self, glyphdir):\n Gtk.Window.__init__(self, title=\"View glyph files\")\n self.glyphdir = glyphdir\n self.glyphfiles = sorted(glob.glob(os.path.join(glyphdir, '*.json')))\n self.current_glyph_index = 0\n self.set_focus()\n self.connect('key-press-event',\n self.on_key_press)\n\n self.set_default_size (1200, 1200);\n vbox = Gtk.Box(spacing=6,orientation=Gtk.Orientation.VERTICAL)\n self.add(vbox)\n\n hpaned = Gtk.Paned(orientation=Gtk.Orientation.VERTICAL)\n hpaned.set_position(600)\n vbox.pack_start(hpaned, True, True, 0)\n\n self.cnv = GooCanvas.Canvas()\n hpaned.add1(self.cnv)\n\n sw = Gtk.ScrolledWindow()\n hpaned.add2(sw)\n\n self.textview = Gtk.TextView()\n self.textbuffer = self.textview.get_buffer()\n sw.add(self.textview)\n\n # Create some tags for the text view\n self.tag_bold = self.textbuffer.create_tag('fat',\n weight=Pango.Weight.BOLD)\n\n hbox = Gtk.ButtonBox()\n hbox.set_layout(Gtk.ButtonBoxStyle.END)\n\n button_prev = Gtk.Button(label='←')\n button_prev.connect(\"clicked\", self.prev_glyph)\n hbox.pack_start(button_prev, True, True, 0)\n\n button_next = Gtk.Button(label='→')\n button_next.connect(\"clicked\", self.next_glyph)\n hbox.pack_start(button_next, True, True, 0)\n\n button_quit = Gtk.Button(label='Quit')\n button_quit.connect(\"clicked\", Gtk.main_quit)\n hbox.pack_start(button_quit, True, True, 0)\n\n vbox.pack_start(hbox, False, False, 0)\n\n self.path = None\n self.show_current_glyph()\n\n\n def create_points(self,points):\n cp = GooCanvas.CanvasPoints.new(len(points))\n for i,p in enumerate(points):\n cp.set_point(i,p[0],p[1])\n return cp\n \n def show_current_glyph(self):\n glyph_filename = self.glyphfiles[self.current_glyph_index]\n jj = json.load(open(glyph_filename))\n\n root = self.cnv.get_root_item()\n if self.path is not None:\n self.path.remove()\n\n self.path = GooCanvas.CanvasGroup(parent=root)\n points = [(v['x'],v['y']) for v in jj['gesture']]\n \n GooCanvas.CanvasPolyline(\n parent = self.path,\n points = self.create_points(points),\n line_width=2.0,\n stroke_color = 'green',\n close_path=False)\n \n GooCanvas.CanvasEllipse(\n parent = self.path,\n center_x = points[0][0],\n center_y = points[0][1],\n radius_x = 10,\n radius_y = 10,\n stroke_color='black',\n line_width=2,\n fill_color='brown')\n\n text = (''+os.path.basename(glyph_filename) + '\\n\\n'\n + 
'Predictions\\n')\n for i,pp in enumerate(jj['predictions']):\n text += ' ' + pp['name'] + (': %.02f'%pp['score'])+'\\n'\n if i==5:\n break\n text += 'Modifier: ' + str(jj.get('mModifier','??'))+'\\n'\n text += 'GestureSet: ' + str(jj.get('mGestureSet','??'))+'\\n'\n if 'ground_truth' in jj:\n text += 'Ground truth: '+jj['ground_truth']+'\\n'\n self.textbuffer.set_text('')\n start_iter = self.textbuffer.get_start_iter()\n self.textbuffer.insert_markup(start_iter, text, -1)\n\n def next_glyph(self,button=None):\n self.current_glyph_index += 1\n if self.current_glyph_index >= len(self.glyphfiles):\n self.current_glyph_index = 0\n self.show_current_glyph()\n \n def prev_glyph(self,button=None):\n self.current_glyph_index -= 1\n if self.current_glyph_index < 0:\n self.current_glyph_index = len(self.glyphfiles)-1\n self.show_current_glyph()\n\n def update_glyph_name(self, glyph_name):\n with open(self.glyphfiles[self.current_glyph_index]) as fp:\n obj = json.load(fp)\n if glyph_name is None:\n del obj['ground_truth']\n else:\n obj['ground_truth'] = glyph_name\n shutil.copy(self.glyphfiles[self.current_glyph_index],\n self.glyphfiles[self.current_glyph_index]+'.bak')\n with open(self.glyphfiles[self.current_glyph_index],'w') as fp:\n json.dump(obj, fp, indent=2)\n self.show_current_glyph()\n\n def on_marker_clicked(self,item,target_item,event,user_data):\n print('i=', user_data)\n\n def on_marker_enter(self,item,target_item,event):\n item.set_property('fill_color','orange')\n\n def on_marker_leave(self,item,target_item,event):\n item.set_property('fill_color','brown')\n\n def on_key_press(self, widget, event):\n keyval_name = Gdk.keyval_name(event.keyval)\n if keyval_name == 'Left':\n self.prev_glyph()\n elif keyval_name == 'Right':\n self.next_glyph()\n elif keyval_name == 'g' and event.state & Gdk.ModifierType.CONTROL_MASK:\n self.glyph_name = None\n self.dialog = Gtk.Dialog('GetKeyname',self)\n entry_name = Gtk.Entry()\n entry_name.connect(\"key-press-event\", self.on_get_key_name_key_press, self)\n \n self.dialog.get_content_area().pack_start(entry_name, False, False, 10)\n entry_name.show()\n self.dialog.add_button('cancel', Gtk.ResponseType.CANCEL)\n self.dialog.add_button('OK', Gtk.ResponseType.ACCEPT)\n res = self.dialog.run()\n if self.glyph_name is not None or res == Gtk.ResponseType.ACCEPT:\n if self.glyph_name is None:\n self.glyph_name = entry_name.get_text()\n self.update_glyph_name(self.glyph_name)\n print('Ok', self.glyph_name)\n else:\n print('Cancel')\n self.dialog.destroy()\n elif is_valid_name(keyval_name):\n self.glyph_name = keyval_name\n self.update_glyph_name(self.glyph_name)\n self.next_glyph()\n elif keyval_name == 'BackSpace':\n self.glyph_name = '?'\n self.update_glyph_name(None)\n else:\n print(keyval_name)\n\n return True\n\n \n def on_get_key_name_key_press(self, widget, event, undef):\n keyval_name = Gdk.keyval_name(event.keyval)\n if keyval_name == 'Return':\n self.glyph_name = widget.get_text()\n self.dialog.destroy()\n return 0\n\n \nif __name__ == '__main__':\n import sys\n\n argp = 1\n glyphdir = sys.argv[argp]\n\n win = MyWindow(glyphdir)\n win.connect(\"destroy\", Gtk.main_quit)\n win.show_all()\n Gtk.main()\n","sub_path":"scripts/view-glyph-files.py","file_name":"view-glyph-files.py","file_ext":"py","file_size_in_byte":7457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"107798861","text":"# 100가지 이미지를 찾아서, 칼라(3)\n# CNN\n\nfrom keras.datasets import cifar100\nfrom keras.utils import np_utils\nfrom 
keras.models import Sequential, Model\nfrom keras.layers import Dense, LSTM, Conv2D, Input\nfrom keras.layers import Flatten, MaxPooling2D, Dropout\nimport matplotlib.pyplot as plt\n\n(x_train, y_train),(x_test, y_test) = cifar100.load_data()\n\nprint(x_train[0])\nprint('y_train[0] : ', y_train[0])\n\nprint(x_train.shape) # (50000, 32, 32, 3)\nprint(x_test.shape) # (10000, 32, 32, 3)\nprint(y_train.shape) # (50000, 1)\nprint(y_test.shape) # (10000, 1)\n\nplt.imshow(x_train[0]) \n# plt.show()\n\n# 데이터 전처리 1. 원핫인코딩\n\nfrom keras.utils import np_utils\n\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\n\nprint(y_train.shape) # (50000, 100)\n\n# 데이터 전처리 2. 정규화 \n\nx_train = x_train.reshape(50000, 32, 32, 3).astype('float32') /255 \nx_test = x_test.reshape(10000, 32, 32, 3).astype('float32') /255\n\nprint(x_train.shape)\nprint(x_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)\n\n# 모델 구성 함수형\n\ninput1 = Input(shape=(32,32,3))\n\nconi_1 = Conv2D(10, (2, 2))(input1) \nconi_2 = Conv2D(15, (3, 3))(coni_1) \ndrop1 = Dropout(0.2)(coni_2)\n\nconi_3 = Conv2D(30, (3, 3))(drop1) \nconi_4 = Conv2D(40, (3, 3))(coni_3) \nconi_5 = Conv2D(50, (3, 3))(coni_4)\ndrop2 = Dropout(0.5)(coni_5)\n\nconi_6 = Conv2D(210, (3, 3))(drop2) \nconi_7 = Conv2D(310, (3, 3))(coni_6) \nconi_8 = Conv2D(410, (3, 3))(coni_7) \ndrop3 = Dropout(0.7)(coni_8)\n\nconi_9 = Conv2D(210, (3, 3))(drop3)\ndrop4 = Dropout(0.2)(coni_9)\n\nconi_10 = Conv2D(310, (3, 3))(drop4) \nconi_11 = Conv2D(110, (3, 3))(coni_10)\ndrop5 = Dropout(0.3)(coni_11)\n\nconi_12 = Conv2D(30, (3, 3))(drop5) \nconi_13 = Conv2D(20, (3, 3),padding = 'same')(coni_12) \ndrop6 = Dropout(0.5)(coni_13)\n\nconi_14 = Conv2D(10, (2, 2), padding = 'same')(drop6) \ndrop7 = Dropout(0.3)(coni_14)\n\n \nflatten = Flatten()(drop7) \n\nconi_15 = Dense(100, activation='softmax')(flatten) \n\nmodel = Model(inputs=input1, outputs = coni_15)\n\nmodel.summary()\n\n\n# 3. 컴파일, 훈련\n\n# model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])\n# loss = 'categorical_crossentropy' : 다중분류에서 사용 \n\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\nmodelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'\n\ncheckpoint = ModelCheckpoint(filepath = modelpath, monitor='val_loss',\n save_best_only=True, mode='auto')\n\nes = EarlyStopping(monitor = 'loss', patience = 20, mode= 'auto')\n\n\"\"\" Tensorboard \"\"\"\n\nfrom keras.callbacks import TensorBoard # Tensorboard 가져오기\n\ntb_hist = TensorBoard(log_dir='graph', histogram_freq= 0 , # log_dir=' 폴더 ' : 제일 많이 틀림\n write_graph= True, write_images= True) \n\nhist = model.fit(x_train, y_train, \n epochs=10, batch_size=256,\n validation_split=0.25, verbose=1,\n callbacks = [es, checkpoint, tb_hist]) \n # 콜백에는 리스트 형태 \n\n\n\n# 4. 
평가\n\nloss_acc = model.evaluate(x_test, y_test, batch_size= 64)\n\nloss = hist.history['loss'] # model.fit 에서 나온 값\nval_loss = hist.history['val_loss']\nacc = hist.history['acc']\nval_acc = hist.history['val_acc']\n\nprint('acc: ', acc) \nprint('val_acc: ', val_acc)\nprint('loss_acc: ', loss_acc) \n \n\nimport matplotlib.pyplot as plt \n\nplt.figure(figsize = (10, 6)) # 10 x 6인치의 판이 생김\n\n\n# 1번 그림\n\nplt.subplot(2, 1, 1) # (2, 1, 1) 2행 1열의 그림 1번째꺼 / subplot : 2장 그림 \nplt.plot(hist.history['loss'], marker='.', c='red', label='loss') \nplt.plot(hist.history['val_loss'], marker='.', c='blue', label='val_loss') \nplt.grid() # 격자 생성\nplt.title('loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\n# plt.legend(['loss','val_loss']) \nplt.legend(loc='upper right') \n\n\n# 2번 그림\n\nplt.subplot(2, 1, 2) # (2, 1, 2) 2행 1열의 그림 2번째꺼 \nplt.plot(hist.history['acc']) \nplt.plot(hist.history['val_acc']) \nplt.grid() # 격자 생성\nplt.title('accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['acc','val_acc'])\n\nplt.show() \n\n# acc: [0.017573332, 0.042826667, 0.06389333, 0.07848, 0.09029333, 0.09370667, 0.09765334, 0.10541333, 0.10808, 0.10970667]\n# val_acc: [0.035440001636743546, 0.0684799998998642, 0.0851999968290329, 0.10328000038862228, 0.10599999874830246, 0.09415999799966812, 0.10927999764680862, 0.11048000305891037, 0.1228799968957901, 0.12600000202655792]\n# loss_acc: [3.875994721221924, 0.13040000200271606]","sub_path":"keras/keras70_cifar100_cnn.py","file_name":"keras70_cifar100_cnn.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"433596676","text":"#!/usr/bin/env python\nimport asyncio\nimport zmq\nimport aiozmq\nimport pickle\nfrom urllib.parse import urlencode\nimport logging\nlogger = logging.getLogger(\"chatkit:\" + __name__)\n\n\nclass ZMQServer:\n\n def __init__(self, router, host='127.0.0.1', port='8999'):\n logger.info(\"ZMQ server starting on \" + host + \":\" + str(port))\n self.router = router\n self.host = host\n self.port = port\n\n @asyncio.coroutine\n def start(self, loop):\n router_closed = asyncio.Future()\n route_manager = self.router\n\n self.server, _ = yield from aiozmq.create_zmq_connection(\n lambda: ZmqRouterProtocol(router_closed, loop, route_manager),\n zmq.ROUTER,\n bind='tcp://' + self.host + ':' + self.port)\n return self.server\n\n\nclass ZmqRouterProtocol(aiozmq.ZmqProtocol):\n\n transport = None\n\n def __init__(self, on_close, loop, route_manager):\n self.loop = asyncio.new_event_loop()\n self.route_manager = route_manager\n self.on_close = on_close\n\n def connection_made(self, transport):\n self.transport = transport\n\n def msg_received(self, msg):\n logger.info(\"ZMQ message recieved: \" + msg) \n\n def connection_lost(self, exc):\n self.on_close.set_result(exc)\n","sub_path":"zmqserver.py","file_name":"zmqserver.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"109797321","text":"# Extract the top and bottom so many images\n# Transform to viewable format\n\nfrom mlworkflow import Operator, Data\nfrom easydict import EasyDict as edict\n\nimport scipy.io as sio #use sio.loadmat and sio.savemat\nimport numpy as np\nimport argparse\nimport os.path as osp\nimport sys\n\nclass ZipSamples(Operator):\n def __init__(self, config, args):\n super(ZipSamples,self).__init__(config, args)\n opt = {\n 'zipkey':'samples',\n 'collapseInterval':1,\n 
'collapseTimeout':300,\n 'take':25\n }\n opt.update(args)\n self.opt = edict(opt)\n self.analysisData = []\n\n def run(self):\n # Collapse into one process\n nprocs = self.getNumProcs()\n self.log(\"Collapsing to process 0\")\n self.collapse(pid=0,timeout=self.opt.collapseTimeout,interval=self.opt.collapseInterval)\n self.log(\"In Process 0 (this should not be seen by other processes)\")\n\n # Do nothing if already have sampled\n if self.checkExists(self.opt.zipkey, threadSpecific=False):\n return\n\n opt = self.opt\n\n self.log(\"Loading mats\")\n loadedMats = []\n for i in range(nprocs):\n loadedMats.append(self.files.load(self.opt.zipkey, instance=i, loader='mat'))\n\n# keys = [k for k in loadedMats[0]]\n\n# allSamples = {k: np.concatenate([loadedMats[i][k] for i in range(nprocs)]) for k in keys if not k.startswith('__')}\n\n allSamples = {\n 'images': np.concatenate([loadedMats[i]['images'] for i in range(nprocs)]),\n 'jacob': np.concatenate([loadedMats[i]['jacob'] for i in range(nprocs)]),\n 'code': np.concatenate([loadedMats[i]['code'] for i in range(nprocs)]),\n 'prob': np.concatenate([loadedMats[i]['prob'].flatten() for i in range(nprocs)])\n }\n\n if 'feats' in loadedMats[0]:\n allSamples['feats'] = np.concatenate([loadedMats[i]['feats'] for i in range(nprocs)])\n\n self.log(\"Saving %s\"%self.opt.zipkey)\n self.files.save(allSamples,self.opt.zipkey,saver='mat',threadSpecific=False)\n\n #allSamples = sio.loadmat(opt.matfile)\n\n # images, code, jacob, prob\n\n # Need to transform images \n #sortedInds = np.argsort(allSamples['prob'].reshape((np.size(allSamples['prob']))))\n sortedInds = np.argsort(allSamples['prob'])\n topN = sortedInds[-opt.take:]\n bottomN = sortedInds[:opt.take]\n\n imTop = allSamples['images'][topN]\n probTop = allSamples['prob'][topN]\n jacobTop = allSamples['jacob'][topN]\n codeTop = allSamples['code'][topN]\n # imTop = (imTop+1.0)*127.5\n # imTop = imTop.astype(np.uint8)\n imTop = imTop*0.5+0.5 #Transform changes depending on dataset\n # Later make the transform flexible\n# imTop = np.transpose(imTop,(2,3,1,0)) # Transpose to RGB format\n imTop = np.transpose(imTop,(0,2,3,1))\n # put number of samples in last dimension because\n # that's hows matlab needs it for imshow\n\n self.analysisData.append(Data({'images':imTop},'imageArray','topIms'))\n\n imBot = allSamples['images'][bottomN]\n probBot = allSamples['prob'][bottomN]\n jacobBot = allSamples['jacob'][bottomN]\n codeBot = allSamples['code'][bottomN]\n# imBot = (imBot+1.0)*127.5\n# imBot = imBot.astype(np.uint8)\n# imBot = np.transpose(imBot,(2,3,1,0))\n imBot = imBot*0.5+0.5\n imBot = np.transpose(imBot,(0,2,3,1))\n\n self.analysisData.append(Data({'images':imBot},'imageArray','botIms')) \n\n# sio.savemat(osp.join(opt.matfolder,'topBottom%d.mat'%opt.take), \n# {\n# 'top':{\n# 'images':imTop,\n# 'jacob':jacobTop,\n# 'code':codeTop,\n# 'prob':probTop \n# },\n# 'bottom':{\n# 'images':imBot,\n# 'jacob':jacobBot,\n# 'code':codeBot,\n# 'prob':probBot \n# }\n# }\n# )\n def getAnalysisData(self):\n return self.analysisData\n\n","sub_path":"code/total/experiments/ZipSamples.py","file_name":"ZipSamples.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"91542324","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 15 16:39:38 2017\n\n@author: Cong Liu\n\n Software License Agreement (BSD License)\n\n Copyright (c) 2017, Han's Robot Co., Ltd.\n All rights reserved.\n\n Redistribution and 
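ZipSamples above ranks the merged samples by probability with a single argsort and slices off both extremes; the same pattern works for any score vector. A tiny demo:

import numpy as np

prob = np.array([0.9, 0.1, 0.5, 0.7, 0.3])
take = 2
order = np.argsort(prob)  # ascending: [1 4 2 3 0]
top = order[-take:]       # indices of the best `take` samples -> [3 0]
bottom = order[:take]     # indices of the worst `take` samples -> [1 4]
print(prob[top], prob[bottom])  # [0.7 0.9] [0.1 0.3]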
use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided\n with the distribution.\n * Neither the name of the copyright holders nor the names of its\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n# author: Cong Liu\nimport rospy\nfrom std_msgs.msg import Float64\nfrom cyton_msgs.msg import Float64Array\nfrom dynamixel_controllers.srv import TorqueEnable, TorqueEnableRequest\nfrom dynamixel_controllers.srv import SetSpeed, SetSpeedRequest, SetSpeedResponse\nfrom std_srvs.srv import SetBool, SetBoolResponse, SetBoolRequest\n\nclass RobotArmDynManager(object):\n def __init__(self, arm_name=''):\n self.controller_names=rospy.get_param(arm_name+'_controller_names', [])\n rospy.Service(arm_name+'_torque_enable', SetBool, self.torque_enable_cb)\n rospy.Service(arm_name+'_go_home', SetBool, self.go_home_cb)\n rospy.Service(arm_name+'_set_speed', SetSpeed, self.set_speed_cb)\n self.pubs=[]\n for i in xrange(len(self.controller_names)):\n pub_tmp=rospy.Publisher(self.controller_names[i]+'_controller/command',\n Float64, queue_size=1)\n self.pubs.append(pub_tmp)\n self.joint_command_sub=rospy.Subscriber(arm_name+'_command_joint',\n Float64Array,\n self.joint_command_cb)\n def torque_enable_cb(self, req):\n resp=SetBoolResponse()\n torque_require=TorqueEnableRequest()\n torque_require.torque_enable=req.data\n for i in xrange(len(self.controller_names)):\n cl_tmp=rospy.ServiceProxy(self.controller_names[i]+'_controller/torque_enable',\n TorqueEnable)\n cl_tmp.call(torque_require)\n rospy.sleep(0.1)\n resp.success=True\n return resp\n \n def go_home_cb(self, req):\n resp=SetBoolResponse()\n torque_require=SetBoolRequest()\n torque_require.data=True\n torque_response=self.torque_enable_cb(torque_require)\n if torque_response.success:\n home_pose=Float64Array()\n home_pose.data=[0]*len(self.controller_names)\n self.joint_command_cb(home_pose)\n resp.success=True\n return resp\n else:\n resp.success=False\n return resp\n \n def set_speed_cb(self, req):\n resp=SetSpeedResponse()\n for i in xrange(len(self.controller_names)):\n cl_tmp=rospy.ServiceProxy(self.controller_names[i]+'_controller/set_speed',\n SetSpeed)\n cl_tmp.call(req)\n rospy.sleep(0.1)\n return resp\n \n def joint_command_cb(self, data):\n if len(data.data)!=len(self.controller_names):\n 
rospy.logerr('length of the command_joint is wrong')\n return\n speed_require=SetSpeedRequest()\n speed_require.speed=0.3\n self.set_speed_cb(speed_require)\n for i in xrange(len(self.controller_names)):\n self.pubs[i].publish(data.data[i])\n\nif __name__ == '__main__':\n rospy.init_node('robot_arm_dynamixel_manager', anonymous=True)\n arm_names=rospy.get_param('robot_arm_names', [])\n managers=[None]*len(arm_names)\n for i in xrange(len(arm_names)):\n managers[i]=RobotArmDynManager(arm_names[i])\n rospy.spin()\n","sub_path":"cyton_bringup/script/robot_arm_dynamixel_manager.py","file_name":"robot_arm_dynamixel_manager.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"245623353","text":"from .modelinterface import RecipesList, CuisineList\n\n\nclass RecipeFinder:\n def __init__(self, request):\n self.recipes_all = RecipesList()\n self.recipes_filtered = []\n self.query = SearchQuery(request)\n\n def searchRecipe(self):\n\n filter = CombinedFilter(\n [TextFilter(self.query.text), CuisineFilter(self.query.cuisines), CourseFilter(self.query.courses), LabelFilter(self.query.labels)])\n\n for recipe in self.recipes_all.entries:\n if(filter.filter(recipe)):\n self.recipes_filtered.append(recipe)\n\n def getResult(self):\n return self.recipes_filtered\n\n\nclass SearchQuery:\n def __init__(self, request):\n self.text = request.POST['text']\n self.courses = request.POST.getlist('courses')\n self.cuisines = request.POST.getlist('cuisines')\n self.labels = request.POST.getlist('labels')\n\n\nclass TextFilter:\n def __init__(self, filter_text):\n self.filter_text = filter_text\n\n def filter(self, recipe):\n filter_boolean = False\n\n if(self.filter_text == \"\"):\n filter_boolean = True\n\n # Search on title\n elif(self.filter_text.lower() in recipe.name.lower()):\n filter_boolean = True\n\n # Search on ingredient\n for entry in recipe.ingredientlist:\n if(self.filter_text.lower() in entry.ingredient.name.lower()):\n filter_boolean = True\n\n return filter_boolean\n\n\nclass CuisineFilter:\n def __init__(self, filter_cuisines):\n self.filter_cuisines = filter_cuisines\n self.cuisine_list = CuisineList()\n\n def filter(self, recipe):\n filter_boolean = False\n\n if(self.filter_cuisines == []):\n filter_boolean = True\n\n else:\n for cuisine in self.filter_cuisines:\n if(self.cuisine_list.get_initials(cuisine) == recipe.cuisine):\n filter_boolean = True\n\n return filter_boolean\n\n\nclass CourseFilter:\n def __init__(self, filter_courses):\n self.filter_courses = filter_courses\n\n def filter(self, recipe):\n filter_boolean = False\n\n if(self.filter_courses == []):\n filter_boolean = True\n\n else:\n for course in self.filter_courses:\n if(course == recipe.course):\n filter_boolean = True\n\n return filter_boolean\n\n\nclass LabelFilter:\n def __init__(self, filter_labels):\n self.filter_labels = filter_labels\n\n def filter(self, recipe):\n filter_boolean = False\n\n if(self.filter_labels == []):\n filter_boolean = True\n\n else:\n for label in self.filter_labels:\n for recipe_label in recipe.labels:\n if(label == recipe_label.name):\n filter_boolean = True\n\n return filter_boolean\n\n\nclass CombinedFilter:\n def __init__(self, filter_array):\n self.filters = filter_array\n\n def filter(self, recipe):\n filter_boolean = True\n\n for filter in self.filters:\n filter_boolean = filter_boolean & filter.filter(recipe)\n\n return 
filter_boolean\n","sub_path":"apps/cookbook/recipefinder.py","file_name":"recipefinder.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"361746514","text":"import discord, json, datetime\nfrom discord.ext import commands\nfrom discord.ext import tasks\n\nfrom cogs.Moderation import Moderation\nfrom cogs.helpers._taskValidator import Validator\n\nclass Tasks(commands.Cog):\n\n def __init__(self, client, config):\n self.client = client\n self.config = config\n\n @commands.Cog.listener()\n async def on_ready(self):\n # If there are no registered tasks there is no reason for the scheduler to run\n # Start this in here rather than in init function because init function runs before file read\n # So registeredTasks would always be 0\n if len(self.config.registeredTasks) > 0:\n self.scheduler.start()\n\n @staticmethod\n def isNow(cronStamp):\n now = datetime.datetime.now()\n cronStamp = cronStamp.split()\n \n # Minutes\n if cronStamp[0] != \"*\":\n nowMins = int(now.strftime(\"%M\"))\n if int(cronStamp[0]) != nowMins:\n return False\n # Hour\n if cronStamp[1] != \"*\":\n nowHours = int(now.strftime(\"%H\"))\n if int(cronStamp[1]) != nowHours:\n return False\n\n # Date\n if cronStamp[2] != \"*\":\n nowDate = int(now.strftime(\"%d\"))\n if int(cronStamp[2]) != nowDate:\n return False\n\n # Month\n if cronStamp[3] != \"*\":\n nowMonth = int(now.strftime(\"%m\"))\n if int(cronStamp[3]) != nowMonth:\n return False\n\n # Day\n if cronStamp[4] != \"*\":\n nowDay = int(now.strftime(\"%w\"))\n if int(cronStamp[4]) != nowDay:\n return False\n\n return True\n \n @tasks.loop(minutes=1.0)\n async def scheduler(self):\n # Reads file in the same format as crontab\n # Minute Hour Date Month Day\n for filename in self.config.registeredTasks:\n valid, n = Validator.validate(filename)\n if not valid:\n continue\n f = open(filename)\n data = json.load(f)\n f.close()\n tasks = data[\"tasks\"]\n\n guild = None\n for i in self.client.guilds:\n if i.name == data[\"serverName\"]:\n guild = i\n if guild == None:\n return\n\n for task in tasks:\n start = task[0]\n command = task[1]\n channelName = task[2]\n preposition = task[3]\n end = task[4]\n if command == \"lock\":\n if self.isNow(start):\n # Get log channel\n logChannel = discord.utils.get(self.client.get_all_channels(), guild__name=guild.name, name=self.config.logChannelName)\n # Send lock message\n message = await logChannel.send(\"Locking channel \" + channelName + \"...\")\n # Save returned message\n await Moderation.lock(self, message, channelName, True)\n if preposition == \"until\" and self.isNow(end):\n if channelName in list(self.config.lockedChannels.values()):\n messageID = None\n for i in self.config.lockedChannels:\n if self.config.lockedChannels[i] == channelName:\n messageID = i\n logChannel = discord.utils.get(self.client.get_all_channels(), guild__name=guild.name, name=self.config.logChannelName)\n message = await logChannel.fetch_message(int(messageID))\n # Call the unlock function on the channel which will delete the message\n await Moderation.unlock(self, message, channelName)\n\n @commands.command(\"checktask\")\n async def checktask(self, msg, *args):\n if not self.config.checkPerms(msg): # Check the user has a role in trustedRoles\n await msg.channel.send(self.config.permsError)\n return\n if len(args) == 0:\n await msg.channel.send(\"No file specified\")\n return\n\n filename = args[0]\n if not filename.endswith(\".json\"):\n filename += 
\".json\"\n\n valid, response = Validator.validate(filename)\n await msg.channel.send(response)\n\n @commands.command(\"addtask\")\n async def addtask(self, msg, *args):\n if not self.config.checkPerms(msg): # Check the user has a role in trustedRoles\n await msg.channel.send(self.config.permsError)\n return\n if len(args) == 0:\n await msg.channel.send(\"No file specified\")\n return\n \n filename = args[0]\n if not filename.endswith(\".json\"):\n filename += \".json\"\n \n valid, response = Validator.validate(filename)\n if valid:\n self.config.registeredTasks.append(filename)\n f = open(\"tasks.dat\", \"w\")\n json.dump({\"registeredTasks\":self.config.registeredTasks}, f)\n f.close()\n await msg.channel.send(\"Task file \" + filename + \" registered successfully\")\n if not self.scheduler.is_running():\n self.scheduler.start()\n else:\n await msg.channel.send(\"Invalid filename \" + args[0])\n\n @commands.command(\"remtask\")\n async def remtask(self, msg, *args):\n if not self.config.checkPerms(msg): # Check the user has a role in trustedRoles\n await msg.channel.send(self.config.permsError)\n return\n if len(args) == 0:\n await msg.channel.send(\"No file specified\")\n return\n\n filename = args[0]\n if not filename.endswith(\".json\"):\n filename += \".json\" \n\n if filename in self.config.registeredTasks:\n self.config.registeredTasks.remove(filename)\n f = open(self.config.tasksFilepath, \"w\")\n json.dump({\"registeredTasks\":self.config.registeredTasks}, f)\n f.close()\n await msg.channel.send(\"Task file \" + filename + \" unregistered successfully\")\n if len(self.config.registeredTasks) == 0:\n self.scheduler.stop()\n else:\n await msg.channel.send(\"Task file \" + filename + \" is not currently registered\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n \n","sub_path":"cogs/Tasks.py","file_name":"Tasks.py","file_ext":"py","file_size_in_byte":6507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"93525210","text":"class Solution:\n def fizzBuzz(self, n: int):\n ret_val = []\n for i in range(1, n+1):\n s = \"\"\n three_div = (i % 3 == 0)\n five_div = (i % 5 == 0)\n if three_div:\n s += \"Fizz\"\n if five_div:\n s += \"Buzz\"\n if not three_div and not five_div:\n s += str(i)\n ret_val.append(s)\n return ret_val\n\nif __name__ == \"__main__\":\n\n s = Solution()\n print(s.fizzBuzz(1))\n\n","sub_path":"p412.py","file_name":"p412.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"31498861","text":"######################################################################################\n## CCIRCUIT DEBUG IMPROVER ##\n## Author: Michele Marzulli (emimarz) ##\n## Date: 30/09/2014 ##\n## Version: 1.0 ##\n######################################################################################\nimport gdb\nimport gdb.types\nimport gdb.printing\n\n\n\n#Iterator for SetOfPrinter (list walker)\nclass ListWalker(object):\n \"\"\"For list embedded in setOfType objects\"\"\"\n\n def __init__(self,list,setOfValue):\n self.list = list\n self.elemCount = self.list['iElemCount']\n self.headElemPtr = self.list['pHead']\n if self.headElemPtr.type.code != gdb.TYPE_CODE_PTR:\n raise TypeError(\"Error in head element type of the list\")\n self.currElem = self.headElemPtr.dereference() #currElem stores list element\n self.iterCount = 0\n self.getElemType(setOfValue)\n\n def 
getElemType(self,setOfValue):\n #Useful to fetch basic member type: \n #But gdb.parse_and_eval works only on c++ const methods (like \"This\")\n getThisMethod = gdb.parse_and_eval(setOfValue.type.tag + \"::\" + \"This\")\n if getThisMethod.type.code != gdb.TYPE_CODE_FUNC:\n raise TypeError(\"Error in GetFirst method type\")\n self.elemType = getThisMethod.type.target().target()\n\n def __iter__(self):\n return self\n\n def next(self):\n while (self.iterCount < self.elemCount):\n if self.currElem['pData'].type.code != gdb.TYPE_CODE_PTR : \n raise TypeError(\"Error in element type of the list\")\n retElem = self.currElem['pData'].dereference().cast(self.elemType)\n self.iterCount += 1\n self.currElem = self.currElem['pNext'].dereference()\n return retElem\n raise StopIteration\n","sub_path":"CCPrettyPrinters/composite/setoftypeIterator.py","file_name":"setoftypeIterator.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"595008665","text":"# <~>\n# Employs custom version of pip (awwad/pip:develop) to harvest dependencies and find dependency conflicts for packages in PyPI.\n# See README.md!\n\nimport sys # for arguments and exceptions\nimport pip\nimport os\nimport json\n#import ipdb\nfrom distutils.version import StrictVersion, LooseVersion # for use in version parsing\n\n# Local resources\nBANDERSNATCH_MIRROR_DIR = '/srv/pypi/web/packages/source/'\nLOCATION_OF_LOCAL_INDEX_SIMPLE_LISTING = 'file:///srv/pypi/web/simple'\nWORKING_DIRECTORY = os.getcwd() #'/Users/s/w/git/pypi-depresolve' in my setup\nDEPENDENCY_CONFLICTS1_DB_FILENAME = os.path.join(WORKING_DIRECTORY, \"conflicts_1_db.json\") # db for model 1 conflicts\nDEPENDENCY_CONFLICTS2_DB_FILENAME = os.path.join(WORKING_DIRECTORY, \"conflicts_2_db.json\") # db for model 2 conflicts\nDEPENDENCY_CONFLICTS3_DB_FILENAME = os.path.join(WORKING_DIRECTORY, \"conflicts_3_db.json\") # db for model 3 conflicts\nBLACKLIST_DB_FILENAME = os.path.join(WORKING_DIRECTORY, \"blacklist_db.json\")\nDEPENDENCIES_DB_FILENAME = os.path.join(WORKING_DIRECTORY, \"dependencies_db.json\")\nTEMPDIR_FOR_DOWNLOADED_DISTROS = os.path.join(WORKING_DIRECTORY, 'temp_distros') # May not want this in same place as working directory. Would be terrible to duplicate. One such sdist cache per system! Gets big.\n# If temp / output files are added, please ensure that the directories they're in are also added to this list:\nLIST_OF_OUTPUT_FILE_DIRS = [TEMPDIR_FOR_DOWNLOADED_DISTROS, os.path.dirname(BLACKLIST_DB_FILENAME), os.path.dirname(DEPENDENCY_CONFLICTS3_DB_FILENAME), os.path.dirname(DEPENDENCY_CONFLICTS2_DB_FILENAME), os.path.dirname(DEPENDENCY_CONFLICTS1_DB_FILENAME), os.path.dirname(DEPENDENCIES_DB_FILENAME)]\n\n# Other Assumptions\nSDIST_FILE_EXTENSION = '.tar.gz' # assume the archived packages bandersnatch grabs end in this\nDISABLE_PIP_VERSION_CHECK = '--disable-pip-version-check' # argument to pass to pip to tell it not to prod users about our strange pip version (lest they follow that instruction and install a standard pip version)\n\n# Ensure that appropriate directories for working files / output files exist.\n# Becomes relevant whenever those are placed elsewhere.\nassert(os.path.exists(WORKING_DIRECTORY))\nfor dirname in LIST_OF_OUTPUT_FILE_DIRS:\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n print(\"Directory check: \" + dirname + \" does not exist. 
Making it.\")\n\n# Argument handling:\n# DEPENDENCY CONFLICT MODELS (see README)\n# --cm1 run using conflict model 1 (all resolvable and unresolvable conflicts; see README)\n# --cm2 run using conflict model 2 (all unresolvable and some resolvable conflicts; see README)\n# --cm3 run using conflict model 3 (default; basically \"would pip get this right?\"; see README)\n#\n# GENERAL ARGUMENTS:\n# --noskip Don't skip packages in the blacklist or packages for which information on\n# whether or not a conflict occurs is already stored.\n#\n# REMOTE OPERATION: (DEFAULT!)\n# ANY ARGS NOT MATCHING the other patterns are interpreted as what I will refer to as 'distkeys':\n# packagename(packageversion)\n# e.g.: \"django(1.8)\"\n# Using one of these means we're downloading from PyPI, per pip's defaults.\n# Your shell will presumably want these arguments passed in quotes because of the parentheses.\n#\n#\n# LOCAL OPERATION: For use when operating with local sdist files (e.g. with a bandersnatched local PyPI mirror)\n# --local=FNAME specifies a local .tar.gz sdist to inspect for dependency conflicts with pip\n# for dependency conflicts\n# e.g. '--local=/srv/pypi/web/packages/source/M/motorengine/motorengine-0.7.4.tar.gz'\n# You can specify as many of these as you like with separate --local= arguments.\n# Local and remote execution are mutually exclusive.\n# --local Using this without \"=\" means we should alphabetically scan from the local PyPI mirror.\n# This is mutually exclusive with the --local= usage above. If files are specified, we only\n# check the files specified.\n# \n# --n=N For use only with --local (not remotes, not --local=).\n# Sets N as the max packages to inspect when pulling alphabetically from local PyPI mirror.\n# e.g. --n=1 or --n=10000\n# Default for --local runs, if this arg is not specified, is all packages in the entire local PyPI\n# mirror at /srv/pypi)\n# (TODO: Must confirm that using this arg won't impact remote operation, just for cleanliness.)\n#\n#\n#\n# EXAMPLE CALLS:\n#\n# ~~ Run on a single package (in this case, arnold version 0.3.0) pulled from remote PyPI,\n# using conflict model 3 (default):\n#\n# > python analyze_deps_via_pip.py \"arnold(0.3.0)\"\n#\n#\n# ~~ Run on a few packages from PyPI, using conflict model 2, and without skipping even if\n# conflict info on those packages is already available, or if they're in the blacklist for\n# having hit unexpected errors in previous runs:\n#\n# > python analyze_deps_via_pip.py \"motorengine(0.7.4)\" \"django(1.6.3)\" --cm2 --noskip\n#\n#\n# ~~ Run on a single specified package, motorengine 0.7.4, stored locally, using conflict model 2:\n# \n# > python analyze_deps_via_pip.py --cm2 --local=/srv/pypi/web/packages/source/M/motorengine/motorengine-0.7.4.tar.gz\n#\n# ~~ Run on the first 10 packages in the local pypi mirror (assumed /srv/pypi) alphabetically,\n# using conflict model 1.\n#\n# > python analyze_deps_via_pip.py --cm1 --local --n=10\n#\ndef main():\n DEBUG__N_SDISTS_TO_PROCESS = 0 # debug; max packages to explore during debug - overriden by --n=N argument.\n CONFLICT_MODEL = 2\n NO_SKIP = False\n USE_BANDERSNATCH_MIRROR = False\n\n print(\"analyze_deps_via_pip - Version 0.3\")\n list_of_sdists_to_inspect = [] # potentially filled with local sdist filenames, from arguments\n list_of_remotes_to_inspect = [] # potentially filled with remote packages to check, from arguments\n\n # Argument processing. 
If we have arguments coming in, treat those as the packages to inspect.\n if len(sys.argv) > 1:\n for arg in sys.argv[1:]:\n if arg.startswith(\"--n=\"):\n DEBUG__N_SDISTS_TO_PROCESS = int(arg[4:])\n elif arg == \"--cm1\":\n CONFLICT_MODEL = 1\n elif arg == \"--cm2\":\n CONFLICT_MODEL = 2\n elif arg == \"--cm3\":\n CONFLICT_MODEL = 3\n elif arg == \"--noskip\":\n NO_SKIP = True\n elif arg == \"--local\": # without ='' means we pull alphabetically from local PyPI mirror at /srv/pypi/\n USE_BANDERSNATCH_MIRROR = True\n elif arg.startswith(\"--local=\"):\n list_of_sdists_to_inspect.append(arg[8:]) # e.g. '--local=/srv/pypi/web/packages/source/M/motorengine/motorengine-0.7.4.tar.gz'\n USE_BANDERSNATCH_MIRROR = True\n else:\n list_of_remotes_to_inspect.append(arg) # e.g. 'motorengine(0.7.4)'\n USE_BANDERSNATCH_MIRROR = False # For simplicity right now, I'll use one mode or another, not both. The last such argument wins if both are given.\n\n # If we were told to work with a local mirror, but weren't given specific sdists to inspect,\n # we'll scan everything in BANDERSNATCH_MIRROR_DIR until we have DEBUG__N_SDISTS_TO_PROCESS\n # sdists.\n if USE_BANDERSNATCH_MIRROR and not list_of_sdists_to_inspect:\n # Ensure that the local PyPI mirror directory exists first.\n if not os.path.exists(BANDERSNATCH_MIRROR_DIR):\n raise Exception('<~> Exception. Expecting a bandersnatched mirror of PyPI at ' + BANDERSNATCH_MIRROR_DIR + ' but that directory does not exist.')\n i = 0\n for dir, subdirs, files in os.walk(BANDERSNATCH_MIRROR_DIR):\n for fname in files:\n if is_sdist(fname):\n list_of_sdists_to_inspect.append(os.path.join(dir, fname))\n i += 1\n if i >= DEBUG__N_SDISTS_TO_PROCESS: # awkward control structure, but saving debug run time. tidy later.\n break\n if i >= DEBUG__N_SDISTS_TO_PROCESS: # awkward control structure, but saving debug run time. tidy later.\n break\n\n # Fetch info on already known conflicts so that we can skip packages below.\n conflicts_db_fname = None\n if CONFLICT_MODEL == 1:\n conflicts_db_fname = DEPENDENCY_CONFLICTS1_DB_FILENAME\n elif CONFLICT_MODEL == 2:\n conflicts_db_fname = DEPENDENCY_CONFLICTS2_DB_FILENAME\n else:\n assert(CONFLICT_MODEL == 3)\n conflicts_db_fname = DEPENDENCY_CONFLICTS3_DB_FILENAME\n\n conflicts_db = load_json_db(conflicts_db_fname)\n\n # For backward compatibility (before casing fixes for certain package names):\n # Determine a lower-cased set of the keys in the conflicts db.\n keys_in_conflicts_db_lower = set(k.lower() for k in conflicts_db)\n\n # Fetch info on packages in the blacklist.\n # These are runs that resulted in errors or runs that were manually added\n # because, for example, they hang seemingly forever or take an inordinate length of time.\n # Because this came about after the casing resolution addressed above,\n # I know there are no non-lower keys in it. TODO: Write integration/validation tests for casing.\n blacklist_db = load_json_db(BLACKLIST_DB_FILENAME)\n \n\n n_inspected = 0\n n_added_to_blacklist = 0\n\n # Generate a list of distkeys (e.g. 
'django(1.8.3)') to inspect, from the lists of sdists and \"remotes\".\n distkeys_to_inspect = []\n if USE_BANDERSNATCH_MIRROR:\n for tarfilename_full in list_of_sdists_to_inspect:\n\n # Deduce package names and versions from sdist filename.\n packagename = get_package_name_given_full_filename(tarfilename_full)\n packagename_withversion = get_package_and_version_string_from_full_filename(tarfilename_full)\n deduced_version_string = packagename_withversion[len(packagename) + 1:]\n\n # Perform a variety of fixes to match pip's normalized package and version names,\n # which are what my code inside pip spits out to the dbs.\n deduced_version_string = normalize_version_string(deduced_version_string)\n distkey = packagename + \"(\" + deduced_version_string + \")\" # This is the format for dists in the conflict db.\n distkey = distkey.lower().replace('_', '-')\n \n distkeys_to_inspect.append(distkey)\n \n else: # if not using local bandersnatched PyPI mirror\n for distkey in list_of_remotes_to_inspect:\n assert '(' in distkey and distkey.endswith(')'), \"Invalid input.\"\n distkey = distkey.lower().replace('_', '-') # avoid casing issues and incorrect underscores\n distkeys_to_inspect.append(distkey)\n \n\n\n # Now take all of the distkeys ( e.g. 'python-twitter(0.2.1)' ) indicated and run on them.\n for distkey in distkeys_to_inspect:\n \n # Check to see if we already have conflict info for this package. If so, don't run for it.\n\n if not NO_SKIP:\n if distkey in keys_in_conflicts_db_lower:\n n_inspected += 1\n print(\"<~> SKIP -- Already have \" + distkey + \" in db of type\", str(CONFLICT_MODEL),\"conflicts. Skipping. (Now at \" + str(n_inspected) + \" out of \" + str(len(list_of_sdists_to_inspect)) + \")\")\n continue\n # Else if the dist is listed in the blacklist along with this python major version (2 or 3), skip.\n elif distkey in blacklist_db and sys.version_info.major in blacklist_db[distkey]:\n n_inspected += 1\n print(\"<~> SKIP -- Blacklist includes \" + distkey + \". Skipping. (Now at \" + str(n_inspected) + \" out of \"+str(len(list_of_sdists_to_inspect)) + \")\")\n continue\n\n print(distkey,\"not found in conflicts or blacklist dbs. Sending to pip.\\n\")\n\n # Else, process the dist.\n\n packagename = distkey[ : distkey.find('(') ]\n version_string = distkey[ distkey.find('(') + 1 : distkey.find(')')]\n assert(distkey.find(')') == len(distkey) - 1)\n formatted_requirement = packagename + \"==\" + version_string\n exitcode = None\n assert(CONFLICT_MODEL in [1, 2, 3])\n\n # Construct the argument list.\n pip_arglist = ['install', '-d', TEMPDIR_FOR_DOWNLOADED_DISTROS, DISABLE_PIP_VERSION_CHECK, '--find-dep-conflicts', str(CONFLICT_MODEL), '--conflicts-db-file', conflicts_db_fname, '--dependencies-db-file', DEPENDENCIES_DB_FILENAME]\n \n if USE_BANDERSNATCH_MIRROR:\n pip_arglist.extend(['-i', LOCATION_OF_LOCAL_INDEX_SIMPLE_LISTING])\n\n pip_arglist.append(formatted_requirement)\n\n # With arg list constructed, call pip.main with it to run a modified pip install attempt (will not install).\n # This assumes that we're dealing with my pip fork version 8.0.0.dev0seb).\n exitcode = pip.main(pip_arglist)\n\n # Process the output of the pip command.\n if exitcode == 2:\n print(\"<~> X SDist\", distkey, \": pip errored out (code=\" + str(exitcode) + \"). Possible DEPENDENCY CONFLICT. Result recorded in conflicts_<...>_db.json and in conflicts_db.log. (Finished with \" + str(n_inspected) + \" out of \" + str(len(list_of_sdists_to_inspect)) + \")\")\n elif exitcode == 0:\n print(\"<~> . 
SDist\", distkey, \": pip completed successfully. No dependency conflicts observed. (Finished with \" + str(n_inspected) + \" out of \" + str(len(list_of_sdists_to_inspect)) + \")\")\n else:\n print(\"<~> . SDist\", distkey, \": pip errored out (code=\" + str(exitcode) + \"), but it seems to have been unrelated to any dep conflict.... (Finished with \" + str(n_inspected) + \" out of \" + str(len(list_of_sdists_to_inspect)) + \")\")\n # Store in the list of failing packages along with the python version we're running. (sys.version_info.major yields int 2 or 3)\n # Contents are to eventually be a list of the major versions in which it fails.\n # We should never get here if the dist is already in the blacklist for this version of python, but let's keep going even if so.\n if distkey in blacklist_db and sys.version_info.major in blacklist_db[distkey] and not NO_SKIP:\n print(\" WARNING! This should not happen!\", distkey, \"was already in the blacklist for python\",str(sys.version_info.major) + \", thus it should not have been run unless we have --noskip on (which it is not)!\")\n else: # Either the dist is not in the blacklist or it's not in the blacklist for this version of python. (Sensible)\n if distkey not in blacklist_db: # \n blacklist_db[distkey] = [sys.version_info.major]\n print(\" Added entry to blacklist for\", distkey)\n else:\n assert(NO_SKIP or sys.version_info.major not in blacklist_db[distkey])\n blacklist_db[distkey].append(sys.version_info.major)\n print(\" Added additional entry to blacklist for\", distkey)\n\n n_added_to_blacklist += 1\n # Occasionally write the blacklist to file so we don't lose tons of blacklist info if the script\n # has to be killed.\n if n_added_to_blacklist % 10 == 0:\n write_blacklist_to_file(blacklist_db)\n \n # end of exit code processing\n n_inspected += 1\n\n # end of for each tarfile/sdist\n\n # We're done with all packages. Write the collected blacklist back to file.\n write_blacklist_to_file(blacklist_db)\n\n\n# <~> Dump the blacklist json info to file.\ndef write_blacklist_to_file(blacklist_db):\n with open(BLACKLIST_DB_FILENAME, 'w') as fobj:\n json.dump(blacklist_db, fobj)\n \n\n# Given a full filename of an sdist (of the form /srv/.../packagename/packagename-1.0.0.tar.gz),\n# return package name and version (e.g. packagename-1.0.0)\n# Updating to use lower().\ndef get_package_and_version_string_from_full_filename(fname_full):\n # get position of last / in full filename\n i_of_last_slash = fname_full.rfind('/')\n # get position of .tar.gz in full filename\n i_of_targz = fname_full.rfind('.tar.gz')\n return fname_full[i_of_last_slash + 1 : i_of_targz].lower()\n\n\n\n# Given a .tar.gz in a bandersnatch mirror, determine the package name.\n# Bing's code sees fit to assume that the parent directory name is the package name.\n# I'll go with that assumption. (It breaks sometimes with dash/underscore switching,\n# but we fix that manually.)\n# Updating to use lower()\ndef get_package_name_given_full_filename(fname_full):\n return get_parent_dir_name_from_full_path(fname_full).lower()\n\n\n\n# Given a fully specified filename (i.e. 
including its path), extract name of parent directory (without full path).\ndef get_parent_dir_name_from_full_path(fname_full):\n # get position of last / in full filename\n i_of_last_slash = fname_full.rfind('/')\n # get position of 2nd to last / in full filename\n i_of_second_to_last_slash = fname_full[: i_of_last_slash].rfind('/')\n parent_dir = fname_full[i_of_second_to_last_slash + 1 : i_of_last_slash]\n\n return parent_dir\n\n\n\n# Returns true if the filename given is deemed that of an sdist file, false otherwise.\ndef is_sdist(fname):\n return fname.endswith(SDIST_FILE_EXTENSION)\n\n\n# Load given filename as a json file. If it's invalid or doesn't exist, load an empty dict.\n# Give the user a chance to control-c if file contents are invalid (not if file doesn't\n# exist) by prompting for enter.\ndef load_json_db(filename):\n # Fill with JSON data from file.\n db = None\n fobj = None\n try:\n fobj = open(filename,\"r\")\n db = json.load(fobj)\n except IOError:\n print(\" Directed to load\", filename, \", but UNABLE TO OPEN file. Loading an empty dict.\")\n db = dict()\n except (ValueError):\n fobj.close()\n print(\" Directed to load\", filename, \", but UNABLE TO PARSE JSON DATA from that file. Will load an empty dict.\")\n input(\" PRESS ENTER TO CONTINUE, CONTROL-C TO KILL AND AVOID POTENTIALLY OVERWRITING SALVAGEABLE DATA.\")\n db = dict() # If it was invalid or the file didn't exist, load empty.\n return db\n\n\n\n\n# Simulate most of the normalization of version strings that occurs in pip.\n#from pip._vendor.pkg_resources import safe_name, safe_version #These don't quite do what I need, alas. Pip is doing more than just this. Ugh.\n#distutils.version.StrictVersion might match what I'm getting from within pip....\n# Nope. It helps in one case (1.01 -> 1.1), but hurts in many others.\n # Perform a variety of fixes to match pip's normalized package and version names,\n # which are what my code inside pip spit out to the dbs.\n # So that our lookups work properly (and also to prevent continual reproduction\n # of work), we'll account for these.\n # There are a few normalizations that pip appears to do.\n # The data being logged by my code within pip is being fed package names and versions\n # normalized by some pip code, so we need to match our checks here to that\n # normalization (which is unfortunately not entirely contained in safe_name or\n # safe_version).\n # pip can be expected to do:\n # - underscores replaced by dashes\n # - version string normalization via distutils.version.StrictVersion,\n # which seems to match the information available to my code inside\n # pip that's detecting the errors.\n # Additionally, I'm going to work case-insensitive, and without assuming\n # that existing data is all lowercase.\n # Some dist filenames have \"_\" where the package name has \"-\".\n # Versioning is slightly stricter inside pip. distutils.version.StrictVersion\n # covers some of this normalization, but unfortunately not all of it. /:\n \n # Nevermind on the below: StrictVersion breaks other things, too. See daily notes (1.0.0 -> 1.0, unlike in pip)\n #try:\n # # Example: AnyFilter-0.01 is treated as AnyFilter-0.1 in the pip code.\n # # StrictVersion handles this category of correction for us.\n # deduced_version_string = str(StrictVersion(deduced_version_string))\n #except ValueError:\n # # If StrictVersion doesn't accept the string (e.g. 
if there's \"dev\" or \"beta\" in it, etc.), well,\n # # all we can do is some hackery for some cases in order to match what I see inside pip for now.\n # # Maybe I can find the rest of the normalization somewhere, but it has already consumed time.\n # # About 1% of my sample set has versions ending in \"dev\" that are then treated as \"dev0\" by pip.\n # if deduced_version_string.endswith('dev)'): # Example: acted.projects(0.10.dev) is treated as acted.projects(0.10.dev0)\n # deduced_version_string += \"0\"\n # elif '-beta' in deduced_version_string: # Example: 2.0-beta5 is reported as 2.0b5 in the case of archgenxml\n # deduced_version_string = deduced_version_string.replace('-beta','b')\ndef normalize_version_string(version):\n \n # Example: about(0.1.0-alpha.1) is reported as about(0.1.0a1) Dash removed, alpha to a, period after removed. \n \n # 'dev' should always be preceded by a '.', not a '-'\n if '-dev' in version:\n version = version.replace('-dev','.dev')\n elif '.dev' in version:\n pass\n elif 'dev' in version:\n version = version.replace('dev','.dev')\n\n # 'dev' should not be followed by a '-'.\n if 'dev-' in version: # Example: abl.util-0.1.5dev-20111031 is treated as abl.util(0.1.5.dev20111031), the dash removed and a '.' before dev.\n version = version.replace('dev-','dev')\n\n\n # Remove preceding - or . from beta or alpha.\n if '-beta' in version: # Example: 2.0-beta5 is reported as 2.0b5 in the case of archgenxml\n version = version.replace('-beta','beta')\n if '.beta' in version: # Example: 2.0-beta5 is reported as 2.0b5 in the case of archgenxml\n version = version.replace('.beta','beta')\n if '-alpha' in version: # Example: about(0.1.0-alpha.1) is reported as about(0.1.0a1) Dash removed, alpha to a, period after removed.\n version = version.replace('-alpha','alpha')\n if '.alpha' in version: # Example: 'adpy(0.12.alpha0)' is treated as 'adpy(0.12a0)'\n version = version.replace('.alpha','alpha')\n\n # Remove . or - following alpha or beta. Example: about(0.1.0-alpha.1) is treated as about(0.1.0a1) by pip.\n if 'alpha.' in version:\n version = version.replace('alpha.','alpha')\n if 'alpha-' in version:\n version = version.replace('alpha-','alpha')\n if 'beta.' in version:\n version = version.replace('beta.','beta')\n if 'beta-' in version:\n version = version.replace('beta-','beta')\n\n\n if version.endswith('dev') or version.endswith('beta') or version.endswith('alpha'): # beta or alpha should always be followed by a number. pip defaults to 0 in this case.\n # Example: acted.projects(0.10.dev) is treated as acted.projects(0.10.dev0)\n version += \"0\"\n\n\n # beta and alpha should be b and a\n # Doing this at the end may cause us to miss some cases in which (e.g.) version strings already had a in place of alpha,\n # but it also avoids us messing up any hex strings with 'a's or 'b's in them NOT representing alpha or beta....\n # For example, if the version string is '1.2a', code above will not have correctly turned it into '1.2a0', unfortunately.\n # But then we won't mess with a version string (e.g. 
with commit hash in it) ending with 'a351b8a' by incorrectly adding a 0 to it.\n # Compromises....\n if 'beta' in version: # beta should always be b instead.\n version = version.replace('beta', 'b')\n if 'alpha' in version: # alpha should always be a instead.\n version = version.replace('alpha', 'a')\n\n # This is awkward, but it covers a sizeable number of cases.\n if '.00' in version:\n version = version.replace('.00', '.0')\n if '.01' in version:\n version = version.replace('.01', '.1')\n if '.02' in version:\n version = version.replace('.02', '.2')\n if '.03' in version:\n version = version.replace('.03', '.3')\n if '.04' in version:\n version = version.replace('.04', '.4')\n if '.05' in version:\n version = version.replace('.05', '.5')\n if '.06' in version:\n version = version.replace('.06', '.6')\n if '.07' in version:\n version = version.replace('.07', '.7')\n if '.08' in version:\n version = version.replace('.08', '.8')\n if '.09' in version:\n version = version.replace('.09', '.9')\n\n\n return version\n\n\n\n \n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"analyze_deps_via_pip.py","file_name":"analyze_deps_via_pip.py","file_ext":"py","file_size_in_byte":23904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} {"seq_id":"484679374","text":"\"\"\"\nCarControl.py\n\nHolds functions that send commands to the car.\n\nFunctions and Parameters:\n\n__init__()\nupdate_sensors()\ndrive(speed)\nsteer(degree)\n_initialize_serial_communication()\n_send_command(command, addnewline=False)\n_initialize_car(pid_flag=True)\n\n\nAuthor: redd\n\"\"\"\n\nimport serial\nimport time\nfrom Sensors import Sensors\nfrom CarActions import CarActions\n# from ReddFollower import ReddFollower\nfrom NewFollower import NewFollower\n\nclass CarControl:\n \"\"\"\n This class will be used to control the car.\n \"\"\"\n\n def __init__(self):\n self.ser = self._initialize_serial_communication() # establish serial communication\n self._initialize_car() # initialize the car\n self.sensor = Sensors() # initialize sensors\n\n # first few frames of camera feed are low quality\n for i in range(0, 10):\n self.update_sensors()\n\n self.action = CarActions(self) # allows us to perform hard-coded actions in the car\n self.rf = NewFollower()\n\n def update_sensors(self):\n \"\"\"\n updates the sensors values\n :return:\n \"\"\"\n self.sensor.update_sensors()\n\n def drive(self, speed):\n \"\"\"\n Commands the car to drive.\n :param speed: -2.0 to 2.0\n :return: nothing\n \"\"\"\n self._send_command(\"!speed\" + str(speed) + \"\\n\")\n\n def steer(self, degree):\n \"\"\"\n Commands the car to turn.\n :param degree: -30.0 to 30.0\n :return: nothing\n \"\"\"\n self._send_command(\"!steering\" + str(degree) + \"\\n\")\n\n def _initialize_serial_communication(self):\n \"\"\"\n Initializes the serial communication.\n\n :return: Object required for communication.\n \"\"\"\n print(\"Initializing Serial Communications\")\n ser = serial.Serial(\"/dev/ttyUSB0\", 115200)\n time.sleep(2) # must sleep for a bit while initializing\n print(\"Flushing Input\")\n ser.flushInput()\n time.sleep(1) # must sleep for a bit while initializing\n return ser\n\n def _send_command(self, command, addnewline=False):\n \"\"\"\n Sends a command to the car. Remember that every command must end with a new line.\n\n Author: redd\n \"\"\"\n if addnewline:\n command = command + \"\\n\"\n self.ser.write(command.encode())\n\n def _initialize_car(self, pid_flag=True):\n \"\"\"\n Initializes the car. 
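# ---- Editor's illustrative sketch (not part of any dataset record above or below). ----
# Expected behavior of normalize_version_string above, using only input/output
# pairs cited in its own comments (assumes the function is importable):
#   normalize_version_string('0.10.dev')          -> '0.10.dev0'
#   normalize_version_string('0.1.5dev-20111031') -> '0.1.5.dev20111031'
#   normalize_version_string('2.0-beta5')         -> '2.0b5'
#   normalize_version_string('0.1.0-alpha.1')     -> '0.1.0a1'
#   normalize_version_string('1.01')              -> '1.1'
# ---- End of editorial sketch. ----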
This must be run before we can control the car.\n\n Author: redd\n \"\"\"\n\n print(\"Initializing Car\")\n self._send_command(\"!straight1430\\n\")\n self._send_command(\"!kp0.01\\n\")\n self._send_command(\"!kd0.01\\n\")\n if pid_flag:\n self._send_command(\"!pid1\\n\")\n else:\n self._send_command(\"!pid0\\n\")\n self._send_command(\"!start1590\\n\")\n self.drive(0.0)\n print(\"Initialization Completed\")\n","sub_path":"class_code/CarControl.py","file_name":"CarControl.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"493165873","text":"from datetime import datetime\nimport os\nimport numpy as np\nimport pandas as pd\nimport random\n\nfrom django.conf import settings\n\nfrom .models import CurriculumDocument, StandardNode, HumanRelevanceJudgment\n\n\ndef prob_weighted_random(\n queryset,\n model_name=\"baseline\",\n gamma=3.0,\n left_root_id=None,\n right_root_id=None,\n allow_same_doc=False,\n include_nonleaf_nodes=False,\n):\n \"\"\"\n Chooses a random row (uniform random) from all the possible ones,\n then chooses a weighted random column based on the probability and the\n relevance-favoritism factor `gamma`.\n \"\"\"\n\n modeldirpath = os.path.join(settings.MODELS_BASE_DIR, model_name)\n\n # load the index\n node_id_lookup = np.load(os.path.join(modeldirpath, \"index.npy\"))\n\n # load the matrix\n relevance_matrix = np.load(os.path.join(modeldirpath, \"relevance.npy\"))\n\n # load the pickled DataFrame of nodes\n nodes = pd.read_pickle(os.path.join(modeldirpath, \"nodes.pk\"))\n\n # sanity checks\n n = len(node_id_lookup)\n assert relevance_matrix.shape[0] == n, \"relevance_matrix has wrong shape\"\n assert relevance_matrix.shape[1] == n, \"relevance_matrix has wrong shape\"\n\n queryset = queryset.filter(id__in=node_id_lookup)\n left_queryset = queryset\n right_queryset = queryset\n\n if not include_nonleaf_nodes:\n left_queryset = left_queryset.filter(numchild=0)\n right_queryset = right_queryset.filter(numchild=0)\n\n # filter down and choose a random left-hand side node\n if left_root_id is not None:\n left_ancestor_root = queryset.get(id=left_root_id)\n left_queryset = left_queryset.filter(path__startswith=left_ancestor_root.path)\n left_index = nodes.row.loc[left_queryset.values_list(\"id\", flat=True)]\n\n ir = random.choice(list(left_index)) # choose a random row index for the left side\n leftid = node_id_lookup[ir]\n leftnode = queryset.get(id=leftid)\n\n # filter down the right-hand side queryset\n if not allow_same_doc:\n right_queryset = right_queryset.exclude(document_id=leftnode.document_id)\n if right_root_id is not None:\n right_ancestor_root = queryset.get(id=right_root_id)\n right_queryset = right_queryset.filter(\n path__startswith=right_ancestor_root.path\n )\n right_index = nodes.row.loc[right_queryset.values_list(\"id\", flat=True)]\n\n # build a probability distribution for choosing the right-hand node\n rowi = relevance_matrix[ir, :].flatten() # select row\n\n # exclude right-hand side columns based on the queryset\n columns_to_include = np.indices(rowi.shape)\n columns_to_exclude = np.setxor1d(columns_to_include, right_index)\n rowi[columns_to_exclude] = 0\n\n rowi[rowi < 0] = 0 # ignore any with negative values\n rowi[rowi > 0.999] = 0 # ignore any that are virtually identical\n\n # skew the distribution by gamma exponent, normalize, and select a weighted random item\n rowi_asp = rowi ** gamma / sum(rowi ** gamma)\n rowi_asp[np.isnan(rowi_asp)] = 0\n if 
sum(rowi_asp) == 0:\n rowi_asp = rowi\n jr = np.random.choice(n, p=rowi_asp)\n rightid = node_id_lookup[jr]\n rightnode = queryset.get(id=rightid)\n\n return (\n relevance_matrix[ir, jr],\n rowi_asp[jr],\n list(reversed(sorted(rowi_asp[rowi_asp > 0.001])))[:20],\n queryset.filter(id__in=[leftid, rightid]),\n )\n","sub_path":"alignmentpro/alignmentapp/schedulers.py","file_name":"schedulers.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"369322031","text":"'''\nAuthor: Estefania Munguia\n\nPurpose: This is a python program that performs\n converts binary to hexadecimal.\n'''\n\nimport sys\n\n\ndef bin_to_dec(num):\n dec = 0\n i = 0\n n = 0\n while (num != 0):\n d = num % 10\n dec = dec + d * pow(2, i)\n num = num // 10\n\n i += 1\n\n return dec\n\n\ndef dec_to_hexa(num):\n hex = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n remainder = []\n\n while num > 0:\n remainder.append(num % 16)\n num = num // 16\n\n result = \"\"\n for rev in remainder[::-1]:\n result += hex[rev]\n\n return result\n\n\ndef main():\n arg1 = sys.argv[1]\n\n dec = bin_to_dec(int(arg1))\n hex = dec_to_hexa(dec)\n\n print(hex)\n\nmain()","sub_path":"estefaniamunguia_bintohex.py","file_name":"estefaniamunguia_bintohex.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"10096116","text":"import datetime\nimport json\nimport os\nimport re\nimport sys \nimport psycopg2\nimport requests\nimport traceback\nfrom requests.exceptions import HTTPError\n\ncloud_link = os.getenv('GCLOUD_LINK')\n\ndef get_user(user_id):\n user = os.getenv('PGCONNECT_USER')\n password = os.getenv('PGCONNECT_PASSWORD')\n host = os.getenv('PGCONNECT_HOST')\n port = os.getenv('PGCONNECT_PORT')\n dbname = os.getenv('PGCONNECT_DBNAME')\n \n conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port, sslmode='require')\n cur = conn.cursor()\n\n\n sql = \"SELECT native_lang, es_rec,fr_rec,de_rec,en_rec,langs FROM users WHERE id = %s\"\n cur.execute(sql,(user_id,))\n user = cur.fetchall()\n conn.close()\n return user[0]\n\ndef cluster_arts(native_lang,user_id,clust_num,percent):\n\n cluster_link = f'https://lango-rec-{native_lang}-v26nfpfxqq-uc.a.run.app/cluster'\n\n try:\n cluster = requests.post(cluster_link, json={\n \"native_lang\": native_lang,\n \"user_id\":user_id,\n \"clust_num\":clust_num,\n \"percent\":percent\n })\n pop_clusters=cluster.json()\n print(len(pop_clusters))\n for cluster in pop_clusters:\n print(len(cluster))\n return pop_clusters\n except:\n print('error')\n\n\ndef get_recs(pop_clusters,user_id,rec_num,trans_lang):\n\n trans_links = f'https://lango-rec-{trans_lang}-v26nfpfxqq-uc.a.run.app/get_recs'\n\n response = requests.post(trans_links, json={\n \"trans_lang\": trans_lang,\n \"user_id\":user_id,\n \"rec_num\":rec_num,\n \"pop_clusters\":pop_clusters\n })\n print(response)\n\ndef gen_recs(user_id,rec_num,clust_num,percent):\n t18=datetime.datetime.now()\n user1 = get_user(user_id)\n\n native_lang=user1[0]\n es_rec=user1[1]\n fr_rec=user1[2]\n de_rec=user1[3]\n en_rec=user1[4]\n langs=user1[5]\n\n pop_clusters = cluster_arts(native_lang,user_id,clust_num,percent)\n\n rec_times = {}\n \n if es_rec:\n t0=datetime.datetime.now()\n print('es_rec generate')\n get_recs(pop_clusters,user_id,rec_num,'es')\n t1=datetime.datetime.now()\n 
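# ---- Editor's illustrative sketch (not part of any dataset record above or below). ----
# The gamma-skewed weighted choice used by prob_weighted_random above, in
# isolation: raising relevance scores to the power `gamma` sharpens the
# distribution toward the most relevant candidates before sampling. The
# scores below are hypothetical.
import numpy as np

rowi = np.array([0.2, 0.5, 0.9, 0.1])      # relevance scores for one row
gamma = 3.0
p = rowi ** gamma / np.sum(rowi ** gamma)  # normalize skewed scores into a distribution
j = np.random.choice(len(rowi), p=p)       # index of the sampled candidate
# ---- End of editorial sketch. ----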
es_rec_time='es_rec' + str(t1-t0)\n rec_times['es_rec_time'] = es_rec_time\n print(es_rec_time)\n \n if de_rec:\n t2=datetime.datetime.now()\n print('de_rec generate')\n get_recs(pop_clusters,user_id,rec_num,'de')\n t3=datetime.datetime.now()\n de_rec_time='de_rec' + str(t3-t2)\n rec_times['de_rec_time'] = de_rec_time\n print(de_rec_time)\n \n if fr_rec:\n t4=datetime.datetime.now()\n print('fr_rec generate')\n get_recs(pop_clusters,user_id,rec_num,'fr')\n t5=datetime.datetime.now()\n fr_rec_time='fr_rec' + str(t5-t4)\n rec_times['fr_rec_time'] = fr_rec_time\n print(fr_rec_time)\n \n if en_rec:\n t6=datetime.datetime.now()\n print('en_rec generate')\n get_recs(pop_clusters,user_id,rec_num,'en')\n t7=datetime.datetime.now()\n en_rec_time='en_rec' + str(t7-t6)\n rec_times['en_rec_time'] = en_rec_time\n print(en_rec_time)\n \n t19=datetime.datetime.now()\n store_recs1='store recs' + str(t19-t18)\n rec_times['store recs'] = store_recs1\n print(store_recs1)\n print(rec_times)\n \n return 'All recs uploaded'\n\ndef main():\n user_id=22\n t18=datetime.datetime.now()\n\n user1 = get_user(user_id)\n print(user1)\n \n native_lang=user1[0]\n es_rec=user1[1]\n fr_rec=user1[2]\n de_rec=user1[3]\n en_rec=user1[4]\n\n clust_num=15\n percent=0.33\n rec_num=20\n\n print('clustering')\n pop_clusters = cluster_arts(native_lang,user_id,clust_num,percent)\n \n if es_rec:\n print('es_rec generate')\n get_recs(pop_clusters,user_id,rec_num,'es')\n \n if de_rec:\n print('de_rec generate')\n get_recs(pop_clusters,user_id,rec_num,'de')\n \n if fr_rec:\n print('fr_rec generate')\n get_recs(pop_clusters,user_id,rec_num,'fr')\n \n if en_rec:\n print('en_rec generate')\n get_recs(pop_clusters,user_id,rec_num,'en')\n\n t19=datetime.datetime.now()\n store_recs1='store recs' + str(t19-t18)\n print(store_recs1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"generate_recs.py","file_name":"generate_recs.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} {"seq_id":"517372076","text":"import pygame\r\nimport json\r\nimport os\r\nfrom os import path\r\nfrom pygame.locals import *\r\nfrom random import randint\r\nfrom itertools import repeat\r\nfrom button import *\r\nfrom recources import *\r\nfrom testcar import *\r\nfrom theclassic import *\r\nfrom thedestroyer import *\r\nfrom replay import *\r\nfrom sprite import *\r\n\r\npygame.init()\r\n\r\n# Game Window ----------------------------------------------------------------------------#\r\nDISPLAY_WIDTH = 1280\r\nDISPLAY_HEIGHT = 720\r\nRES = (DISPLAY_WIDTH, DISPLAY_HEIGHT)\r\nGAME_DISPLAY = pygame.display.set_mode(RES, 0, 32)\r\n\r\npath = os.path.dirname(os.path.dirname(__file__))\r\npath_2 = os.path.dirname(__file__)\r\n\r\n\r\ndef displayMessage(text, text_colour, text_size, coords):\r\n font = pygame.font.Font(\r\n path+'/External Files/street.ttf', text_size)\r\n textSurface = font.render(text, False, text_colour)\r\n GAME_DISPLAY.blit(textSurface, coords)\r\n\r\n\r\n# # Menu Systems ----------------------------------------------------------------------------#\r\ndef mainMenu(music_volume=1.0, fx_volume=0.5):\r\n # Individual Menu Buttons #Size x y width height\r\n play_game_button = Button(\"PLAY GAME\", ORANGE, 90, 50, 100, 460, 75)\r\n choose_car_button = Button(\"CHOOSE CAR\", ORANGE, 75, 80, 220, 400, 65)\r\n options_button = Button(\"OPTIONS\", ORANGE, 75, 80, 330, 280, 65)\r\n quit_button = Button(\"QUIT\", ORANGE, 75, 80, 440, 160, 65)\r\n\r\n running = True\r\n while running:\r\n # 
Displays Background Image First\r\n GAME_DISPLAY.blit(main_menu_image, (0, 0))\r\n # Draws Each button to the screen\r\n play_game_button.draw(GAME_DISPLAY)\r\n choose_car_button.draw(GAME_DISPLAY)\r\n options_button.draw(GAME_DISPLAY)\r\n quit_button.draw(GAME_DISPLAY)\r\n\r\n key = pygame.key.get_pressed()\r\n if key[pygame.K_ESCAPE]:\r\n pygame.display.quit()\r\n pygame.quit()\r\n for event in pygame.event.get():\r\n # Gets position of mouse to detect collisions\r\n pos = pygame.mouse.get_pos()\r\n # Handles quit event\r\n if event.type == pygame.QUIT:\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n if event.type == pygame.MOUSEMOTION:\r\n # Adds responsiveness to text when hovering your mouse over it\r\n if play_game_button.mouseOver(pos):\r\n play_game_button.text_colour, play_game_button.text_size = NEON_GREEN, 100\r\n elif choose_car_button.mouseOver(pos):\r\n choose_car_button.text_colour, choose_car_button.text_size = NEON_GREEN, 85\r\n elif options_button.mouseOver(pos):\r\n options_button.text_colour, options_button.text_size = NEON_GREEN, 85\r\n elif quit_button.mouseOver(pos):\r\n quit_button.text_colour, quit_button.text_size = NEON_GREEN, 85\r\n else:\r\n # When the mouse is not over the buttons, they should return to their default attributes\r\n play_game_button.text_colour, play_game_button.text_size = ORANGE, 90\r\n choose_car_button.text_colour, choose_car_button.text_size = ORANGE, 75\r\n options_button.text_colour, options_button.text_size = ORANGE, 75\r\n quit_button.text_colour, quit_button.text_size = ORANGE, 75\r\n\r\n # HANDLING --------------------------------------------------------------------#\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n # Handles what to do after the user clicks on the corresponding button\r\n if play_game_button.mouseOver(pos):\r\n # This avoids the current state of whatever menu system overriding the intended menu\r\n # For example before this, when the user would pause the game and exit to main menu then click play game,\r\n # They would immediately arrive at the pause screen and not the actual game, hence the use of a default parameter\r\n Main(initial_run=False, music_volume=music_volume,\r\n fx_volume=fx_volume)\r\n elif choose_car_button.mouseOver(pos):\r\n chooseCarMenu()\r\n elif options_button.mouseOver(pos):\r\n optionMenu()\r\n elif quit_button.mouseOver(pos):\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n pygame.display.update()\r\n\r\n\r\ndef chooseCarMenu():\r\n # Individual Menu Buttons #Size x y width height\r\n play_button = Button(\"Play\", NEON_GREEN, 60, 1100, 520, 160, 75)\r\n main_menu_button = Button(\"Main Menu\", NEON_GREEN, 60, 20, 520, 330, 75)\r\n quit_button = Button(\"Quit\", NEON_GREEN, 60, 1100, 0, 160, 75)\r\n left_box_button = Button(\"\", NEON_GREEN, 0, 175, 50, 350, 450)\r\n right_box_button = Button(\"\", NEON_GREEN, 0, 750, 50, 350, 450)\r\n\r\n running = True\r\n while running:\r\n # Displays Background Image First\r\n GAME_DISPLAY.blit(choosing_menu_image, (0, 0))\r\n # Draws Each button to the screen\r\n play_button.draw(GAME_DISPLAY)\r\n main_menu_button.draw(GAME_DISPLAY)\r\n quit_button.draw(GAME_DISPLAY)\r\n left_box_button.draw(GAME_DISPLAY)\r\n right_box_button.draw(GAME_DISPLAY)\r\n GAME_DISPLAY.blit(arrow_left_image, (25, 620))\r\n GAME_DISPLAY.blit(arrow_right_image, (1150, 620))\r\n # Rectangles\r\n GAME_DISPLAY.blit(car_outline_rectangle_image, (175, 50))\r\n GAME_DISPLAY.blit(car_outline_rectangle_image, (750, 50))\r\n # Car Objects\r\n 
GAME_DISPLAY.blit(the_classic_icon, (150, 60))\r\n GAME_DISPLAY.blit(the_destroyer_icon, (740, 105))\r\n # Car Names\r\n displayMessage(\"The Classic\", YELLOW, 60, (205, 70))\r\n displayMessage(\"The Destroyer\", YELLOW, 50, (785, 70))\r\n # Stats\r\n # -------------------------------------------------------\r\n displayMessage(\"HP\", WHITE, 50, (210, 270))\r\n displayMessage(\"Defence\", WHITE, 40, (210, 330))\r\n displayMessage(\"Turning Radius\", WHITE, 30, (210, 380))\r\n displayMessage(\"SPEED\", WHITE, 40, (210, 420))\r\n\r\n displayMessage(\"+ 200\", LIGHT_GREEN, 50, (290, 270))\r\n displayMessage(\"+ 30\", LIGHT_GREEN, 40, (350, 330))\r\n displayMessage(\"+ 3.0\", LIGHT_GREEN, 30, (420, 380))\r\n displayMessage(\"+ 18\", LIGHT_GREEN, 40, (320, 420))\r\n\r\n # -------------------------------------------------------\r\n displayMessage(\"HP\", WHITE, 50, (785, 270))\r\n displayMessage(\"Defence\", WHITE, 40, (785, 330))\r\n displayMessage(\"TURNING RADIUS\", WHITE, 30, (785, 380))\r\n displayMessage(\"SPEED\", WHITE, 40, (785, 420))\r\n\r\n displayMessage(\"+ 200\", LIGHT_GREEN, 50, (865, 270))\r\n displayMessage(\"+ 60\", LIGHT_GREEN, 40, (925, 330))\r\n displayMessage(\"+ 2.5\", LIGHT_GREEN, 30, (995, 380))\r\n displayMessage(\"+ 14\", LIGHT_GREEN, 40, (895, 420))\r\n\r\n for event in pygame.event.get():\r\n # Gets position of mouse to detect collisions\r\n pos = pygame.mouse.get_pos()\r\n # Handles quit event\r\n if event.type == pygame.QUIT:\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n if event.type == pygame.MOUSEMOTION:\r\n # Adds responsiveness to text when hovering your mouse over it\r\n if play_button.mouseOver(pos):\r\n play_button.text_colour, play_button.text_size = ORANGE, 70\r\n elif main_menu_button.mouseOver(pos):\r\n main_menu_button.text_colour, main_menu_button.text_size = ORANGE, 70\r\n elif quit_button.mouseOver(pos):\r\n quit_button.text_colour, quit_button.text_size = ORANGE, 70\r\n elif left_box_button.mouseOver(pos):\r\n GAME_DISPLAY.blit(car_outline_selected_image, (175, 50))\r\n elif right_box_button.mouseOver(pos):\r\n GAME_DISPLAY.blit(car_outline_selected_image, (750, 50))\r\n\r\n else:\r\n play_button.text_colour, play_button.text_size = NEON_GREEN, 60\r\n main_menu_button.text_colour, main_menu_button.text_size = NEON_GREEN, 60\r\n quit_button.text_colour, quit_button.text_size = NEON_GREEN, 60\r\n\r\n # HANDLING --------------------------------------------------------------------#\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n # Handles what to do after the user clicks on the corresponding button\r\n if play_button.mouseOver(pos):\r\n Main(initial_run=False)\r\n elif main_menu_button.mouseOver(pos):\r\n mainMenu()\r\n elif left_box_button.mouseOver(pos):\r\n GAME_DISPLAY.blit(car_outline_selected_image, (175, 50))\r\n Main(initial_run=False, car_choice=1)\r\n elif right_box_button.mouseOver(pos):\r\n GAME_DISPLAY.blit(car_outline_selected_image, (750, 50))\r\n Main(initial_run=False, car_choice=2)\r\n elif quit_button.mouseOver(pos):\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n pygame.display.update()\r\n\r\n\r\ndef optionMenu():\r\n\r\n back_button = Button(\"Main Menu\", ORANGE, 60, 20, 600, 330, 75)\r\n left_arrow_button = Button(\"\", BLACK, 0, 115, 163, 40, 40)\r\n right_arrow_button = Button(\"\", BLACK, 0, 451, 163, 40, 40)\r\n\r\n volume_index = 9 # Max volume by default\r\n volume = 1.0\r\n fx_volume = 0.5\r\n\r\n running = True\r\n while running:\r\n\r\n GAME_DISPLAY.fill((NAVY))\r\n # Controls\r\n 
GAME_DISPLAY.blit(car_control_image, (900, 30))\r\n GAME_DISPLAY.blit(a_key_image, (950, 320))\r\n GAME_DISPLAY.blit(d_key_image, (1050, 320))\r\n GAME_DISPLAY.blit(left_key_image, (950, 420))\r\n GAME_DISPLAY.blit(right_key_image, (1050, 420))\r\n displayMessage(\"Powerup Activation:\", ORANGE, 40, (880, 550))\r\n GAME_DISPLAY.blit(space_key_image, (1000, 600))\r\n\r\n # Volume Control\r\n displayMessage(\"~- Volume -~\", SALMON, 50, (155, 100))\r\n dots_rects_list = [[160, 175, 15, 15], # 0.1\r\n [190, 175, 15, 15], # 0.2\r\n [220, 175, 15, 15], # 0.3\r\n [250, 175, 15, 15], # 0.4\r\n [280, 175, 15, 15], # 0.5\r\n [310, 175, 15, 15], # 0.6\r\n [340, 175, 15, 15], # 0.7\r\n [370, 175, 15, 15], # 0.8\r\n [400, 175, 15, 15], # 0.9\r\n [430, 175, 15, 15]] # 1.0\r\n\r\n GAME_DISPLAY.blit(left_arrow_options_image, (120, 167))\r\n GAME_DISPLAY.blit(right_arrow_options_image, (456, 167))\r\n display_rects = [pygame.draw.rect(GAME_DISPLAY, WHITE, dots_rects_list[i])\r\n for i in range(len(dots_rects_list))]\r\n\r\n try:\r\n circle_x = dots_rects_list[volume_index][0] + 7\r\n circle_y = dots_rects_list[volume_index][1] + 8\r\n except IndexError:\r\n # Encapsulates the error from clicking the button while already at max volume\r\n pass\r\n\r\n pygame.draw.circle(GAME_DISPLAY, BLACK, (circle_x, circle_y), 18, 2)\r\n\r\n back_button.draw(GAME_DISPLAY)\r\n left_arrow_button.draw(GAME_DISPLAY)\r\n right_arrow_button.draw(GAME_DISPLAY)\r\n\r\n for event in pygame.event.get():\r\n # Gets position of mouse to detect collisions\r\n pos = pygame.mouse.get_pos()\r\n # Handles quit event\r\n if event.type == pygame.QUIT:\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n if event.type == pygame.MOUSEMOTION:\r\n # Adds responsiveness to text when hovering your mouse over it\r\n if back_button.mouseOver(pos):\r\n back_button.text_colour, back_button.text_size = NEON_GREEN, 70\r\n else:\r\n back_button.text_colour, back_button.text_size = ORANGE, 60\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n # Handles what to do after the user clicks on the corresponding button\r\n if back_button.mouseOver(pos):\r\n mainMenu(music_volume=volume, fx_volume=fx_volume)\r\n elif left_arrow_button.mouseOver(pos):\r\n volume_index -= 1\r\n volume -= 0.1\r\n fx_volume -= 0.1\r\n elif right_arrow_button.mouseOver(pos):\r\n volume_index += 1\r\n volume += 0.1\r\n fx_volume += 0.1\r\n\r\n pygame.display.update()\r\n\r\n\r\ndef pauseMenu():\r\n # Individual Menu Buttons #Size x y width height\r\n resume_button = Button(\"Resume Game\", ORANGE, 90, 365, 200, 545, 75)\r\n # Placed Button Left Area\r\n main_menu_button = Button(\"Main Menu\",\r\n ORANGE, 60, 20, 520, 330, 75)\r\n quit_button = Button(\"Quit Game\", ORANGE, 90, 415, 310, 445, 75)\r\n\r\n running = True\r\n while running:\r\n\r\n # This will give the \"fade\" animation\r\n rect = pygame.Surface((1280, 720), pygame.SRCALPHA, 32)\r\n rect.fill((102, 178, 255, 10))\r\n GAME_DISPLAY.blit(rect, (0, 0))\r\n\r\n displayMessage(\"- PAUSED - \", WHITE, 80, (475, 50))\r\n\r\n # Draws Each button to the screen\r\n resume_button.draw(GAME_DISPLAY)\r\n main_menu_button.draw(GAME_DISPLAY)\r\n quit_button.draw(GAME_DISPLAY)\r\n GAME_DISPLAY.blit(arrow_left_image, (25, 620))\r\n\r\n for event in pygame.event.get():\r\n # Gets position of mouse to detect collisions\r\n pos = pygame.mouse.get_pos()\r\n # Handles quit event\r\n if event.type == pygame.QUIT:\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n if event.type == pygame.MOUSEMOTION:\r\n # Adds 
responsiveness to text whewn hovering your mouse over it\r\n if resume_button.mouseOver(pos):\r\n resume_button.text_colour, resume_button.text_size = NEON_GREEN, 105\r\n elif main_menu_button.mouseOver(pos):\r\n main_menu_button.text_colour, main_menu_button.text_size = NEON_GREEN, 70\r\n elif quit_button.mouseOver(pos):\r\n quit_button.text_colour, quit_button.text_size = NEON_GREEN, 105\r\n # When the mouse is not over the buttons, they should return back to their default attributes\r\n else:\r\n resume_button.text_colour, resume_button.text_size = ORANGE, 90\r\n main_menu_button.text_colour, main_menu_button.text_size = ORANGE, 60\r\n quit_button.text_colour, quit_button.text_size = ORANGE, 90\r\n\r\n # HANDLING --------------------------------------------------------------------#\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n # Handles what to do after the user clicks on the corresponding button\r\n if resume_button.mouseOver(pos):\r\n # Returns the user back to the game\r\n running = False # Breaks out of while loop\r\n return True # Breaks out of function\r\n elif main_menu_button.mouseOver(pos):\r\n mainMenu()\r\n elif quit_button.mouseOver(pos):\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n # Adds a little convinience so if the user wants to return to the game without having to click any buttons they can do so\r\n if event.key == pygame.K_ESCAPE:\r\n running = False # Breaks out of while loop\r\n return True # Breaks out of function\r\n\r\n pygame.display.update()\r\n\r\n\r\ndef gameOverMenu():\r\n play_again_button = Button(\r\n \"Play again\", NEON_GREEN, 60, 510, 600, 280, 75)\r\n main_menu_button = Button(\"Main Menu\", NEON_GREEN, 60, 20, 520, 330, 75)\r\n choose_car_button = Button(\"Choose Car\", NEON_GREEN, 60, 960, 520, 280, 75)\r\n quit_button = Button(\"Quit\", NEON_GREEN, 60, 1100, 0, 160, 75)\r\n\r\n running = True\r\n while running:\r\n # Fill Background\r\n GAME_DISPLAY.fill((134, 117, 169))\r\n # Draws Each button to the screen\r\n play_again_button.draw(GAME_DISPLAY)\r\n main_menu_button.draw(GAME_DISPLAY)\r\n choose_car_button.draw(GAME_DISPLAY)\r\n quit_button.draw(GAME_DISPLAY)\r\n\r\n with open(path_2+'/currentscore.txt', 'r') as f:\r\n current_score = f.read()\r\n\r\n with open(path_2+'/highscore.txt', 'r') as f:\r\n high_score = f.read()\r\n\r\n # Game over messages\r\n displayMessage(\"GAME OVER\", RED, 100, (380, 50))\r\n pygame.draw.rect(GAME_DISPLAY, SALMON, (360, 150, 560, 5))\r\n displayMessage(\r\n f\"Score: {current_score}\", BEIGE, 70, (523, 230))\r\n displayMessage(\r\n f\"High Score: {high_score}\", YELLOW, 70, (435, 320))\r\n\r\n for event in pygame.event.get():\r\n # Get's position of mouse to detect collisions\r\n pos = pygame.mouse.get_pos()\r\n # Handles quit event\r\n if event.type == pygame.QUIT:\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n if event.type == pygame.MOUSEMOTION:\r\n # Adds responsiveness to text whewn hovering your mouse over it\r\n if play_again_button.mouseOver(pos):\r\n play_again_button.text_colour, play_again_button.text_size = ORANGE, 70\r\n elif main_menu_button.mouseOver(pos):\r\n main_menu_button.text_colour, main_menu_button.text_size = ORANGE, 70\r\n elif choose_car_button.mouseOver(pos):\r\n choose_car_button.text_colour, choose_car_button.text_size = ORANGE, 70\r\n elif quit_button.mouseOver(pos):\r\n quit_button.text_colour, quit_button.text_size = ORANGE, 70\r\n else:\r\n play_again_button.text_colour, play_again_button.text_size = 
NEON_GREEN, 60\r\n main_menu_button.text_colour, main_menu_button.text_size = NEON_GREEN, 60\r\n choose_car_button.text_colour, choose_car_button.text_size = NEON_GREEN, 60\r\n quit_button.text_colour, quit_button.text_size = NEON_GREEN, 60\r\n\r\n # HANDLING --------------------------------------------------------------------#\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n # Handles what to do after the user clicks on the corresponding button\r\n if play_again_button.mouseOver(pos):\r\n Main(initial_run=False)\r\n elif main_menu_button.mouseOver(pos):\r\n mainMenu()\r\n elif choose_car_button.mouseOver(pos):\r\n chooseCarMenu()\r\n elif quit_button.mouseOver(pos):\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n pygame.display.update()\r\n\r\n\r\nclass Main:\r\n \"\"\" Class controls the entire program \"\"\"\r\n\r\n def __init__(self, initial_run=True, car_choice=1, music_volume=1, fx_volume=0.5):\r\n # Initialisations ----------------------------------------------------------------------#\r\n self.FPS = 60\r\n self.clock = pygame.time.Clock()\r\n\r\n # Used to run generateSpawnPoint() only once at the start of the turn\r\n self.flag_spawn = True\r\n self.spawn_points_list = []\r\n\r\n # 1 = The classic, 2 = The destroyer\r\n self.car_choice = car_choice\r\n\r\n # Slightly above 'n' to allow the user to react to the timer before counting down\r\n self.timer = 10.5 # SHOULD BE 10.5\r\n self.powerup_timer = 3.5 # How Long a power up lasts\r\n\r\n self.turn_count = 1\r\n self.snapshots = []\r\n self.saved_replay = []\r\n self.dt = 0 # (Explained bellow when value is assigned to it)\r\n\r\n # Sprite Handling ----------------------------------------------------------------------#\r\n self.user_car_sprite = pygame.sprite.Group()\r\n self.replay_sprites_group = pygame.sprite.Group()\r\n\r\n # Game Window --------------------------------------------------------------------------#\r\n pygame.display.set_caption('Do Not Crash!')\r\n pygame.display.set_icon(game_icon_image)\r\n\r\n # Music\r\n pygame.mixer.music.play(-1) # Plays BG song on repeat\r\n pygame.mixer.music.set_volume(music_volume)\r\n crash_fx.set_volume(fx_volume)\r\n # print(crash_fx.get_volume())\r\n # print(pygame.mixer.music.get_volume())\r\n\r\n # Objects ------------------------------------------------------------------------------#\r\n if car_choice == 1:\r\n self.replay_object = Replay(the_classic_ghost_sprite)\r\n elif car_choice == 2:\r\n self.replay_object = Replay(the_destroyer_ghost_sprite)\r\n else:\r\n print(\"Error\")\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n self.lake_object = Sprite(lake_image)\r\n\r\n # Misc --------------------------------------------------------------------------------#\r\n self.true_scroll = [0, 0]\r\n self.screen_shake = 10\r\n self.player_health = 200\r\n\r\n # Power Up Handling --------------------------------------------------------------------#\r\n self.health_collected = False\r\n self.speedeup_collected = False\r\n self.slowdown_collected = False\r\n self.shield_collected = False\r\n self.reverse_collected = False\r\n self.route_collected = False\r\n\r\n self.speedup_activated = False\r\n self.slowdown_activated = False\r\n self.shield_activated = False\r\n self.reverse_activated = False\r\n self.route_actived = False\r\n\r\n self.place_powerup = True\r\n self.powerup_collided = False\r\n self.replay_reverse = False\r\n\r\n # Runs Main Methods --------------------------------------------------------------------#\r\n self.running = True\r\n 
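        # A possible consolidation (a sketch, not part of the original code):
        # the per-power-up booleans above (note the inconsistent spellings
        # 'speedeup_collected' and 'route_actived') could live in two dicts
        # keyed by power-up name, making the collect/activate logic table-driven:
        #
        # POWERUPS = ("health", "speedup", "slowdown", "shield", "reverse", "route")
        # self.collected = {name: False for name in POWERUPS}
        # self.activated = {name: False for name in POWERUPS}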
self.initial_run = initial_run\r\n self.run()\r\n\r\n def run(self):\r\n # Main game loop\r\n\r\n while self.running:\r\n\r\n if self.initial_run:\r\n mainMenu() # The function makes initial run = False otherwise the saved state will carry on between menu screens\r\n\r\n if self.flag_spawn == True:\r\n self.generateSpawnPoint()\r\n\r\n # This allows us to do integration for some physics simulation\r\n self.dt = self.clock.get_time() / 100\r\n\r\n self.createMap()\r\n self.createTimer()\r\n self.handleCar()\r\n self.saveSnapshot()\r\n\r\n # Checks if the turn count is a multiple of 4 to display powerup\r\n if self.place_powerup and self.turn_count % 4 == 0:\r\n loc = randint(0, 2)\r\n powerup_choice = randint(0, len(list_of_powerups)-1)\r\n self.no_powerup = False\r\n self.place_powerup = False\r\n\r\n elif self.turn_count % 4 != 0:\r\n self.no_powerup = True\r\n\r\n if self.no_powerup:\r\n pass\r\n else:\r\n self.powerUp(loc, powerup_choice)\r\n\r\n self.drawHealthBar()\r\n self.showPowerUp()\r\n # self.playMusic()\r\n self.events()\r\n self.update()\r\n\r\n def generateSpawnPoint(self):\r\n # Generates a location for the car to spawn at\r\n\r\n if self.turn_count == 1:\r\n # Reads the contents of the spawn_points json file\r\n # Also resets the spawn_points if the user exits to main menu in the middle of the game\r\n with open(path_2+'/spawn_points.json') as f: # Automatically a read only file\r\n self.spawn_points_list = json.load(f)\r\n\r\n try: # Encapsulate error when the game runs out of spawn points\r\n # print(self.spawn_points_list)\r\n index = randint(0, len(self.spawn_points_list)-1)\r\n new_spawn_location = self.spawn_points_list[index]\r\n # Avoids respawning in the same location by deleting the used spawn location index\r\n\r\n if self.car_choice == 1:\r\n self.car = The_Classic(\r\n the_classic_sprite, new_spawn_location[0], new_spawn_location[1], new_spawn_location[2])\r\n elif self.car_choice == 2:\r\n self.car = The_Destroyer(\r\n the_destroyer_sprite, new_spawn_location[0], new_spawn_location[1], new_spawn_location[2])\r\n else:\r\n print(\"Error\")\r\n pygame.display.quit()\r\n pygame.quit()\r\n # Test Car\r\n # self.car = Test_Car(car_sprite,\r\n # new_spawn_location[0], new_spawn_location[1], new_spawn_location[2])\r\n self.user_car_sprite.add(self.car)\r\n del self.spawn_points_list[index]\r\n self.flag_spawn = False\r\n\r\n except ValueError:\r\n # print(\"Ran out of spawn points\")\r\n pass\r\n\r\n def createMap(self, x=0, y=0):\r\n # Handles all objects seen on the map\r\n\r\n # Copy use for shake\r\n self.scroll = self.true_scroll.copy()\r\n\r\n # x and y used to displace objects after for screen shake effect\r\n GAME_DISPLAY.blit(grass_image, (x, y))\r\n GAME_DISPLAY.blit(roads_image, (x, y))\r\n GAME_DISPLAY.blit(house_image, (x, y))\r\n GAME_DISPLAY.blit(tree_image, (x, y))\r\n GAME_DISPLAY.blit(lake_image, (x, y))\r\n\r\n # Transparent Rectangle Behind Turn Count/Timer\r\n GAME_DISPLAY.blit(left_rectangle_image, (0, 0))\r\n GAME_DISPLAY.blit(right_rectangle_image, (1080, -25))\r\n\r\n # displayMessage(f\"FPS: {int(self.clock.get_fps())}\",\r\n # WHITE, 20, (50, 100))\r\n # displayMessage(f\"Health: {self.car.health}\", RED, 35, (640, 10))\r\n\r\n self.wallTeleport()\r\n displayMessage(f\"Turn: {self.turn_count}\", WHITE, 35, (24, 10))\r\n\r\n # Car\r\n rotated_image = pygame.transform.rotate(\r\n self.car.image, self.car.angle)\r\n GAME_DISPLAY.blit(rotated_image, self.car.pos -\r\n (rotated_image.get_width() / 2, rotated_image.get_height() / 2))\r\n\r\n def 
createTimer(self):\r\n # Timer\r\n\r\n if self.timer < 10.5 and self.timer > 6:\r\n displayMessage(f\"Time: {int(self.timer)}\", WHITE, 35, (1100, 10))\r\n # Turns red when the clock hits 5 seconds to notify the user their turn is nearly over\r\n elif self.timer <= 6 and self.timer >= 4:\r\n displayMessage(f\"Time: {int(self.timer)}\", RED, 35, (1100, 10))\r\n # Shows ms for the last 3 seconds\r\n elif self.timer <= 4 and self.timer >= 0: # Won't realistically hit close to 0 due to lag\r\n displayMessage(\r\n f\"Time: {self.timer:.2f}\", RED, 35, (1100, 10)) # 2 decimal places\r\n\r\n dt = self.dt / 10 # Get's My Time in seconds\r\n self.timer -= dt\r\n\r\n # Handles everything that should happen at the end of the turn\r\n if self.timer < 0:\r\n # Removes the previous turns sprite from the group as it is becomes a 'replay' sprite\r\n self.car.removeFromSpriteList()\r\n self.place_powerup = True\r\n self.powerup_collided = False\r\n self.timer = 10.5 # Resets timer\r\n self.saved_replay.append(self.snapshots)\r\n self.snapshots = []\r\n self.turn_count += 1\r\n self.flag_spawn = True\r\n\r\n def handleCar(self):\r\n # Handles the car logic\r\n\r\n self.car.accelerate(self.dt)\r\n self.car.steering(self.dt)\r\n self.car.update()\r\n\r\n if self.player_health < 0:\r\n self.saveScore()\r\n gameOverMenu()\r\n\r\n # Car Replay -----------------------------------------------------------------------#\r\n if self.turn_count > 1:\r\n if self.replay_reverse:\r\n self.displayReplays(reverse=True)\r\n else:\r\n self.displayReplays()\r\n\r\n # Power Up -------------------------------------------------------------------------#\r\n\r\n # Car Collision With Objects--------------------------------------------------------#\r\n # Blits Hitboxes around objects\r\n # tree_rects = [pygame.draw.rect(GAME_DISPLAY, BLACK, trees_rects_list[i], 2)\r\n # for i in range(len(trees_rects_list))]\r\n # house_rects = [pygame.draw.rect(GAME_DISPLAY, BLACK, house_rects_list[i], 2)\r\n # for i in range(len(house_rects_list))]\r\n\r\n house_rects = [house_rects_list[i]\r\n for i in range(len(house_rects_list))]\r\n tree_rects = [trees_rects_list[i]\r\n for i in range(len(trees_rects_list))]\r\n\r\n # Collision with car and house/tree\r\n for i in range(len(house_rects)):\r\n for x in range(len(tree_rects)):\r\n\r\n if self.car.rect.colliderect(house_rects[i]) or self.car.rect.colliderect(tree_rects[x]):\r\n self.screen_shake = 10\r\n self.collision()\r\n\r\n # Draws Mask around lake\r\n # for point in self.lake_object.mask_outline:\r\n # self.lake_object.mask_outline[0] = (point[0], point[1])\r\n # pygame.draw.polygon(GAME_DISPLAY, (0, 0, 0),\r\n # self.lake_object.mask_outline, 3)\r\n\r\n # Collision with lake unless shield powerup is activated\r\n if self.shield_activated:\r\n pass\r\n else:\r\n for b_rect in self.lake_object.bounding_rects:\r\n if b_rect.contains(self.car):\r\n self.saveScore()\r\n gameOverMenu()\r\n\r\n key = pygame.key.get_pressed()\r\n if key[pygame.K_SPACE]:\r\n\r\n if self.health_collected:\r\n self.healthPowerUp()\r\n self.health_collected = False\r\n\r\n elif self.speedeup_collected:\r\n self.speedup_activated = True\r\n self.powerup_timer = 3.5\r\n self.speedeup_collected = False\r\n\r\n elif self.slowdown_collected:\r\n\r\n self.slowdown_activated = True\r\n self.powerup_timer = 3.5\r\n self.slowdown_collected = False\r\n\r\n elif self.shield_collected:\r\n self.shield_activated = True\r\n self.powerup_timer = 3.5\r\n self.shield_collected = False\r\n\r\n elif self.reverse_collected:\r\n 
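                # An aside on the object-collision loops earlier in this method:
                # they test every (house, tree) pair, so each house rect is
                # re-checked len(tree_rects) times per frame. pygame.Rect.collidelist
                # scans a whole list in one call; a sketch, assuming the rect
                # lists hold Rect-compatible entries:
                #
                # obstacles = house_rects + tree_rects
                # if self.car.rect.collidelist(obstacles) != -1:
                #     self.screen_shake = 10
                #     self.collision()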
self.reverse_activated = True\r\n self.powerup_timer = 3.5\r\n self.reverse_collected = False\r\n\r\n elif self.route_collected:\r\n self.route_actived = True\r\n self.powerup_timer = 3.5\r\n self.route_collected = False\r\n\r\n if self.speedup_activated:\r\n self.speedupPowerUp()\r\n elif self.slowdown_activated:\r\n self.slowdownPowerUp()\r\n elif self.shield_activated:\r\n self.shieldPowerUp()\r\n elif self.reverse_activated:\r\n self.reversePowerUp()\r\n elif self.route_actived:\r\n self.routePowerUp()\r\n\r\n # Testing -------------------------------------------------------------------------#\r\n # displayMessage(\r\n # f\"Current Turning: {self.car.turning}\", WHITE, 20, (1000, 450))\r\n # displayMessage(\r\n # f\"Current Vel: {self.car.vel}\", WHITE, 20, (1000, 500))\r\n # displayMessage(\r\n # f\"Current Accel: {self.car.accel}\", WHITE, 20, (1000, 600))\r\n # displayMessage(\r\n # f\"Current Angle: {self.car.angle}\", WHITE, 20, (1000, 550))\r\n # displayMessage(\r\n # f\"Current Pos: {self.car.pos}\", WHITE, 20, (800, 450))\r\n\r\n def saveSnapshot(self):\r\n # Records all the essential car movements at a given moment in time\r\n\r\n # Stores the pos and rotation of the car at a given time\r\n # Time will be in synch with the current time during the round\r\n data_points = {'time': self.timer,\r\n 'position': [self.car.pos.x, self.car.pos.y],\r\n 'angle': self.car.angle}\r\n self.snapshots.append(data_points)\r\n\r\n def displayReplays(self, reverse=False):\r\n # Handles everything replay related\r\n\r\n if not reverse:\r\n # Starts with the first dictionary element in the snapshot list and iterates through everything\r\n for i in range(len(self.saved_replay)):\r\n for j in range(len(self.saved_replay[i])):\r\n\r\n x_pos = self.saved_replay[i][j]['position'][0]\r\n y_pos = self.saved_replay[i][j]['position'][1]\r\n angle = self.saved_replay[i][j]['angle']\r\n\r\n # Synchronises the current time with the replay time to display the replay in real time wuth a varience of ~0.04\r\n\r\n if self.saved_replay[i][j]['time'] > self.timer and self.saved_replay[i][j]['time'] < self.timer+0.04:\r\n pos = pygame.Vector2(x_pos, y_pos)\r\n self.replay_object.pos = pos\r\n\r\n # Draws the hitbox around the replay cars\r\n self.replay_object.update()\r\n # pygame.draw .rect(GAME_DISPLAY, BLACK,\r\n # self.replay_object.rect, 2)\r\n self.replay_sprites_group.add(self.replay_object)\r\n\r\n if self.replay_object.isCollided(self.car):\r\n # Screen Shake animation to show collision\r\n self.screen_shake = 10\r\n self.collision()\r\n\r\n # Blits the Car to the screen\r\n rotated_image = pygame.transform.rotate(\r\n self.replay_object.image, angle)\r\n GAME_DISPLAY.blit(rotated_image, pos -\r\n (rotated_image.get_width() / 2, rotated_image.get_height() / 2))\r\n # breaks out before it can blit more than 1 car in the replay\r\n break\r\n\r\n else:\r\n # Starts with the first dictionary element in the snapshot list and iterates through everything\r\n for i in range(len(self.saved_replay)):\r\n for j in range(len(self.saved_replay[i])):\r\n\r\n x_pos = self.saved_replay[i][-j]['position'][0]\r\n y_pos = self.saved_replay[i][-j]['position'][1]\r\n angle = self.saved_replay[i][-j]['angle']\r\n\r\n # Synchronises the current time with the replay time to display the replay in real time wuth a varience of ~0.04\r\n\r\n if self.saved_replay[i][j]['time'] > self.timer and self.saved_replay[i][j]['time'] < self.timer+0.04:\r\n pos = pygame.Vector2(x_pos, y_pos)\r\n self.replay_object.pos = pos\r\n\r\n # Draws the 
hitbox around the replay cars\r\n self.replay_object.update()\r\n # pygame.draw .rect(GAME_DISPLAY, BLACK,\r\n # self.replay_object.rect, 2)\r\n self.replay_sprites_group.add(self.replay_object)\r\n\r\n if self.replay_object.isCollided(self.car):\r\n # Screen Shake animation to show collision\r\n self.screen_shake = 10\r\n self.collision()\r\n\r\n # Blits the Car to the screen\r\n rotated_image = pygame.transform.rotate(\r\n self.replay_object.image, angle)\r\n GAME_DISPLAY.blit(rotated_image, pos -\r\n (rotated_image.get_width() / 2, rotated_image.get_height() / 2))\r\n # breaks out before it can blit more than 1 car in the replay\r\n break\r\n\r\n def wallTeleport(self):\r\n # Places the user on the opposite side of the map when leaving to give them more options for routes\r\n\r\n # car_hitbox = (x, y, width, height)\r\n # Rectangular Based Collisions will be used for the cars as it resembles the shape nicely and is much\r\n # faster than mask based collision (~ 112% Faster!) which adds up as more cars are added to the screen\r\n car_hitbox = self.car.rect\r\n # pygame.draw.rect(GAME_DISPLAY, BLACK, car_hitbox, 2)\r\n if car_hitbox[0] > DISPLAY_WIDTH:\r\n self.car.pos.x = 0\r\n if car_hitbox[0] + car_hitbox[2] < 0:\r\n self.car.pos.x = DISPLAY_WIDTH\r\n if car_hitbox[1] > DISPLAY_HEIGHT:\r\n self.car.pos.y = 0\r\n if car_hitbox[1] + car_hitbox[3] < 0:\r\n self.car.pos.y = DISPLAY_HEIGHT\r\n\r\n def powerUp(self, loc, powerup_choice):\r\n\r\n # Draws rectangle around every locations\r\n # for i in range(len(power_up_recs)):\r\n # pygame.draw.rect(GAME_DISPLAY, RED, power_up_recs[i], 2)\r\n # Drwas rectangle around icon hitbox slo\r\n # pygame.draw.rect(GAME_DISPLAY, RED, power_up_recs[loc], 2)\r\n\r\n # [0] = heart\r\n # [1] = speedup\r\n # [2] = slowdown\r\n # [3] = shield\r\n # [4] = reverse\r\n # [5] = route\r\n\r\n # Only Blits the powerup if it hasn't been collected\r\n if not self.powerup_collided:\r\n GAME_DISPLAY.blit(\r\n list_of_powerups[powerup_choice], (power_up_recs[loc][0], power_up_recs[loc][1]))\r\n\r\n # Handles each power up after collision\r\n if self.car.rect.colliderect(power_up_recs[loc]):\r\n\r\n # Heart\r\n if powerup_choice == 0:\r\n self.powerup_collided = True\r\n self.health_collected = True\r\n\r\n # Speedup\r\n elif powerup_choice == 1:\r\n self.powerup_collided = True\r\n self.speedeup_collected = True\r\n\r\n # Slowdown\r\n elif powerup_choice == 2:\r\n self.powerup_collided = True\r\n self.slowdown_collected = True\r\n\r\n # Shield\r\n elif powerup_choice == 3:\r\n self.powerup_collided = True\r\n self.shield_collected = True\r\n\r\n # Reverse\r\n elif powerup_choice == 4:\r\n self.powerup_collided = True\r\n self.reverse_collected = True\r\n\r\n # Route\r\n elif powerup_choice == 5:\r\n self.powerup_collided = True\r\n self.route_collected = True\r\n\r\n def healthPowerUp(self):\r\n self.player_health = 200\r\n print(\"0\")\r\n\r\n def speedupPowerUp(self):\r\n\r\n if self.powerup_timer < 3.6:\r\n displayMessage(\r\n f\"Time: {int(self.powerup_timer)}\", YELLOW, 35, (200, 10))\r\n self.car.max_vel = 25\r\n self.car.vel.x = 25\r\n\r\n dt = self.dt / 10\r\n self.powerup_timer -= dt\r\n\r\n if self.powerup_timer <= 0:\r\n if self.car_choice == 1:\r\n self.car.vel.x = 18\r\n self.car.max_vel = 18\r\n self.speedup_activated = False\r\n elif self.car_choice == 2:\r\n self.car.vel.x = 14\r\n self.car.max_vel = 14\r\n self.speedup_activated = False\r\n\r\n def slowdownPowerUp(self):\r\n if self.powerup_timer < 3.6:\r\n displayMessage(\r\n f\"Time: 
{int(self.powerup_timer)}\", YELLOW, 35, (200, 10))\r\n self.car.max_vel = 10\r\n self.car.vel.x = 10\r\n\r\n dt = self.dt / 10\r\n self.powerup_timer -= dt\r\n\r\n if self.powerup_timer <= 0:\r\n if self.car_choice == 1:\r\n self.car.vel.x = 18\r\n self.car.max_vel = 18\r\n self.slowdown_activated = False\r\n elif self.car_choice == 2:\r\n self.car.vel.x = 14\r\n self.car.max_vel = 14\r\n self.slowdown_activated = False\r\n\r\n def shieldPowerUp(self):\r\n\r\n if self.powerup_timer < 3.6:\r\n displayMessage(\r\n f\"Time: {int(self.powerup_timer)}\", YELLOW, 35, (200, 10))\r\n\r\n GAME_DISPLAY.blit(\r\n shield_image, (self.car.rect.x-20, self.car.rect.y-20))\r\n\r\n dt = self.dt / 10\r\n self.powerup_timer -= dt\r\n\r\n if self.powerup_timer <= 0:\r\n self.shield_activated = False\r\n\r\n def reversePowerUp(self):\r\n\r\n if self.powerup_timer < 3.6:\r\n displayMessage(\r\n f\"Time: {int(self.powerup_timer)}\", YELLOW, 35, (200, 10))\r\n\r\n self.replay_reverse = True\r\n\r\n dt = self.dt / 10\r\n self.powerup_timer -= dt\r\n\r\n if self.powerup_timer <= 0:\r\n self.replay_reverse = False\r\n self.reverse_activated = False\r\n\r\n def routePowerUp(self):\r\n\r\n if self.powerup_timer < 3.6:\r\n displayMessage(\r\n f\"Time: {int(self.powerup_timer)}\", YELLOW, 35, (200, 10))\r\n\r\n for i in range(len(self.saved_replay)):\r\n for j in range(len(self.saved_replay[i])):\r\n\r\n x_pos = self.saved_replay[i][j]['position'][0]\r\n y_pos = self.saved_replay[i][j]['position'][1]\r\n angle = self.saved_replay[i][j]['angle']\r\n\r\n pos = pygame.Vector2(x_pos, y_pos)\r\n\r\n rotated_image = pygame.transform.rotate(\r\n show_route_image, angle)\r\n GAME_DISPLAY.blit(rotated_image, pos -\r\n (rotated_image.get_width() / 2, rotated_image.get_height() / 2))\r\n\r\n dt = self.dt / 10\r\n self.powerup_timer -= dt\r\n\r\n if self.powerup_timer <= 0:\r\n self.route_actived = False\r\n\r\n def saveScore(self):\r\n # Saves the score/highscore to be read by game over menu\r\n\r\n # Save current score\r\n with open(path_2+'/currentscore.txt', 'w') as f:\r\n f.write(str(self.turn_count))\r\n\r\n # Checks to update highscore if needed\r\n try:\r\n with open(path_2+'/highscore.txt', 'r') as f:\r\n high_score = f.read()\r\n if self.turn_count > int(high_score):\r\n with open(path_2+'/highscore.txt', 'w') as e:\r\n e.write(str(self.turn_count))\r\n except ValueError:\r\n pass\r\n\r\n def collision(self):\r\n # Plays Shake Animation after Collision unless shield powerup is activated\r\n\r\n if self.shield_activated:\r\n # Ignore collisions if the shield is activated\r\n pass\r\n else:\r\n if self.screen_shake > 0:\r\n self.scroll[0] += randint(0, 8) - 4\r\n self.scroll[1] += randint(0, 8) - 4\r\n crash_fx.play()\r\n self.createMap(self.scroll[0], self.scroll[1])\r\n self.createTimer()\r\n\r\n # Reduces Health\r\n if self.car_choice == 1:\r\n self.car.health -= 30\r\n self.player_health = self.car.health\r\n elif self.car_choice == 2:\r\n self.car.health -= 20\r\n self.player_health = self.car.health\r\n\r\n self.screen_shake -= 1\r\n\r\n def drawHealthBar(self):\r\n\r\n # Health Bar\r\n GAME_DISPLAY.blit(middle_rectangle_image, (0, 0))\r\n GAME_DISPLAY.blit(red_healthbar_image, (0, 0))\r\n for health in range(self.player_health):\r\n GAME_DISPLAY.blit(green_health_image, (health+459, 11))\r\n\r\n def showPowerUp(self):\r\n\r\n GAME_DISPLAY.blit(powerup_rectangle_image, (-70, 600))\r\n if self.health_collected:\r\n GAME_DISPLAY.blit(heart_powerup_image, (7, 620))\r\n elif self.speedeup_collected:\r\n 
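            # The speedup/slowdown/shield/reverse/route power-up methods above
            # share one countdown skeleton: show the timer while it is running,
            # decrement it by self.dt / 10, and reset state on expiry. A generic
            # helper could factor that out; this is only a sketch, and
            # 'tickPowerUp' plus its two callbacks are hypothetical names:
            #
            # def tickPowerUp(self, on_active, on_expire):
            #     if self.powerup_timer < 3.6:
            #         displayMessage(f"Time: {int(self.powerup_timer)}", YELLOW, 35, (200, 10))
            #         on_active()
            #     self.powerup_timer -= self.dt / 10
            #     if self.powerup_timer <= 0:
            #         on_expire()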
GAME_DISPLAY.blit(speedup_powerup_image, (7, 620))\r\n elif self.slowdown_collected:\r\n GAME_DISPLAY.blit(slowdown_powerup_image, (7, 620))\r\n elif self.shield_collected:\r\n GAME_DISPLAY.blit(shield_powerup_image, (7, 620))\r\n elif self.reverse_collected:\r\n GAME_DISPLAY.blit(reverse_replay_powerup_image, (7, 620))\r\n elif self.route_collected:\r\n GAME_DISPLAY.blit(show_route_powerup_image, (7, 620))\r\n\r\n # def playMusic(self):\r\n\r\n # pygame.mixer.music.play(-1) # Plays BG song on repeat\r\n\r\n def events(self):\r\n # Handles quit event\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.display.quit()\r\n pygame.quit()\r\n\r\n key = pygame.key.get_pressed()\r\n if key[pygame.K_ESCAPE]:\r\n # Makes them red to be easily seen over the pause menu screen\r\n displayMessage(f\"Turn: {self.turn_count}\", RED, 35, (24, 10))\r\n displayMessage(f\"Time: {int(self.timer)}\", RED, 35, (1100, 10))\r\n pauseMenu()\r\n self.update() # Stops the car from having a mind of it's own due to things breaking when the clock is not being ticked\r\n\r\n #Car Control ------------------------------------------------------------------#\r\n # Encapsulates all the presses in a KEYDOWN event to\r\n # prevent game stutters not registering the key press and assigning a value of 0 by mistake\r\n if event.type == pygame.KEYDOWN:\r\n if key[pygame.K_a] or key[pygame.K_LEFT]:\r\n self.car.turning += 20 # Positive Rotates Anti Clockwise\r\n if key[pygame.K_d] or key[pygame.K_RIGHT]:\r\n self.car.turning -= 20 # Negative angles rotate Clockwise\r\n\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_a or event.key == pygame.K_LEFT:\r\n self.car.turning = 0\r\n\r\n elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:\r\n self.car.turning = 0\r\n\r\n def update(self):\r\n pygame.display.update()\r\n self.clock.tick(self.FPS)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main_object = Main()\r\n\r\n pygame.display.quit()\r\n pygame.quit()\r\n","sub_path":"DoNotCrash_v5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":46877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"136274957","text":"from flask import Blueprint, render_template, redirect, url_for, flash\n\nfrom .github import require_github_login, github\n\n\nmyrepos = Blueprint(\"myrepos\", __name__)\n\n\n@myrepos.route(\"/\", defaults={\"page\": 1})\n@myrepos.route(\"/page/\")\n@require_github_login\ndef index(page):\n\n resp = github.get(\"/user/repos?page={}\".format(page))\n\n return render_template(\"index.html\",\n repos=resp.json())\n\n\n\n\n","sub_path":"github_api/views/myrepos.py","file_name":"myrepos.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"72821498","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys, os\n\ninf = sys.argv[1]\ninFile = open(inf,'r')\ny = [float(line.split('\\t')[1]) for line in inFile]\ny = y - np.min(y[0])\n\ny = y%1\n\na = np.polyfit(np.linspace(0,255,256),y,2)\n\nz = [a[0]*x**2+a[1]*x+a[0] for x in range(255)]\n\n#z = [a[0]*x+a[1] for x in range(256)]\n\nplt.scatter([x for x in range(len(y))], y)\nplt.plot(z)\nplt.show()\n","sub_path":"calibration/code/fitter.py","file_name":"fitter.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"559361679","text":"# this module will be for 
processing and comparing the images of each frame\n# Steps: Need to figure out an arbitrary threshold value formula for each video and stream\n# that gets processed in order to accurately compare and filter images based on each specific video\nimport shutil\nimport os\nimport cv2\nimport numpy as np\nfrom skimage.metrics import structural_similarity as ssim\nimport imutils\n\n\ndef isolateSheetMusic(frame):\n # uses rectangular border detection for sheet music\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # blurred = cv2.GaussianBlur(image, (5, 5), 0)\n # thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]\n thresh = cv2.threshold(image, 60, 255, cv2.THRESH_BINARY)[1]\n\n contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n contours = imutils.grab_contours(contours)\n\n for c in contours:\n approx = cv2.approxPolyDP(c, 0.01 * cv2.arcLength(c, True), True)\n cv2.drawContours(image, [approx], 0, (0, 255, 0), 2)\n x = approx.ravel()[0]\n y = approx.ravel()[1] - 5\n if len(approx) == 3:\n cv2.putText(image, \"Triangle\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)\n elif len(approx) == 4:\n x1, y1, w, h = cv2.boundingRect(approx)\n aspect_ratio = float(w) / float(h)\n print(aspect_ratio)\n if 0.95 <= aspect_ratio <= 1.05:\n cv2.putText(image, \"Square\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)\n\n cv2.imshow(\"img\", image)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n\ndef compareFrames(frame1, frame2):\n m = mse(frame1, frame2)\n s = ssim(frame1, frame2, multichannel=True)\n print(\"m: \" + str(m))\n print(\"s: \" + str(s))\n # need to include a threshold value in order to properly filter images\n\n\ndef mse(imageA, imageB):\n # Mean Squared Error - the lower the error, the more similar the images are\n err = np.sum((imageA.astype(\"float\") - imageB.astype(\"float\")) ** 2)\n err /= float(imageA.shape[0] * imageA.shape[1])\n return err\n\n\ndef createSheetMusic():\n print(\"Create Sheet Music\")\n\n\ndef destroyStoredFrames():\n print(\"Destroying stored frames now that PDF has been created.\")\n shutil.rmtree('data')\n","sub_path":"process_images.py","file_name":"process_images.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"509984460","text":"from django import forms\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy\n\nfrom .models import AdjudicatorFeedback, AdjudicatorFeedbackQuestion\nfrom tournaments.models import Round\nfrom participants.models import Adjudicator, Team\nfrom adjallocation.models import DebateAdjudicator\nfrom draw.models import Debate, DebateTeam\nfrom utils.forms import OptionalChoiceField\n\n# General, but only used here\n\nclass IntegerRadioFieldRenderer(forms.widgets.RadioFieldRenderer):\n \"\"\"Used by IntegerRadioSelect.\"\"\"\n outer_html = '{content}'\n inner_html = '{choice_value}{sub_widgets}'\n\nclass IntegerRadioSelect(forms.RadioSelect):\n renderer = IntegerRadioFieldRenderer\n\nclass IntegerScaleField(forms.IntegerField):\n \"\"\"Class to do integer scale fields.\"\"\"\n widget = IntegerRadioSelect\n\n def __init__(self, *args, **kwargs):\n super(IntegerScaleField, self).__init__(*args, **kwargs)\n self.widget.choices = tuple((i, str(i)) for i in range(self.min_value, self.max_value+1))\n\nclass BlankUnknownBooleanSelect(forms.NullBooleanSelect):\n \"\"\"Uses '--------' instead of 'Unknown' for the None choice.\"\"\"\n\n def 
__init__(self, attrs=None):\n choices = (('1', ugettext_lazy('--------')),\n ('2', ugettext_lazy('Yes')),\n ('3', ugettext_lazy('No')))\n # skip the NullBooleanSelect constructor\n super(forms.NullBooleanSelect, self).__init__(attrs, choices)\n\nclass BooleanSelectField(forms.NullBooleanField):\n \"\"\"Widget to do boolean select fields following our conventions.\n Specifically, if 'required', checks that an option was chosen.\"\"\"\n widget = BlankUnknownBooleanSelect\n def clean(self, value):\n value = super(BooleanSelectField, self).clean(value)\n if self.required and value is None:\n raise forms.ValidationError(_(\"This field is required.\"))\n return value\n\nclass RequiredTypedChoiceField(forms.TypedChoiceField):\n def clean(self, value):\n value = super(RequiredTypedChoiceField, self).clean(value)\n if value == \"None\":\n raise forms.ValidationError(_(\"This field is required.\"))\n return value\n\n# Feedback Fields\n\nclass AdjudicatorFeedbackCheckboxFieldRenderer(forms.widgets.CheckboxFieldRenderer):\n \"\"\"Used by AdjudicatorFeedbackCheckboxSelectMultiple.\"\"\"\n outer_html = '{content}'\n inner_html = '
{choice_value}{sub_widgets}
'\n\nclass AdjudicatorFeedbackCheckboxSelectMultiple(forms.CheckboxSelectMultiple):\n renderer = AdjudicatorFeedbackCheckboxFieldRenderer\n\nclass AdjudicatorFeedbackCheckboxSelectMultipleField(forms.MultipleChoiceField):\n \"\"\"Class to do multiple choice fields following our conventions.\n Specifically, converts to a string rather than a list.\"\"\"\n widget = AdjudicatorFeedbackCheckboxSelectMultiple\n\n def clean(self, value):\n value = super(AdjudicatorFeedbackCheckboxSelectMultipleField, self).clean(value)\n return AdjudicatorFeedbackQuestion.CHOICE_SEPARATOR.join(value)\n\n# Feedback Forms\n\nclass BaseFeedbackForm(forms.Form):\n \"\"\"Base class for all dynamically-created feedback forms. Contains all\n question fields.\"\"\"\n\n # parameters set at \"compile time\" by subclasses\n tournament = None # must be set by subclasses\n _use_tournament_password = False\n _confirm_on_submit = False\n _enforce_required = True\n question_filter = dict()\n\n def __init__(self, *args, **kwargs):\n super(BaseFeedbackForm, self).__init__(*args, **kwargs)\n self._create_fields()\n\n def _make_question_field(self, question):\n if question.answer_type == question.ANSWER_TYPE_BOOLEAN_SELECT:\n field = BooleanSelectField()\n elif question.answer_type == question.ANSWER_TYPE_BOOLEAN_CHECKBOX:\n field = forms.BooleanField()\n elif question.answer_type == question.ANSWER_TYPE_INTEGER_TEXTBOX:\n min_value = int(question.min_value) if question.min_value else None\n max_value = int(question.max_value) if question.max_value else None\n field = forms.IntegerField(min_value=min_value, max_value=max_value)\n elif question.answer_type == question.ANSWER_TYPE_INTEGER_SCALE:\n min_value = int(question.min_value) if question.min_value is not None else None\n max_value = int(question.max_value) if question.max_value is not None else None\n if min_value is None or max_value is None:\n logger.error(\"Integer scale %r has no min_value or no max_value\" % question.reference)\n field = forms.IntegerField()\n else:\n field = IntegerScaleField(min_value=min_value, max_value=max_value)\n elif question.answer_type == question.ANSWER_TYPE_FLOAT:\n field = forms.FloatField(min_value=question.min_value,\n max_value=question.max_value)\n elif question.answer_type == question.ANSWER_TYPE_TEXT:\n field = forms.CharField()\n elif question.answer_type == question.ANSWER_TYPE_LONGTEXT:\n field = forms.CharField(widget=forms.Textarea)\n elif question.answer_type == question.ANSWER_TYPE_SINGLE_SELECT:\n field = OptionalChoiceField(choices=question.choices_for_field)\n elif question.answer_type == question.ANSWER_TYPE_MULTIPLE_SELECT:\n field = AdjudicatorFeedbackCheckboxSelectMultipleField(choices=question.choices_for_field)\n field.label = question.text\n field.required = self._enforce_required and question.required\n return field\n\n def _create_fields(self):\n \"\"\"Creates dynamic fields in the form.\"\"\"\n # Feedback questions defined for the tournament\n adj_min_score = self.tournament.pref('adj_min_score')\n adj_max_score = self.tournament.pref('adj_max_score')\n score_label = mark_safe(\"Overall score
(%s=lowest, %s=highest)\" % (adj_min_score, adj_max_score))\n self.fields['score'] = forms.FloatField(min_value=adj_min_score, max_value=adj_max_score, label=score_label)\n\n for question in self.tournament.adj_feedback_questions.filter(**self.question_filter):\n self.fields[question.reference] = self._make_question_field(question)\n\n # Tournament password field, if applicable\n if self._use_tournament_password and self.tournament.pref('public_use_password'):\n self.fields['password'] = TournamentPasswordField(tournament=self.tournament)\n\n def save_adjudicatorfeedback(self, **kwargs):\n \"\"\"Saves the question fields and returns the AdjudicatorFeedback.\n To be called by save() of child classes.\"\"\"\n af = AdjudicatorFeedback(**kwargs)\n\n if self._confirm_on_submit:\n self.discard_all_existing(adjudicator=kwargs['adjudicator'],\n source_adjudicator=kwargs['source_adjudicator'],\n source_team=kwargs['source_team'])\n af.confirmed = True\n\n af.score = self.cleaned_data['score']\n af.save()\n\n for question in self.tournament.adj_feedback_questions.filter(**self.question_filter):\n if self.cleaned_data[question.reference] is not None:\n answer = question.answer_type_class(feedback=af, question=question,\n answer=self.cleaned_data[question.reference])\n answer.save()\n\n return af\n\n def discard_all_existing(self, **kwargs):\n for fb in AdjudicatorFeedback.objects.filter(**kwargs):\n fb.discarded = True\n fb.save()\n\ndef make_feedback_form_class(source, *args, **kwargs):\n \"\"\"Constructs a FeedbackForm class specific to the given source.\n 'source' is the Adjudicator or Team who is giving feedback.\n 'submission_fields' is a dict of fields that is passed directly as keyword\n arguments to Submission.\n 'confirm_on_submit' is a bool, and indicates that this feedback should be\n as confirmed and all others discarded.\"\"\"\n if isinstance(source, Adjudicator):\n return make_feedback_form_class_for_adj(source, *args, **kwargs)\n elif isinstance(source, Team):\n return make_feedback_form_class_for_team(source, *args, **kwargs)\n else:\n raise TypeError('source must be Adjudicator or Team: %r' % source)\n\ndef make_feedback_form_class_for_adj(source, submission_fields, confirm_on_submit=False,\n enforce_required=True, include_unreleased_draws=False):\n \"\"\"Constructs a FeedbackForm class specific to the given source adjudicator.\n Parameters are as for make_feedback_form_class.\"\"\"\n\n def adj_choice(da):\n return (da.id, '%s (%s, %s)' % (da.adjudicator.name,\n da.debate.round.name, da.get_type_display()))\n def coerce_da(value):\n return DebateAdjudicator.objects.get(id=int(value))\n\n debate_filter = {'debateadjudicator__adjudicator': source}\n if not source.tournament.pref('panellist_feedback_enabled'):\n debate_filter['debateadjudicator__type'] = DebateAdjudicator.TYPE_CHAIR # include only debates for which this adj was the chair\n if include_unreleased_draws:\n debate_filter['round__draw_status__in'] = [Round.STATUS_CONFIRMED, Round.STATUS_RELEASED]\n else:\n debate_filter['round__draw_status'] = Round.STATUS_RELEASED\n debates = Debate.objects.filter(**debate_filter)\n\n choices = [(None, '-- Adjudicators --')]\n # for an adjudicator, find every adjudicator on their panel except them\n choices.extend(adj_choice(da) for da in DebateAdjudicator.objects.filter(\n debate__in=debates).exclude(\n adjudicator=source).select_related(\n 'debate').order_by(\n '-debate__round__seq'))\n\n class FeedbackForm(BaseFeedbackForm):\n tournament = source.tournament # BaseFeedbackForm setting\n 
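        # Note on this pattern: defining FeedbackForm inside the factory lets
        # it close over 'source', 'choices' and 'coerce_da'. Building the class
        # dynamically with type() would be equivalent in effect, since type()
        # defers to the bases' metaclass (here Django's form metaclass). A
        # sketch, with names illustrative only:
        #
        # attrs = {"tournament": source.tournament,
        #          "debate_adjudicator": RequiredTypedChoiceField(choices=choices, coerce=coerce_da)}
        # FeedbackForm = type("FeedbackForm", (BaseFeedbackForm,), attrs)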
_use_tournament_password = True # BaseFeedbackForm setting\n _confirm_on_submit = confirm_on_submit\n _enforce_required = enforce_required\n question_filter = dict(chair_on_panellist=True)\n\n debate_adjudicator = RequiredTypedChoiceField(choices=choices, coerce=coerce_da)\n\n def save(self):\n \"\"\"Saves the form and returns the AdjudicatorFeedback object.\"\"\"\n da = self.cleaned_data['debate_adjudicator']\n sa = DebateAdjudicator.objects.get(adjudicator=source, debate=da.debate)\n kwargs = dict(adjudicator=da.adjudicator, source_adjudicator=sa, source_team=None)\n kwargs.update(submission_fields)\n return self.save_adjudicatorfeedback(**kwargs)\n\n return FeedbackForm\n\ndef make_feedback_form_class_for_team(source, submission_fields, confirm_on_submit=False,\n enforce_required=True, include_unreleased_draws=False):\n \"\"\"Constructs a FeedbackForm class specific to the given source team.\n Parameters are as for make_feedback_form_class.\"\"\"\n\n # Only include non-silent rounds for teams.\n debate_filter = {\n 'debateteam__team': source,\n 'round__silent': False,\n }\n if include_unreleased_draws:\n debate_filter['round__draw_status__in'] = [Round.STATUS_CONFIRMED, Round.STATUS_RELEASED]\n else:\n debate_filter['round__draw_status'] = Round.STATUS_RELEASED\n debates = Debate.objects.filter(**debate_filter).order_by('-round__seq')\n\n choices = [(None, '-- Adjudicators --')]\n for debate in debates:\n try:\n chair = DebateAdjudicator.objects.get(debate=debate, type=DebateAdjudicator.TYPE_CHAIR)\n except DebateAdjudicator.DoesNotExist:\n continue\n panel = DebateAdjudicator.objects.filter(debate=debate, type=DebateAdjudicator.TYPE_PANEL)\n if panel.exists():\n choices.append((chair.id, '{name} ({r} - chair gave oral)'.format(\n name=chair.adjudicator.name, r=debate.round.name)))\n for da in panel:\n choices.append((da.id, '{name} ({r} - chair rolled, this panellist gave oral)'.format(\n name=da.adjudicator.name, r=debate.round.name)))\n else:\n choices.append((chair.id, '{name} ({r})'.format(\n name=chair.adjudicator.name, r=debate.round.name)))\n\n def coerce_da(value):\n return DebateAdjudicator.objects.get(id=int(value))\n\n class FeedbackForm(BaseFeedbackForm):\n tournament = source.tournament # BaseFeedbackForm setting\n _use_tournament_password = True # BaseFeedbackForm setting\n _confirm_on_submit = confirm_on_submit\n _enforce_required = enforce_required\n question_filter = dict(team_on_orallist=True)\n\n debate_adjudicator = RequiredTypedChoiceField(choices=choices, coerce=coerce_da)\n\n def save(self):\n # Saves the form and returns the m.AdjudicatorFeedback object\n da = self.cleaned_data['debate_adjudicator']\n st = DebateTeam.objects.get(team=source, debate=da.debate)\n kwargs = dict(adjudicator=da.adjudicator, source_adjudicator=None, source_team=st)\n kwargs.update(submission_fields)\n return self.save_adjudicatorfeedback(**kwargs)\n\n return FeedbackForm\n\n","sub_path":"adjfeedback/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":13271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"394833690","text":"# write a code to validate -string starting and ending with a uppercase letter\n# -except special characters -minimum length 5 -maximum length 10\n\nimport re\nstr=input(\"Enter a string :\")\nx='^[A-Z]\\w[A-Z]{5,10}'\nmatch=re.fullmatch(x,str)\nif match is not None:\n print(\"Valid\")\nelse:\n 
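    # Note: the pattern '^[A-Z]\w[A-Z]{5,10}' above does not implement the
    # stated rules; it matches one uppercase letter, one word character, then
    # 5 to 10 further uppercase letters (length 7 to 12). Assuming the
    # requirements as written (uppercase first and last character, only
    # letters/digits in between, total length 5 to 10), a pattern like this
    # would fit:
    #
    # match = re.fullmatch(r'[A-Z][A-Za-z0-9]{3,8}[A-Z]', str)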
print(\"invalid\")","sub_path":"Exam2/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"91876909","text":"def solution(N, stages):\n answer = []\n length = len(stages)\n result = {}\n\n for stage in range(1, N + 1):\n if length != 0:\n fail = stages.count(stage)\n result[stage] = fail / length\n length -= fail\n else:\n result[stage] = 0\n\n answer = sorted(result, key = lambda x: result[x], reverse = True)\n \n return answer","sub_path":"JE/7주차/FailRate.py","file_name":"FailRate.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"29817886","text":"VERSION = '0.2'\nIFACE_NAME = 'org.exaile.DBusInterface'\nIFACE_PATH = '/DBusInterfaceObject'\nimport CurrentSong\nclass Exaile( CurrentSong.DbusBase ):\n '''Exaile Interface'''\n def __init__( self ):\n CurrentSong.DbusBase.__init__( self, IFACE_NAME, self.setInterface )\n try: self.iface\n except: self.iface = None\n def setInterface( self ):\n proxy_obj = self.bus.get_object( IFACE_NAME, IFACE_PATH )\n self.iface = self.module.Interface( proxy_obj, IFACE_NAME )\n def getCoverPath( self ):\n if self.iface:\n return self.iface.get_cover_path()\n else:\n return None\n def setCurrentSongData( self ):\n if self.iface:\n self.artist = self.iface.get_artist()\n self.title = self.iface.get_title()\n self.album = self.iface.get_album()\n def getVersion( self ):\n try:\n self.iface.get_version()\n except:\n return False\n return True\n def isPlaying( self ):\n if not self.getVersion():\n return False\n if self.iface.get_artist() != None and \\\n self.iface.get_title() != None and \\\n self.iface.get_album() != None:\n return True\n return False\n def check( self ):\n if not self.iface or not self.isNameActive(IFACE_NAME):\n return\n if self.artist != self.iface.get_artist() or \\\n self.title != self.iface.get_title() or \\\n self.album != self.iface.get_album():\n self.setCurrentSongData()\n return True\n return False\n","sub_path":"emesene/rev1286-1505/right-branch-1505/plugins_base/currentSong/Exaile.py","file_name":"Exaile.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"366793711","text":"def decode_II(bits: str) -> int:\n def recurse(bits, k, mem):\n if k == 0:\n return 1\n\n s = len(bits) - k\n if bits[s] == '0':\n return 0\n\n if mem[k] is not None:\n return mem[k]\n\n ret = recurse(bits, k - 1, mem)\n if k >= 2 and int(bits[s:s + 2]) < 27:\n ret += recurse(bits, k - 2, mem)\n\n mem[k] = ret\n return ret\n\n mem = [None] * (len(bits) + 1)\n return recurse(bits, len(bits), mem)\n\nif __name__ == \"__main__\":\n bits = \"1*\"\n print(decode_II(bits))","sub_path":"decode_II.py","file_name":"decode_II.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"32096635","text":"import resnet\nimport params\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport seaborn as sn\nfrom sklearn.metrics import confusion_matrix\nimport torchvision\nfrom torchvision import transforms\nimport numpy as np\nimport pandas as pd\nimport random\nfrom data import Dataset, Subset\nfrom copy import deepcopy\n\ndef Train (ResNet, task, 
train_Dataset, new_train_indexes): #this mmethod is used to add neurons to the fc layer and train\n\n ResNet.train()\n train_set=Subset(train_Dataset,new_train_indexes,transform=params.transform_train)\n train_loader = DataLoader( train_set, num_workers=params.NUM_WORKERS, batch_size=params.BATCH_SIZE, shuffle=True)\n\n if task > 0:\n #old network for distillation loss\n oldNet = deepcopy(ResNet)\n oldNet = oldNet.to(params.DEVICE)\n oldNet.train(False)\n\n #add *params.TASK_CLASSES* neurons to the fc layer:\n in_features = ResNet.fc.in_features #save n° of input features of old fc\n out_features = ResNet.fc.out_features #save n° of output features of old fc\n weight = ResNet.fc.weight.data #save weights of old fc\n\n ResNet.fc = nn.Linear(in_features, out_features + params.TASK_CLASSES) #new fc\n ResNet.fc.weight.data[:out_features] = weight #weights for previous classes are the same\n ResNet.to(params.DEVICE) #otherwise the new layer is not on device\n\n criterion=nn.BCEWithLogitsLoss(reduction=\"mean\")\n optimizer = torch.optim.SGD(ResNet.parameters(), lr=params.LR, momentum=params.MOMENTUM, weight_decay=params.WEIGHT_DECAY)\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer,params.STEP_SIZE,gamma=params.GAMMA,last_epoch=-1)\n\n\n #training loop\n for epoch in range(params.N_EPOCHS):\n correct_preds=0\n n_images=0\n for images, labels, indices in train_loader:\n images = images.float().to(params.DEVICE) ## need to be float\n labels = labels.to(params.DEVICE)\n indices = indices.to(params.DEVICE)\n Id = torch.eye(params.TASK_CLASSES*(task+1))\n onehot_labels = Id[labels].cuda() #one-hot encoding\n optimizer.zero_grad()\n output = ResNet(images,features = False)\n # classification loss\n if task==0:\n loss = criterion(output, onehot_labels)\n # classification loss and distillation lost\n if task>0:\n q = torch.zeros(50000, params.TASK_CLASSES*(task)).cuda()\n g = torch.sigmoid(oldNet(images))\n q[indices] = g.data\n q_i = q[indices]\n onehot_labels[:,:params.TASK_CLASSES*(task)] = q_i[:,:params.TASK_CLASSES*(task)]\n loss=criterion(output,onehot_labels)\n\n loss.backward()\n optimizer.step()\n\n _ , preds = output.max(1)\n correct_preds += (preds == labels).sum()\n n_images += len(images)\n accuracy = correct_preds/n_images\n scheduler.step()\n print(f\"in task {task} and epoch {epoch} the loss is {loss} and the accuracy is {accuracy}\")\n return ResNet, train_set, train_loader\n\n\n\noriginal_trainset = torchvision.datasets.CIFAR100(root= './data', train= True, transform= params.transform_train, download = True)\noriginal_testset = torchvision.datasets.CIFAR100(root = './data', train = False, transform= params.transform_test, download = True)\n\n# use our custom class for dataset\ntrain_Dataset = Dataset(original_trainset, classes_per_task= params.TASK_CLASSES,transform= params.transform_train)\ntest_Dataset = Dataset(original_testset, classes_per_task = params.TASK_CLASSES,transform= params.transform_test)\n\n\n\n#check if the splits in train and test are equal\nassert (np.array_equal(train_Dataset.splits, test_Dataset.splits)), \"The splits are different, check the code!\"\n\n# number of splits = number of tasks\nsplits =(train_Dataset.splits)\nn_tasks = splits.shape[0]\nsplits = splits.tolist()\nprint(\"Successful split. 
Number of tasks: \", n_tasks)\n\ntrain_indexes = []\ntest_indexes = []\n\nResNet=resnet.resnet32(num_classes=params.TASK_CLASSES)\nResNet.to(params.DEVICE)\n\n\nrandom.seed(params.SEED)\nnp.random.seed(params.SEED)\ntorch.manual_seed(params.SEED)\n\n\nfor task in range(n_tasks):\n #indexes for this task\n known_classes= task*params.TASK_CLASSES\n\n #trainset and testset of the task\n train_indexes = train_Dataset.__getIndexesGroups__(task*params.TASK_CLASSES) # splits[task]\n test_indexes = test_indexes + test_Dataset.__getIndexesGroups__(task*params.TASK_CLASSES)\n\n ResNet ,train_set, train_loader = Train(ResNet, task, train_Dataset, train_indexes)\n ResNet.eval()\n\n#results on training set\n all_preds = []\n all_labels = []\n n_images = 0\n correct_preds=0\n for images, labels, _ in train_loader:\n images = images.float().to(params.DEVICE) ## need to be float\n labels = labels.to(params.DEVICE)\n output = ResNet(images,features = False)\n _ , preds = output.max(1)\n correct_preds += (preds == labels).sum()\n n_images += len(images)\n all_preds = np.concatenate((all_preds,preds.cpu()))\n all_labels = np.concatenate((all_labels,labels.cpu()))\n\n\n accuracy = correct_preds/n_images\n print(f\"accuracy on training set: {accuracy}\")\n\n #results on test set\n test_set = Subset(test_Dataset,test_indexes,transform=params.transform_test)\n test_loader = DataLoader( test_set, num_workers=params.NUM_WORKERS, batch_size=params.BATCH_SIZE, shuffle=True)\n all_preds = []\n all_labels = []\n n_images = 0\n correct_preds=0\n for images, labels, _ in test_loader:\n images = images.float().to(params.DEVICE) ## need to be float\n labels = labels.to(params.DEVICE)\n output = ResNet(images,features = False)\n _ , preds = output.max(1)\n n_images += len(images)\n correct_preds += (preds == labels).sum()\n all_preds = np.concatenate((all_preds,preds.cpu()))\n all_labels = np.concatenate((all_labels,labels.cpu()))\n\n accuracy = correct_preds/n_images\n print(f\"accuracy on test set: {accuracy}\")\n\n\n #confusion matrix\n cm = confusion_matrix(all_labels,all_preds)\n df_cm = pd.DataFrame(cm, range((task+1)*params.TASK_CLASSES), range((task+1)*params.TASK_CLASSES))\n plt.figure(figsize = (10,7))\n sn.heatmap(df_cm, annot=False, cmap=\"viridis\")\n plt.savefig(f\"{task}_cf\")\n ResNet = ResNet.train(True)\n","sub_path":"LWF/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"391368108","text":"#퀵 정렬 알고리즘 구현\nfrom typing import MutableSequence\n\ndef qsort(a: MutableSequence, left: int, right: int) -> None:\n pl = left #왼쪽 커서\n pr = right #오른쪽 커서\n x = a[(left + right) // 2] #피벗(가운데 원소)\n\n while pl <= pr:\n while a[pl] < x: pl += 1\n while a[pr] > x : pr -= 1\n if pl <= pr:\n a[pl], a[pr] = a[pr], a[pl]\n pl += 1\n pr -= 1\n if left < pr: qsort(a, left, pr)\n if right > pl: qsort(a, pl, right)\n\ndef quick_sort(a: MutableSequence) -> None:\n qsort(a, 0, len(a) -1)\n\nif __name__ == '__main__' :\n print('퀵 정렬 수행')\n num = int(input('원소 개수 입력 : '))\n x = [None] * num\n\n for i in range(num):\n x[i] = int(input(f'x[{i}] : '))\n\n quick_sort(x)\n\n print('오름차순 정렬 완료')\n for i in range(num):\n print(f'x[{i}] = {x[i]}')\n","sub_path":"6.정렬 알고리즘/quick_sort1.py","file_name":"quick_sort1.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"526817863","text":"import random\n\ndef big_endian(data, 
start, end):\n i = start\n j = 2*i+1\n tmp = data[i]\n while j <= end:\n if j+1 <= end and data[j+1] > data[j]:\n j += 1\n if data[j] > tmp:\n data[i] = data[j]\n i = j\n j = 2*i+1\n else:\n break\n data[i] = tmp\n\n\ndef heap_sort(data):\n n = len(data)\n for x in range(n//2-1, -1, -1):\n big_endian(data, x, n-1)\n for x in range(n-1, -1, -1):\n data[0], data[x] = data[x], data[0]\n big_endian(data, 0, x-1)\n\n\ndata_list = list(range(10))\nrandom.shuffle(data_list)\nprint(data_list)\nheap_sort(data_list)\nprint(data_list)","sub_path":"堆排.py","file_name":"堆排.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"527050512","text":"import numpy as np\nfrom math import exp #to calculate the transformed feature data\n\n#CSState is an object which stores as much information as possible about the current state. \n#All the variables are initialized to None for memory sake when conducting MCTS search, as only action_indices\n#is needed when conducting MCTS search\n\nclass State():\n def __init__(self, actions_indicator, col_indices = [], identifier = None): #default of col_indices is None if not specified\n #action_indicator initalizes variables which are used for MCTS search. All other variables are\n #initialized when Neural Network computes features\n self.action_indices = actions_indicator #numpy array of 0 and 1's indicating available actions. \n self.identifier = identifier #identifies which MCTS tree state belongs to.\n self.keyRep = None #Assuming game.keyRepresentation is called, the key is saved here. Should be an integer. \n self.col_indices = col_indices\n #feature dictionary\n self.feature_dic = {} #contains all relevant feature data used in NN. Use methods below to compute these\n #compute terminal reward, 0 if not a terminal state.\n self.termreward = None\n #labels(if there are any)\n pi_as_label = np.zeros(actions_indicator.size)\n #pi_as_label[-1] = 1\n self.pi_as = pi_as_label\n self.z = None #The computed label for state\n #NN_input format for prediction(dont need labels for states we wish to predict)\n self.nn_input = None\n self.inverse = None #stores (A^T * A)^-1 for this state wrt to columns chosen. We can compute this from previous state\n self.ATy = None #stores A^T * b. We can compute this from previous state\n \n def computecolStats(self): #O(n) operation, where n is the length of the list\n S = []\n for i in range(self.action_indices.size-1):\n if self.action_indices[i] == 1:\n S.append(i)\n \n self.col_indices = S\n \n def compute_x_S_and_res(self, args, Game_args): #compute feature vectors for feeding into NN. Labels are returned by computeTermReward. 
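        # Background for the cached pieces used below: with S the chosen
        # columns, the least-squares coefficients are
        #     x_S = (A_S^T A_S)^{-1} A_S^T y,
        # so storing self.inverse = (A_S^T A_S)^{-1} and self.ATy = A_S^T y
        # reduces each evaluation to one matrix-vector product. A standalone
        # numpy sketch (the matrix, vector and S are made-up examples):
        #
        # A = np.random.randn(7, 20); y = np.random.randn(7); S = [0, 3, 5]
        # A_S = A[:, S]
        # inverse = np.linalg.inv(A_S.T @ A_S)
        # ATy = A_S.T @ y
        # x_S = inverse @ ATy   # equals np.linalg.lstsq(A_S, y, rcond=None)[0]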
\n if self.col_indices: #If self.col_indices is not an empty list(meaning that we are not at the start state in which we have not chosen any columns)\n #FEATURE 1:\n if args['x_l2'] == True:\n x_S = np.matmul(self.inverse, self.ATy)\n x_S = x_S.flatten() #flatten x_S shape from [|S|, 1) to (|S|,) for computations below\n\n opt_sol_l2 = np.zeros(args['n'])\n i = 0\n for k in self.col_indices:\n opt_sol_l2[k] = x_S[i]\n i += 1\n \n self.feature_dic['x_l2']=opt_sol_l2\n\n \n #FEATURE 2:\n if args['lambda'] == True: \n residual = Game_args.obs_vector - np.matmul(Game_args.sensing_matrix[:, self.col_indices], x_S)\n \n col_res_IP = np.matmul(Game_args.sensing_matrix.transpose(), residual)\n self.feature_dic['col_res_IP'] = col_res_IP\n\n else: #If column indices is empty, this means we have not chosen any columns, so current l2 solution is 0, and col_res_IP is A^T*y\n if args['x_l2'] == True:\n self.feature_dic['x_l2'] = np.zeros(args['n'])\n if args['lambda'] == True:\n self.feature_dic['col_res_IP'] = np.matmul(Game_args.sensing_matrix.transpose(), Game_args.obs_vector)\n \n def computeTermReward(self, args, Game_args): \n #determine whether terminal state conditions are met. If any of the terminal state conditions are met, return terminal value, which is negative\n #See Game.getGameEnded. \n #1)Game.getGameEnded() is called in MCTS.search to verify if a state/node we are currently at in MCTS search is a terminal state or not.\n #2)Game.getGameEnded() is also called in Coach.executeEpisode, when self play is being conducted. For each game state we enter in self-play, we call Game.getGameEnded() to verify if we are at a terminal state or not.\n # If we are at a terminal state, then we stop and convert all the states w'eve visited to training samples with the labels being the terminal rewards. \n #3)If self.termreward = 0, then the state is NOT a terminal state. Only nonzero self.termrewards should be labels for training the neural network. \n if self.col_indices: #If self.col_indices is not an empty list\n S = self.col_indices #note that when we compute the termreward for initial state, THIS WILL RETURN AN ERROR because col_indices of initial state is [] \n A_S = Game_args.sensing_matrix[:,S]\n x_S = np.matmul(self.inverse, self.ATy)\n #Note that product.shape = (7, 1)\n product = np.matmul(A_S, x_S)\n product = product.flatten()\n #Note that product.shape = (7, ) and Game_args.obs_vector.shape = (7, )\n residual = Game_args.obs_vector - product\n res_norm_squared = np.linalg.norm(residual)**2\n \n #if terminal state, compute the reward.\n if len(self.col_indices) == Game_args.game_iter or self.action_indices[-1] == 1 or res_norm_squared < args['epsilon']: #Game_args.game_iter is set every time we call Game_args.generateNewObsVec\n self.termreward = - args['alpha']*len(self.col_indices) - args['gamma']*res_norm_squared\n \n #ow, reward is 0 if state is not a terminal state\n else:\n self.termreward = 0 #not terminal state\n \n elif self.action_indices[-1] == 1: #If self.col_indices is an empty list, but stopping action was taken, then reward is exactly equal to the negative of squared norm of y * gamma\n self.termreward = -args['gamma']*np.linalg.norm(Game_args.obs_vector)**2\n \n else:#If self.col_indices is an empty list, and self.action_indices[-1] != 1, then this implies we are at initial state. Set self.termreward to zero.\n self.termreward = 0\n \n \n def converttoNNInput(self): \n #convert data in features dictionary into format recognizable by NN for prediction. 
This method is used in MCTS search method, where we output the p_as and z for searching for the next node to go to and backpropagating the reward. \n #features_dic MUST ALREADY BE COMPUTED \n NN_input_X = []\n for key in self.feature_dic:\n feature_data = self.feature_dic[key]\n feature_data = np.reshape(feature_data, (1, feature_data.size)) #reshape to (1, feature_data.size) for single predictions. Must be of this form for model.predict\n NN_input_X.append(feature_data)\n \n self.nn_input = NN_input_X\n \n\n \n \n\n \n \n \n ","sub_path":"prev_versions/batch+matrix_inversion/alphazero_compressedsensing_nonoise_hierarchical_v2/compressed_sensing/CSState.py","file_name":"CSState.py","file_ext":"py","file_size_in_byte":6889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"328934309","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 19 21:21:05 2019\n\n@author: 10502\n\"\"\"\nclass Polynomial:\n def __init__(self,dic):\n self.dic=dic\n def __call__(self,x):\n c=0\n for k in self.dic:\n c+=self.dic[k]*x**(k)\n return c\n def __add__(self,another):\n result={}\n for k in self.dic:\n result[k]=self.dic[k]\n for j in another.dic:\n if k==j:\n result[k]=self.dic[k]+another.dic[j]\n else:\n result[j]=another.dic[j]\n return Polynomial(result)\n\nx=1 \na=Polynomial({1:1,100:-3})(x)\nb=Polynomial({20:1,1:-1,100:4})(x)\nprint(a+b)\n \n\n","sub_path":"p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"99980079","text":"\nimport numpy as np\n\n\n# -----------------------------------\ndef mesh(filename = \"data/MeshRegion.exp\"):\n \"\"\"\n Read in the triangular mesh from a text file.\n\n Returns:\n =======\n x, y: coordinates of the mesh vertices\n ele: 3-column array of indices of the vertices of each triangle\n \"\"\"\n\n with open(filename, 'r') as fid:\n num_triangles, num_nodes, _, _ = map(int, fid.readline().split())\n\n x = np.zeros(num_nodes, dtype = np.float64)\n y = np.zeros(num_nodes, dtype = np.float64)\n ele = np.zeros((num_triangles, 3), dtype = np.int32)\n bnd = np.zeros(num_nodes, dtype = np.int32)\n\n fid.readline()\n\n for i in range(num_nodes):\n line = fid.readline().split()\n x[i] = float(line[2])\n y[i] = float(line[3])\n bnd[i] = int(line[4])\n\n for n in range(num_triangles):\n line = fid.readline().split()\n ele[n, :] = map(lambda s: int(s) - 1, line[2:5])\n\n return x, y, ele, bnd\n\n\n# ----------------------------------\ndef data(filename = \"data/RegionVis.dat\"):\n \"\"\"\n Read in the observed/modelled data from a text file.\n\n Returns:\n =======\n u, v: measured ice surface velocity (m/a)\n s, b: measured ice surface/bed elevation (m/a)\n tau_b: modelled ice basal shear stress (Pa)\n C: modelled ice friction coefficient (Pa * a/m)\n \"\"\"\n\n x, y, _, _ = mesh()\n num_nodes = len(x)\n\n u = np.zeros(num_nodes, dtype = np.float64)\n v = np.zeros(num_nodes, dtype = np.float64)\n s = np.zeros(num_nodes, dtype = np.float64)\n b = np.zeros(num_nodes, dtype = np.float64)\n tau_b = np.zeros(num_nodes, dtype = np.float64)\n C = np.zeros(num_nodes, dtype = np.float64)\n\n with open(filename, 'r') as fid:\n fid.readline()\n\n for i in range(num_nodes):\n l = map(float, fid.readline().split())\n u[i], v[i] = l[2:4]\n s[i], b[i] = l[6:8]\n tau_b[i] = l[9]\n C[i] = l[10]\n\n return u, v, s, b, tau_b, 
C\n","sub_path":"data/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"522042889","text":"import os\nimport sys\n\nOOI_ARRAYS = {'CP' : 'Coastal_Pioneer',\n 'CE' : 'Coastal_Endurance',\n 'GI' : 'Global_Irminger_Sea',\n 'GS' : 'Global_Southern_Ocean',\n 'GA' : 'Global_Argentine_Basin',\n 'RS' : 'Cabled_Array',\n 'GP' : 'Global_Station_Papa'}\n \ndef build_nc_dest(instrument, telemetry, stream, deployment_number):\n \n # Create the directory\n # Map the first 2 characters of the subsite to OOI_ARRAYS\n if instrument[:2] not in OOI_ARRAYS.keys():\n sys.stderr.write('No Array name found for instrument: {:s}\\n'.format(instrument))\n return None\n \n i_tokens = instrument.split('-')\n if len(i_tokens) != 4:\n sys.stderr.write('Invalid instrument reference designator: {:s}\\n'.format(instrument))\n return None\n \n return os.path.join(OOI_ARRAYS[instrument[:2]],\n i_tokens[0],\n i_tokens[1],\n '{:s}-{:s}'.format(i_tokens[2], i_tokens[3]),\n stream,\n telemetry,\n 'deployment{:04.0f}'.format(deployment_number))\n","sub_path":"asynclib/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"379187037","text":"# by default, serial name should be /dev/ttyACM0 unless you put \"dummy\" for fake arduino input\nserial_name = \"/dev/ttyACM0\"\n\n#the number of arduino sensors and pins and raspberry pi sensors and pins\nard_sensors = 'ir'\n \n#not using sensors yet, but will in the near future\nrasp_sensors = [\n ]\n\n######################################################################## \n#action of peripherals\nprogram = \"on_off\"\n \n\n#need to put raspberry pi pins here for these as the 3rd argument if needed. preface with \"dummy_\" for setting up a dummy version of peripheral device, for pin use GPIO pin, not the regular raspberry pi board pin\nperiphs =[['dummy_led_matrix'],\n ['dummy_feeder', 'a', 21],\n ['dummy_feeder', 'b', 20]\n ]\n\ntraining_switch_seconds = [30, 300] # min time, max time\ntesting_duration_secs = 120 # test for ____ seconds\ntesting_how_often = [900, 1800] # [test every min secs, max secs]\n\n#for start and end times use minute of day. 6 am would be 360 for instance\nstart_time = 360 \nend_time = 1080 \n#########################################################################\n\n#the experiment name used for data file name\nexp_name = \"test_experiment\"\n\n#prefix of file name ex. 
save_file_name + \"_date.txt\"\nsave_file_name = \"day_1\"\n\n#data to tag\nsave_model = [\n \"program\",\n \"ard_sensor\",\n \"datetime\",\n \"correct?\",\n \"training?\"\n ]\n\n","sub_path":"experiments/3_exp.py","file_name":"3_exp.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"469660413","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n\nclass Order(models.Model):\n PERSONS = (\n (\"1\", 1),\n (\"2\", 2),\n (\"3\", 3),\n (\"4\", 4),\n (\"5\", 5),\n (\"6\", 6)\n )\n\n reservator = models.ForeignKey(\n User,\n on_delete=models.CASCADE\n )\n\n phone = models.IntegerField(\n verbose_name='Номер Телефона'\n )\n\n date = models.DateField(\n verbose_name='Дата Бронирования'\n )\n\n time = models.TimeField(\n verbose_name='Время Бронирования'\n )\n\n persons = models.CharField(\n verbose_name='Количество человек',\n choices=PERSONS,\n max_length=5,\n default=PERSONS[0][0]\n )\n\n message = models.TextField(\n verbose_name='Комментарий',\n )\n\n date_created = models.DateTimeField(\n verbose_name='Дата создания Бронирования',\n default=timezone.now\n )\n\n def __str__(self):\n return f'{self.date} {self.time} - {self.reservator}'","sub_path":"Resto-master/reservation/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"440733727","text":"import numpy as np \r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndf=pd.read_csv('data.csv')\r\n# df=df.fillna(np.NaN)\r\n#membuat plot antara Age vs Overall\r\nxAge=df['Age'][(df['Age']<=25)&(df['Overall']>=80)&(df['Potential']>=80)]\r\nyOverall=df['Overall'][(df['Age']<=25)&(df['Overall']>=80)&(df['Potential']>=80)]\r\nyPotential = df['Potential'][(df['Age']<=25)&(df['Overall']>=80)&(df['Potential']>=80)]\r\nindexX=xAge.index.tolist()\r\nxout=df['Age'].loc[~df.index.isin(indexX)]\r\nyOvout=df['Overall'].loc[~df.index.isin(indexX)]\r\nyPotout=df['Potential'].loc[~df.index.isin(indexX)]\r\nplt.figure()\r\nplt.subplot(121)\r\nplt.scatter(xAge,yOverall,label='Target',color='g')\r\nplt.scatter(xout,yOvout,label='Non-Target',color='r')\r\nplt.title('Age vs Overall')\r\nplt.xlabel('Age')\r\nplt.ylabel('Overall')\r\nplt.legend()\r\nplt.grid(True)\r\n\r\nplt.subplot(122)\r\nplt.scatter(xAge,yPotential,label='Target',color='g')\r\nplt.scatter(xout,yPotout,label='Non-Target',color='r')\r\nplt.title('Age vs Potential')\r\nplt.xlabel('Age')\r\nplt.ylabel('Potential')\r\nplt.legend()\r\nplt.grid(True)\r\nplt.show()","sub_path":"Ujian_Pemain_Muda_Berbakat/soal2_1.py","file_name":"soal2_1.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"115692599","text":"# 10/04/2018\n# Identify how many numbers, letters, or special character in a string\n\nstring = input(\"Enter a word: \")\n\n# create a list that will contain the appropriate elements later\nnumber_list = []\nletter_list = []\ncharacter_list = []\n\nfor index in range(len(string)):\n element = string[index] # element (number, letter, character) on the word\n\n if element.isdigit(): # if the element in word is number then put that element to the number_list\n number_list.append(element)\n elif element.isalpha(): # if the element in word is letter then put that element to the letter_list\n 
letter_list.append(element)\n else: # if the element in word is not a number or a letter, automatically it is a special character\n character_list.append(element)\n\n# get the length (how many elements in the list) of every list then print\nprint(f\"Number Counts: {len(number_list)}\")\nprint(f\"Letter Counts: {len(letter_list)}\")\nprint(f\"Special Character Counts: {len(character_list)}\")\n\n","sub_path":"NumberLetterCharacterCounts.py","file_name":"NumberLetterCharacterCounts.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"59344167","text":"\"\"\"\nFile : MSTransferor.py\nAuthor : Valentin Kuznetsov \n Alan Malta \nDescription: MSTransferor class provides the whole logic for\ncentral production workflow's input data placement.\n\nThis is NOT a thread-safe module, even though some internal\ntasks might be extended to multi-threading in the future.\n\"\"\"\n# futures\nfrom __future__ import division, print_function\nfrom future.utils import viewitems, listvalues, listitems\nfrom future import standard_library\nstandard_library.install_aliases()\n\n# system modules\nfrom operator import itemgetter\nfrom pprint import pformat\nfrom retry import retry\nfrom random import randint, choice\nfrom copy import deepcopy\n\n# WMCore modules\nfrom Utils.IteratorTools import grouper\nfrom WMCore.MicroService.DataStructs.DefaultStructs import TRANSFEROR_REPORT,\\\n TRANSFER_RECORD, TRANSFER_COUCH_DOC\nfrom WMCore.MicroService.Tools.Common import gigaBytes, teraBytes\nfrom WMCore.MicroService.MSCore import MSCore\nfrom WMCore.MicroService.MSTransferor.RequestInfo import RequestInfo\nfrom WMCore.MicroService.MSTransferor.RSEQuotas import RSEQuotas\nfrom WMCore.Services.CRIC.CRIC import CRIC\nfrom WMCore.Services.AlertManager.AlertManagerAPI import AlertManagerAPI\n\ndef newTransferRec(dataIn):\n \"\"\"\n Create a basic transfer record to be appended to a transfer document\n :param dataIn: dictionary with information relevant to this transfer doc\n :return: a transfer record dictionary\n \"\"\"\n record = deepcopy(TRANSFER_RECORD)\n record[\"dataset\"] = dataIn['name']\n record[\"dataType\"] = dataIn['type']\n record[\"campaignName\"] = dataIn['campaign']\n return record\n\n\ndef newTransferDoc(reqName, transferRecords):\n \"\"\"\n Create a transfer document which is meant to be created in\n central CouchDB\n :param reqName: string with the workflow name\n :param transferRecords: list of dictionaries with transfer records\n :return: a transfer document dictionary\n \"\"\"\n doc = dict(TRANSFER_COUCH_DOC)\n doc[\"workflowName\"] = reqName\n doc[\"transfers\"] = transferRecords\n return doc\n\nclass MSTransferor(MSCore):\n \"\"\"\n MSTransferor class provide whole logic behind\n the transferor module.\n \"\"\"\n\n def __init__(self, msConfig, logger=None):\n \"\"\"\n Runs the basic setup and initialization for the MS Transferor module\n :param microConfig: microservice configuration\n \"\"\"\n super(MSTransferor, self).__init__(msConfig, logger=logger)\n\n # minimum percentage completion for dataset/blocks subscribed\n self.msConfig.setdefault(\"minPercentCompletion\", 99)\n # minimum available storage to consider a resource good for receiving data\n self.msConfig.setdefault(\"minimumThreshold\", 1 * (1000 ** 4)) # 1TB\n # limit MSTransferor to this amount of requests per cycle\n self.msConfig.setdefault(\"limitRequestsPerCycle\", 500)\n # Send warning messages for any data transfer above this threshold.\n # 
Set to negative to ignore.\n self.msConfig.setdefault(\"warningTransferThreshold\", 100. * (1000 ** 4)) # 100TB\n # weight expression for the input replication rules\n self.msConfig.setdefault(\"rucioRuleWeight\", 'ddm_quota')\n\n quotaAccount = self.msConfig[\"rucioAccount\"]\n\n self.rseQuotas = RSEQuotas(quotaAccount, self.msConfig[\"quotaUsage\"],\n minimumThreshold=self.msConfig[\"minimumThreshold\"],\n verbose=self.msConfig['verbose'], logger=logger)\n self.reqInfo = RequestInfo(self.msConfig, self.rucio, self.logger)\n\n self.cric = CRIC(logger=self.logger)\n self.inputMap = {\"InputDataset\": \"primary\",\n \"MCPileup\": \"secondary\",\n \"DataPileup\": \"secondary\"}\n self.uConfig = {}\n self.campaigns = {}\n self.psn2pnnMap = {}\n self.pnn2psnMap = {}\n self.dsetCounter = 0\n self.blockCounter = 0\n # service name used to route alerts via AlertManager\n self.alertServiceName = \"ms-transferor\"\n self.alertManagerUrl = self.msConfig.get(\"alertManagerUrl\", None)\n self.alertManagerApi = AlertManagerAPI(self.alertManagerUrl, logger=logger)\n\n @retry(tries=3, delay=2, jitter=2)\n def updateCaches(self):\n \"\"\"\n Fetch some data required for the transferor logic, e.g.:\n * account limits from Rucio\n * account usage from Rucio\n * unified configuration\n * all campaign configuration\n * PSN to PNN map from CRIC\n \"\"\"\n self.logger.info(\"Updating RSE/PNN quota and usage\")\n self.rseQuotas.fetchStorageQuota(self.rucio)\n self.rseQuotas.fetchStorageUsage(self.rucio)\n self.rseQuotas.evaluateQuotaExceeded()\n if not self.rseQuotas.getNodeUsage():\n raise RuntimeWarning(\"Failed to fetch storage usage stats\")\n\n self.logger.info(\"Updating all local caches...\")\n self.dsetCounter = 0\n self.blockCounter = 0\n self.uConfig = self.unifiedConfig()\n campaigns = self.reqmgrAux.getCampaignConfig(\"ALL_DOCS\")\n self.psn2pnnMap = self.cric.PSNtoPNNMap()\n self.pnn2psnMap = self.cric.PNNtoPSNMap()\n if not self.uConfig:\n raise RuntimeWarning(\"Failed to fetch the unified configuration\")\n elif not campaigns:\n raise RuntimeWarning(\"Failed to fetch the campaign configurations\")\n elif not self.psn2pnnMap:\n raise RuntimeWarning(\"Failed to fetch PSN x PNN map from CRIC\")\n else:\n # let's make campaign look-up easier and more efficient\n self.campaigns = {}\n for camp in campaigns:\n self.campaigns[camp['CampaignName']] = camp\n self.rseQuotas.printQuotaSummary()\n\n def execute(self, reqStatus):\n \"\"\"\n Executes the whole transferor logic\n :param reqStatus: request status to process\n :return:\n \"\"\"\n counterWorkflows = 0\n counterFailedRequests = 0\n counterProblematicRequests = 0\n counterSuccessRequests = 0\n summary = dict(TRANSFEROR_REPORT)\n try:\n requestRecords = self.getRequestRecords(reqStatus)\n self.updateReportDict(summary, \"total_num_requests\", len(requestRecords))\n msg = \" retrieved %s requests. \" % len(requestRecords)\n msg += \"Service set to process up to %s requests per cycle.\" % self.msConfig[\"limitRequestsPerCycle\"]\n self.logger.info(msg)\n except Exception as err: # general error\n requestRecords = []\n msg = \"Unknown exception while fetching requests from ReqMgr2. Error: %s\", str(err)\n self.logger.exception(msg)\n self.updateReportDict(summary, \"error\", msg)\n\n try:\n self.updateCaches()\n self.updateReportDict(summary, \"total_num_campaigns\", len(self.campaigns))\n self.updateReportDict(summary, \"nodes_out_of_space\", list(self.rseQuotas.getOutOfSpaceRSEs()))\n except RuntimeWarning as ex:\n msg = \"All retries exhausted! 
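# --- Illustrative sketch (added for clarity; not part of the original module) ---
# updateCaches() above is wrapped in @retry(tries=3, delay=2, jitter=2) from the
# `retry` package: the RuntimeWarning it raises when a fetch comes back empty
# triggers up to two further attempts before the error propagates to execute().
# A minimal stand-alone demonstration of the same pattern (delay=0 to keep it fast):
from retry import retry

attempts = {'count': 0}

@retry(RuntimeWarning, tries=3, delay=0)
def flaky_fetch():
    # fails twice, then succeeds, mimicking a cache that is briefly unavailable
    attempts['count'] += 1
    if attempts['count'] < 3:
        raise RuntimeWarning("cache not ready yet")
    return "ok"

print(flaky_fetch(), "after", attempts['count'], "attempts")   # ok after 3 attempts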
Last error was: '%s'\" % str(ex)\n msg += \"\\nRetrying to update caches again in the next cycle.\"\n self.logger.error(msg)\n self.updateReportDict(summary, \"error\", msg)\n return summary\n except Exception as ex:\n msg = \"Unknown exception updating caches. Error: %s\" % str(ex)\n self.logger.exception(msg)\n self.updateReportDict(summary, \"error\", msg)\n return summary\n\n # process all requests\n for reqSlice in grouper(requestRecords, 100):\n self.logger.info(\"Processing workflows from %d to %d.\",\n counterWorkflows + 1, counterWorkflows + len(reqSlice))\n # get complete requests information\n # based on Unified Transferor logic\n reqResults = self.reqInfo(reqSlice)\n self.logger.info(\"%d requests information completely processed.\", len(reqResults))\n\n for wflow in reqResults:\n if not self.verifyCampaignExist(wflow):\n counterProblematicRequests += 1\n continue\n\n # first, check whether any pileup dataset is already in place\n self.checkPUDataLocation(wflow)\n if wflow.getSecondarySummary() and not wflow.getPURSElist():\n # then we still have pileup to be transferred, but with incorrect locations\n self.alertPUMisconfig(wflow.getName())\n # FIXME: this needs to be logged somewhere and workflow be set to failed\n counterProblematicRequests += 1\n continue\n\n # now check where input primary and parent blocks will need to go\n self.checkDataLocation(wflow)\n\n try:\n success, transfers = self.makeTransferRequest(wflow)\n except Exception as ex:\n success = False\n self.alertUnknownTransferError(wflow.getName())\n msg = \"Unknown exception while making transfer request for %s \" % wflow.getName()\n msg = \"\\tError: %s\" % str(ex)\n self.logger.exception(msg)\n if success:\n self.logger.info(\"Transfer requests successful for %s. Summary: %s\",\n wflow.getName(), pformat(transfers)) # then create a document in ReqMgr Aux DB\n if self.createTransferDoc(wflow.getName(), transfers):\n self.logger.info(\"Transfer document successfully created in CouchDB for: %s\", wflow.getName())\n # then move this request to staging status\n self.change(wflow.getName(), 'staging', self.__class__.__name__)\n counterSuccessRequests += 1\n else:\n counterFailedRequests += 1\n self.alertTransferCouchDBError(wflow.getName())\n else:\n counterFailedRequests += 1\n # it can go slightly beyond the limit. It's evaluated for every slice\n if counterSuccessRequests >= self.msConfig[\"limitRequestsPerCycle\"]:\n msg = \"Transferor succeeded acting on %d workflows in this cycle. 
\" % counterSuccessRequests\n msg += \"Which exceeds the configuration limit set to: %s\" % self.msConfig[\"limitRequestsPerCycle\"]\n self.logger.info(msg)\n break\n counterWorkflows += len(reqSlice)\n\n self.logger.info(\"Summary for this cycle is:\")\n self.logger.info(\" * there were %d problematic requests;\", counterProblematicRequests)\n self.logger.info(\" * there were %d failed requests;\", counterFailedRequests)\n self.logger.info(\" * there were %d successful requests;\", counterSuccessRequests)\n self.logger.info(\" * a total of %d datasets were subscribed;\", self.dsetCounter)\n self.logger.info(\" * a total of %d blocks were subscribed.\", self.blockCounter)\n self.updateReportDict(summary, \"success_request_transition\", counterSuccessRequests)\n self.updateReportDict(summary, \"failed_request_transition\", counterFailedRequests)\n self.updateReportDict(summary, \"problematic_requests\", counterProblematicRequests)\n self.updateReportDict(summary, \"num_datasets_subscribed\", self.dsetCounter)\n self.updateReportDict(summary, \"num_blocks_subscribed\", self.blockCounter)\n self.updateReportDict(summary, \"nodes_out_of_space\", list(self.rseQuotas.getOutOfSpaceRSEs()))\n return summary\n\n def getRequestRecords(self, reqStatus):\n \"\"\"\n Queries ReqMgr2 for requests in a given status, sort them by priority\n and return a subset of each request with important information for the\n data placement algorithm.\n \"\"\"\n self.logger.info(\"Fetching requests in status: %s\", reqStatus)\n # get requests from ReqMgr2 data-service for given status\n reqData = self.reqmgr2.getRequestByStatus([reqStatus], detail=True)\n\n # we need to first put these requests in order of priority, as done for GQ...\n orderedRequests = []\n for requests in reqData:\n orderedRequests = listvalues(requests)\n orderedRequests.sort(key=itemgetter('RequestPriority'), reverse=True)\n\n return orderedRequests\n\n def verifyCampaignExist(self, wflow):\n \"\"\"\n Check whether the campaigns associated to all the input datasets\n exist in the database.\n :param wflow: a workflow object\n :return: True if campaigns exist, False otherwise\n \"\"\"\n for dataIn in wflow.getDataCampaignMap():\n if dataIn['campaign'] not in self.campaigns:\n msg = \"Workflow: %s has to transfer dataset: %s under the campaign: %s. \"\n msg += \"This campaign does not exist and needs to be created. 
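# --- Illustrative sketch (added for clarity; not part of the original module) ---
# getRequestRecords orders workflows by RequestPriority (highest first) and
# execute() then walks them in slices of 100 via grouper(). The same pattern in
# isolation, with made-up records and a simplified stand-in for
# Utils.IteratorTools.grouper:
from operator import itemgetter

def simple_grouper(iterable, n):
    # simplified stand-in, not the actual WMCore implementation
    chunk = []
    for item in iterable:
        chunk.append(item)
        if len(chunk) == n:
            yield chunk
            chunk = []
    if chunk:
        yield chunk

records = [{'RequestName': 'wf_a', 'RequestPriority': 10},
           {'RequestName': 'wf_b', 'RequestPriority': 90},
           {'RequestName': 'wf_c', 'RequestPriority': 50}]
records.sort(key=itemgetter('RequestPriority'), reverse=True)
for reqSlice in simple_grouper(records, 2):
    print([r['RequestName'] for r in reqSlice])   # ['wf_b', 'wf_c'] then ['wf_a']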
Skipping this workflow!\"\n self.logger.warning(msg, wflow.getName(), dataIn['name'], dataIn['campaign'])\n return False\n return True\n\n def checkDataLocation(self, wflow):\n \"\"\"\n Check which data is already in place (according to the site lists)\n and remove them from the data placement to be performed next.\n If workflow has XRootD/AAA enabled, data location can be outside of the\n SiteWhitelist.\n :param wflow: workflow object\n \"\"\"\n if not wflow.getInputDataset():\n return\n\n wflowPnns = self._getPNNsFromPSNs(wflow.getSitelist())\n primaryAAA = wflow.getReqParam(\"TrustSitelists\")\n msg = \"Checking data location for request: %s, TrustSitelists: %s, request white/black list PNNs: %s\"\n self.logger.info(msg, wflow.getName(), primaryAAA, wflowPnns)\n\n if not wflow.getPileupDatasets():\n # perfect, it does not depend on pileup location then\n pass\n elif primaryAAA:\n # perfect, data can be anywhere\n pass\n elif wflow.getSecondarySummary() and wflow.getPURSElist():\n # still pileup datasets to be transferred\n wflowPnns = wflow.getPURSElist()\n self.logger.info(\"using: %s for primary/parent/pileup data placement\", wflowPnns)\n finalPNN = self._checkPrimaryDataVolume(wflow, wflowPnns)\n self.logger.info(\"Forcing all primary/parent data to be placed under: %s\", finalPNN)\n wflow.setPURSElist(finalPNN)\n wflowPnns = finalPNN\n elif wflow.getPURSElist():\n # all pileup datasets are already in place\n wflowPnns = wflow.getPURSElist()\n self.logger.info(\"using: %s for primary/parent data placement\", wflowPnns)\n finalPNN = self._checkPrimaryDataVolume(wflow, wflowPnns)\n self.logger.info(\"Forcing all primary/parent data to be placed under: %s\", finalPNN)\n wflow.setPURSElist(finalPNN)\n wflowPnns = finalPNN\n else:\n self.logger.error(\"Unexpected condition for request: %s ...\", wflow.getName())\n\n for methodName in (\"getPrimaryBlocks\", \"getParentBlocks\"):\n inputBlocks = getattr(wflow, methodName)()\n self.logger.info(\"Request %s has %d initial blocks from %s\",\n wflow.getName(), len(inputBlocks), methodName)\n\n for block, blockDict in listitems(inputBlocks): # dict can change size here\n blockLocation = self._diskPNNs(blockDict['locations'])\n if primaryAAA and blockLocation:\n msg = \"Primary/parent block %s already in place (via AAA): %s\" % (block, blockLocation)\n self.logger.info(msg)\n inputBlocks.pop(block)\n elif blockLocation:\n commonLocation = wflowPnns & set(blockLocation)\n if commonLocation:\n self.logger.info(\"Primary/parent block %s already in place: %s\", block, commonLocation)\n inputBlocks.pop(block)\n else:\n self.logger.info(\"block: %s will need data placement!!!\", block)\n else:\n self.logger.info(\"Primary/parent block %s not available in any disk storage\", block)\n\n self.logger.info(\"Request %s has %d final blocks from %s\",\n wflow.getName(), len(getattr(wflow, methodName)()), methodName)\n\n def _checkPrimaryDataVolume(self, wflow, wflowPnns):\n \"\"\"\n Calculate the total data volume already available in the\n restricted list of PNNs, such that we can minimize primary/\n parent data transfers\n :param wflow: a workflow object\n :param wflowPnns: set with the allowed PNNs to receive data\n :return: the PNN which contains most of the data already in\n \"\"\"\n msg = \"Checking primary data volume for: %s, allowed PNNs: %s\"\n self.logger.info(msg, wflow.getName(), wflowPnns)\n\n volumeByPNN = dict()\n for pnn in wflowPnns:\n volumeByPNN.setdefault(pnn, 0)\n\n for methodName in (\"getPrimaryBlocks\", \"getParentBlocks\"):\n 
inputBlocks = getattr(wflow, methodName)()\n self.logger.info(\"Request %s has %d initial blocks from %s\",\n wflow.getName(), len(inputBlocks), methodName)\n\n for block, blockDict in viewitems(inputBlocks):\n blockLocation = self._diskPNNs(blockDict['locations'])\n commonLocation = wflowPnns & set(blockLocation)\n if not commonLocation:\n continue\n for pnn in commonLocation:\n volumeByPNN[pnn] += blockDict['blockSize']\n\n maxSize = 0\n finalPNN = set()\n self.logger.info(\"Primary/parent data volume currently available:\")\n for pnn, size in viewitems(volumeByPNN):\n self.logger.info(\" PNN: %s\\t\\tData volume: %s GB\", pnn, gigaBytes(size))\n if size > maxSize:\n maxSize = size\n finalPNN = {pnn}\n elif size == maxSize:\n finalPNN.add(pnn)\n self.logger.info(\"The PNN that would require less data to be transferred is: %s\", finalPNN)\n if len(finalPNN) > 1:\n # magically picks one site from the list. It could pick the one with highest\n # available quota, but that might overload that one site...\n # make sure it's a set object\n finalPNN = choice(list(finalPNN))\n finalPNN = {finalPNN}\n self.logger.info(\"Randomly picked PNN: %s as final location\", finalPNN)\n\n return finalPNN\n\n def checkPUDataLocation(self, wflow):\n \"\"\"\n Check the workflow configuration - in terms of AAA - and the secondary\n pileup distribution; and if possible remove the pileup dataset from the\n next step where data is placed.\n If workflow has XRootD/AAA enabled, data location can be outside of the\n SiteWhitelist.\n :param wflow: workflow object\n \"\"\"\n pileupInput = wflow.getSecondarySummary()\n if not pileupInput:\n # nothing to be done here\n return\n\n wflowPnns = self._getPNNsFromPSNs(wflow.getSitelist())\n secondaryAAA = wflow.getReqParam(\"TrustPUSitelists\")\n msg = \"Checking secondary data location for request: {}, \".format(wflow.getName())\n msg += \"TrustPUSitelists: {}, request white/black list PNNs: {}\".format(secondaryAAA, wflowPnns)\n self.logger.info(msg)\n\n if secondaryAAA:\n # what matters is to have pileup dataset(s) available in ANY disk storage\n for dset, dsetDict in listitems(pileupInput): # dict can change size here\n datasetLocation = self._diskPNNs(dsetDict['locations'])\n msg = \"it has secondary: %s, total size: %s GB, disk locations: %s\"\n self.logger.info(msg, dset, gigaBytes(dsetDict['dsetSize']), datasetLocation)\n if datasetLocation:\n self.logger.info(\"secondary dataset %s already in place through AAA: %s\",\n dset, datasetLocation)\n pileupInput.pop(dset)\n else:\n self.logger.info(\"secondary dataset %s not available even through AAA\", dset)\n else:\n if len(pileupInput) == 1:\n for dset, dsetDict in listitems(pileupInput): # dict can change size here\n datasetLocation = self._diskPNNs(dsetDict['locations'])\n msg = \"it has secondary: %s, total size: %s GB, current disk locations: %s\"\n self.logger.info(msg, dset, gigaBytes(dsetDict['dsetSize']), datasetLocation)\n commonLocation = wflowPnns & set(datasetLocation)\n if commonLocation:\n msg = \"secondary dataset: %s already in place. 
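# --- Illustrative sketch (added for clarity; not part of the original module) ---
# _checkPrimaryDataVolume above keeps the PNN(s) already holding the largest
# volume of primary/parent blocks and breaks ties with a random choice. The
# selection step in isolation, with hypothetical per-PNN volumes in bytes:
from random import choice

volumeByPNN = {'T1_US_FNAL_Disk': 3 * 10**12,
               'T2_CH_CERN': 5 * 10**12,
               'T2_DE_DESY': 5 * 10**12}
maxSize = max(volumeByPNN.values())
finalPNN = {pnn for pnn, size in volumeByPNN.items() if size == maxSize}
if len(finalPNN) > 1:
    finalPNN = {choice(list(finalPNN))}   # same random tie-break as the method
print(finalPNN)   # one of {'T2_CH_CERN'} / {'T2_DE_DESY'}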
\"\n msg += \"Common locations with site white/black list is: %s\"\n self.logger.info(msg, dset, commonLocation)\n pileupInput.pop(dset)\n wflow.setPURSElist(commonLocation)\n else:\n self.logger.info(\"secondary: %s will need data placement!!!\", dset)\n elif len(pileupInput) >= 2:\n # then make sure multiple pileup datasets are available at the same location\n # Note: avoid transferring the biggest one\n largestSize = 0\n largestDset = \"\"\n for dset, dsetDict in viewitems(pileupInput):\n if dsetDict['dsetSize'] > largestSize:\n largestSize = dsetDict['dsetSize']\n largestDset = dset\n datasetLocation = self._diskPNNs(pileupInput[largestDset]['locations'])\n msg = \"it has multiple pileup datasets, the largest one is: %s,\"\n msg += \"total size: %s GB, current disk locations: %s\"\n self.logger.info(msg, largestDset, gigaBytes(largestSize), datasetLocation)\n commonLocation = wflowPnns & set(datasetLocation)\n if commonLocation:\n self.logger.info(\"Largest secondary dataset %s already in place: %s\",\n largestDset, datasetLocation)\n pileupInput.pop(largestDset)\n wflow.setPURSElist(commonLocation)\n else:\n self.logger.info(\"Largest secondary dataset %s not available in a common location. This is BAD!\")\n # now iterate normally through the pileup datasets\n for dset, dsetDict in listitems(pileupInput): # dict can change size here\n datasetLocation = self._diskPNNs(dsetDict['locations'])\n msg = \"it has secondary: %s, total size: %s GB, current disk locations: %s\"\n self.logger.info(msg, dset, gigaBytes(dsetDict['dsetSize']), datasetLocation)\n commonLocation = wflowPnns & set(datasetLocation)\n if not commonLocation:\n msg = \"secondary dataset: %s not in any common location. Its current locations are: %s\"\n self.logger.info(msg, dset, datasetLocation)\n elif commonLocation and not wflow.getPURSElist():\n # then it's the first pileup dataset available within the SiteWhitelist,\n # force its common location for the workflow from now on\n msg = \"secondary dataset: %s already in place: %s, common location: %s\"\n msg += \". Forcing the whole workflow to this new common location.\"\n self.logger.info(msg, dset, datasetLocation, commonLocation)\n pileupInput.pop(dset)\n wflow.setPURSElist(commonLocation)\n else:\n # pileup RSE list has already been defined. Get the new common location\n newCommonLocation = commonLocation & wflow.getPURSElist()\n if newCommonLocation:\n msg = \"secondary dataset: %s already in place. 
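# --- Illustrative sketch (added for clarity; not part of the original module) ---
# With multiple pileup datasets, the logic above first locates the largest one so
# that the biggest volume never has to move, then intersects the remaining
# datasets' locations with it. The selection step with hypothetical dataset
# names and sizes:
pileupInput = {'/A/B/PREMIX': {'dsetSize': 9 * 10**12},
               '/C/D/PREMIX': {'dsetSize': 2 * 10**12}}
largestDset = max(pileupInput, key=lambda d: pileupInput[d]['dsetSize'])
print(largestDset)   # /A/B/PREMIX -> its current location constrains the rest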
\"\n msg += \"New common locations with site white/black list is: %s\"\n self.logger.info(msg, dset, newCommonLocation)\n pileupInput.pop(dset)\n wflow.setPURSElist(newCommonLocation)\n else:\n msg = \"secondary dataset: %s is currently available within the site white/black list: %s\"\n msg += \" But there is no common location with the other(s) pileup datasets: %s\"\n msg += \" It will need data placement!!!\"\n self.logger.info(msg, dset, commonLocation, wflow.getPURSElist())\n\n # check if there are remaining pileups to be placed\n # we need to figure out its location NOW!\n if wflow.getSecondarySummary() and not wflow.getPURSElist():\n pnns = self._findFinalPULocation(wflow)\n wflow.setPURSElist(pnns)\n\n def _findFinalPULocation(self, wflow):\n \"\"\"\n Given a workflow object, find the secondary datasets left to be\n placed and decide which destination to be used, based on the campaign\n configuration and the site with more quota available\n :param wflow: the workflow object\n :return: a string with the final pileup destination PNN\n \"\"\"\n # FIXME: workflows should be marked as failed if there is no common\n # site between SiteWhitelist and secondary location\n psns = wflow.getSitelist()\n self.logger.info(\"Finding final pileup destination for request: %s\", wflow.getName())\n\n for dataIn in wflow.getDataCampaignMap():\n if dataIn[\"type\"] == \"secondary\" and dataIn['name'] in wflow.getSecondarySummary():\n # secondary still to be transferred\n dsetName = dataIn[\"name\"]\n campConfig = self.campaigns[dataIn['campaign']]\n\n commonPsns = set()\n # if the dataset has a location list, use solely that one\n if campConfig['Secondaries'].get(dsetName, []):\n campSecPSNs = self._getPSNsFromPNNs(campConfig['Secondaries'][dsetName])\n commonPsns = set(psns) & campSecPSNs\n if not commonPsns:\n msg = \"Workflow has been incorrectly assigned: %s. The secondary dataset: %s,\"\n msg += \"belongs to the campaign: %s, with Secondaries location set to: %s. \"\n msg += \"While the workflow has been assigned to: %s\"\n self.logger.error(msg, wflow.getName(), dsetName, dataIn['campaign'],\n campSecPSNs, psns)\n else:\n if dsetName.startswith(\"/Neutrino\"):\n # different PU type use different campaign attributes...\n campSecPSNs = self._getPSNsFromPNNs(campConfig['SecondaryLocation'])\n commonPsns = set(psns) & campSecPSNs\n if not commonPsns:\n msg = \"Workflow has been incorrectly assigned: %s. The secondary dataset: %s,\"\n msg += \"belongs to the campaign: %s, with SecondaryLocation set to: %s. \"\n msg += \"While the workflow has been assigned to: %s\"\n self.logger.error(msg, wflow.getName(), dsetName, dataIn['campaign'],\n campSecPSNs, psns)\n else:\n if campConfig['SiteWhiteList']:\n commonPsns = set(psns) & set(campConfig['SiteWhiteList'])\n if campConfig['SiteBlackList']:\n commonPsns = set(psns) - set(campConfig['SiteBlackList'])\n if not commonPsns:\n msg = \"Workflow has been incorrectly assigned: %s. The secondary dataset: %s,\"\n msg += \"belongs to the campaign: %s, which does not match the campaign SiteWhiteList: %s \"\n msg += \"and SiteBlackList: %s. 
While the workflow has been assigned to: %s\"\n self.logger.error(msg, wflow.getName(), dsetName, dataIn['campaign'],\n campConfig['SiteWhiteList'], campConfig['SiteBlackList'], psns)\n if not commonPsns:\n # returns an empty set, which will make this workflow to be skipped for the moment\n return commonPsns\n\n pnns = self._getPNNsFromPSNs(commonPsns)\n self.logger.info(\" found a PSN list: %s, which maps to a list of PNNs: %s\", commonPsns, pnns)\n return pnns\n\n def makeTransferRequest(self, wflow):\n \"\"\"\n Checks which input data has to be transferred, select the final destination if needed,\n create the transfer record to be stored in Couch, and create the DM placement request.\n This method does the following:\n 1. return if there is no workflow data to be transferred\n 2. check if the data input campaign is in the database, skip if not\n 3. _getValidSites: using the workflow site lists and the campaign configuration,\n find a common list of sites (converted to PNNs). If the PNN is out of quota,\n it's also removed from this list\n 4. create the transfer record dictionary\n 5. for every final node\n 5.1. if it's a pileup dataset, pick a random node and subscribe the whole container\n 5.2. else, retrieve chunks of blocks to be subscribed (evenly distributed)\n 5.3. update node usage with the amount of data subscribed\n 6. re-evaluate nodes with quota exceeded\n 7. return the transfer record, with a list of transfer IDs\n :param wflow: workflow object\n :return: boolean whether it succeeded or not, and a list of transfer records\n \"\"\"\n response = []\n success = True\n if not (wflow.getParentBlocks() or wflow.getPrimaryBlocks() or wflow.getSecondarySummary()):\n self.logger.info(\"Request %s does not have any further data to transfer\", wflow.getName())\n return success, response\n\n self.logger.info(\"Handling data subscriptions for request: %s\", wflow.getName())\n\n for dataIn in wflow.getDataCampaignMap():\n if dataIn[\"type\"] == \"parent\":\n msg = \"Skipping 'parent' data subscription (done with the 'primary' data), for: %s\" % dataIn\n self.logger.info(msg)\n continue\n elif dataIn[\"type\"] == \"secondary\" and dataIn['name'] not in wflow.getSecondarySummary():\n # secondary already in place\n continue\n\n if wflow.getPURSElist() and not wflow.isRelVal():\n # then the whole workflow is very much limited to a single site\n nodes = list(wflow.getPURSElist() & self.rseQuotas.getAvailableRSEs())\n if not nodes:\n msg = \"Workflow: %s can only run in RSEs with no available space: %s. \"\n msg += \"Skipping this workflow until space gets released\"\n self.logger.warning(msg, wflow.getName(), wflow.getPURSElist())\n return False, response\n else:\n nodes = self._getValidSites(wflow, dataIn)\n if not nodes:\n msg = \"There are no RSEs with available space for %s. 
\" % wflow.getName()\n msg += \"Skipping this workflow until RSEs get enough free space\"\n self.logger.warning(msg)\n return False, response\n\n transRec = newTransferRec(dataIn)\n for blocks, dataSize, idx in self._decideDataDestination(wflow, dataIn, len(nodes)):\n if not blocks and dataIn[\"type\"] == \"primary\":\n # no valid files in any blocks, it will likely fail in global workqueue\n return success, response\n if blocks:\n subLevel = \"block\"\n else:\n # enforce a container-level Rucio rule\n subLevel = \"container\"\n blocks = None\n\n success, transferId = self.makeTransferRucio(wflow, dataIn, subLevel,\n blocks, dataSize, nodes, idx)\n\n if not success:\n # stop any other data placement for this workflow\n msg = \"There were failures transferring data for workflow: %s. Will retry again later.\"\n self.logger.warning(msg, wflow.getName())\n break\n if transferId:\n if isinstance(transferId, (set, list)):\n transRec['transferIDs'].update(transferId)\n else:\n transRec['transferIDs'].add(transferId)\n self.rseQuotas.updateNodeUsage(nodes[idx], dataSize)\n\n # and update some instance caches\n if subLevel == 'container':\n self.dsetCounter += 1\n else:\n self.blockCounter += len(blocks)\n\n transRec['transferIDs'] = list(transRec['transferIDs'])\n response.append(transRec)\n\n # once the workflow has been completely processed, update the node usage\n self.rseQuotas.evaluateQuotaExceeded()\n return success, response\n\n def makeTransferRucio(self, wflow, dataIn, subLevel, blocks, dataSize, nodes, nodeIdx):\n \"\"\"\n Creates a Rucio rule object and make a replication rule in Rucio\n\n :param wflow: the workflow object\n :param dataIn: short summary of the data to be placed\n :param subLevel: subscription level (container or block)\n :param blocks: list of blocks to be subscribed (or None if dataset level)\n :param dataSize: amount of data being placed by this rule\n :param nodes: list of nodes/RSE\n :param nodeIdx: index of the node/RSE to be used in the replication rule\n :return: a boolean flagging whether it succeeded or not, and the rule id\n \"\"\"\n success, transferId = True, set()\n subLevel = \"ALL\" if subLevel == \"container\" else \"DATASET\"\n dids = blocks if blocks else [dataIn['name']]\n\n ruleAttrs = {'copies': 1,\n 'activity': 'Production Input',\n 'lifetime': self.msConfig['rulesLifetime'],\n 'account': self.msConfig['rucioAccount'],\n 'grouping': subLevel,\n 'weight': self.msConfig['rucioRuleWeight'],\n 'meta': {'workflow_group': wflow.getWorkflowGroup()},\n 'comment': 'WMCore MSTransferor input data placement'}\n\n if wflow.getParentDataset():\n # then we need to make sure the child and its parent blocks end up in the same RSE\n rseExpr = nodes[nodeIdx]\n msg = \"Primary data placement with parent blocks, putting all in the same RSE: {}\".format(rseExpr)\n self.logger.info(msg)\n elif ruleAttrs['grouping'] == \"ALL\":\n # this means we are placing the whole container under the same RSE.\n # Ask Rucio which RSE we should use, provided a list of them\n rseExpr = \"|\".join(nodes)\n rseTuple = self.rucio.pickRSE(rseExpr)\n if not rseTuple:\n self.logger.error(\"PickRSE did not return any valid RSE for expression: %s\", rseExpr)\n return False, transferId\n self.logger.info(\"Placing whole container, picked RSE: %s out of an RSE list: %s\",\n rseTuple[0], rseExpr)\n rseExpr = rseTuple[0]\n else:\n # then grouping is by DATASET, and there is no parent dataset\n # we can proceed with the primary blocks data placement in all RSEs\n rseExpr = \"|\".join(nodes)\n msg = 
\"Primary data placement without any parent dataset, \"\n msg += \"using all RSEs for the rule creation: {}\".format(rseExpr)\n self.logger.info(msg)\n\n if self.msConfig.get('enableDataTransfer', True):\n # Force request-only subscription\n # to any data transfer going above some threshold (do not auto-approve)\n aboveWarningThreshold = self.msConfig.get('warningTransferThreshold') > 0. and \\\n dataSize > self.msConfig.get('warningTransferThreshold')\n\n # Then make the data subscription, for real!!!\n self.logger.info(\"Creating rule for workflow %s with %d DIDs in container %s, RSEs: %s, grouping: %s\",\n wflow.getName(), len(dids), dataIn['name'], rseExpr, subLevel)\n try:\n res = self.rucio.createReplicationRule(dids, rseExpr, **ruleAttrs)\n except Exception as exc:\n msg = \"Hit a bad exception while creating replication rules for DID: %s. Error: %s\"\n self.logger.error(msg, dids, str(exc))\n success = False\n else:\n if res:\n # it could be that some of the DIDs already had such a rule in\n # place, so we might be retrieving a bunch of rule ids instead of\n # a single one\n self.logger.info(\"Rules successful created for %s : %s\", dataIn['name'], res)\n transferId.update(res)\n # send an alert, if needed\n self.alertLargeInputData(aboveWarningThreshold, transferId, wflow.getName(), dataSize, dataIn)\n else:\n self.logger.error(\"Failed to create rule for %s, will retry later\", dids)\n success = False\n else:\n msg = \"DRY-RUN: making Rucio rule for workflow: %s, dids: %s, rse: %s, kwargs: %s\"\n self.logger.info(msg, wflow.getName(), dids, rseExpr, ruleAttrs)\n return success, transferId\n\n def sendAlert(self, alertName, severity, summary, description, service, endSecs=1 * 60 * 60):\n \"\"\"\n Send alert to Prometheus, wrap function in a try-except clause\n \"\"\"\n try:\n # alert to expiry in an hour from now\n self.alertManagerApi.sendAlert(alertName, severity, summary, description,\n service, endSecs=endSecs)\n except Exception as ex:\n self.logger.exception(\"Failed to send alert to %s. Error: %s\", self.alertManagerUrl, str(ex))\n\n def alertPUMisconfig(self, workflowName):\n \"\"\"\n Send alert to Prometheus with PU misconfiguration error\n \"\"\"\n alertName = \"{}: PU misconfiguration error. Workflow: {}\".format(self.alertServiceName,\n workflowName)\n alertSeverity = \"high\"\n alertSummary = \"[MSTransferor] Workflow cannot proceed due to some PU misconfiguration.\"\n alertDescription = \"Workflow: {} could not proceed due to some PU misconfiguration,\".format(workflowName)\n alertDescription += \"so it will be skipped.\"\n self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,\n self.alertServiceName)\n self.logger.critical(alertDescription)\n\n def alertUnknownTransferError(self, workflowName):\n \"\"\"\n Send alert to Prometheus with unknown transfer error\n \"\"\"\n alertName = \"{}: Transfer request error. 
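# --- Illustrative sketch (added for clarity; not part of the original module) ---
# makeTransferRucio above expresses the candidate destinations as a Rucio RSE
# expression, the '|' (union) join of the node names, before either picking a
# single RSE for container-level rules or handing the whole expression to the
# block-level rule. The join step with made-up node names:
nodes = ['T1_US_FNAL_Disk', 'T2_DE_DESY']
rseExpr = '|'.join(nodes)
print(rseExpr)   # T1_US_FNAL_Disk|T2_DE_DESY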
Workflow: {}\".format(self.alertServiceName,\n workflowName)\n alertSeverity = \"high\"\n alertSummary = \"[MSTransferor] Unknown exception while making transfer request.\"\n alertDescription = \"Unknown exception while making Transfer request for workflow: {}\".format(workflowName)\n self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,\n self.alertServiceName)\n\n def alertTransferCouchDBError(self, workflowName):\n \"\"\"\n Send alert to Prometheus with CouchDB transfer error\n \"\"\"\n alertName = \"{}: Failed to create a transfer document in CouchDB for workflow: {}\".format(self.alertServiceName,\n workflowName)\n alertSeverity = \"high\"\n alertSummary = \"[MSTransferor] Transfer document could not be created in CouchDB.\"\n alertDescription = \"Workflow: {}, failed request due to error posting to CouchDB\".format(workflowName)\n self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,\n self.alertServiceName)\n self.logger.warning(alertDescription)\n\n\n def alertLargeInputData(self, aboveWarningThreshold, transferId, wflowName, dataSize, dataIn):\n \"\"\"\n Evaluates whether the amount of data placed is too big, if so, send an alert\n notification to a few persons\n :param aboveWarningThreshold: boolean flag saying if the thresholds was exceeded or not\n :param transferId: rule/transfer request id\n :param wflowName: name of the workflow\n :param dataSize: total amount of data subscribed\n :param dataIn: short summary of the workflow data\n \"\"\"\n # Warn about data transfer subscriptions going above some threshold\n if aboveWarningThreshold:\n alertName = \"{}: input data transfer over threshold: {}\".format(self.alertServiceName,\n wflowName)\n alertSeverity = \"high\"\n alertSummary = \"[MS] Large pending data transfer under request id: {}\".format(transferId)\n alertDescription = \"Workflow: {} has a large amount of \".format(wflowName)\n alertDescription += \"data subscribed: {} TB, \".format(teraBytes(dataSize))\n alertDescription += \"for {} data: {}.\"\"\".format(dataIn['type'], dataIn['name'])\n\n self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,\n self.alertServiceName)\n self.logger.warning(alertDescription)\n\n def _getValidSites(self, wflow, dataIn):\n \"\"\"\n Given a workflow object and the data short summary, find out\n the Campaign name, the workflow SiteWhitelist, map the PSNs to\n PNNs and finally remove PNNs without space\n can still receive data\n :param wflow: the workflow object\n :param dataIn: short summary of data to be transferred\n :return: a unique and ordered list of PNNs to take data\n \"\"\"\n campConfig = self.campaigns[dataIn['campaign']]\n psns = wflow.getSitelist()\n\n if dataIn[\"type\"] == \"primary\":\n if campConfig['SiteWhiteList']:\n psns = set(psns) & set(campConfig['SiteWhiteList'])\n if campConfig['SiteBlackList']:\n psns = set(psns) - set(campConfig['SiteBlackList'])\n\n self.logger.info(\" final list of PSNs to be use: %s\", psns)\n pnns = self._getPNNsFromPSNs(psns)\n\n if wflow.isRelVal():\n self.logger.info(\"RelVal workflow '%s' ignores sites out of quota\", wflow.getName())\n return list(pnns)\n\n self.logger.info(\"List of out-of-space RSEs dropped for '%s' is: %s\",\n wflow.getName(), pnns & self.rseQuotas.getOutOfSpaceRSEs())\n return list(pnns & self.rseQuotas.getAvailableRSEs())\n\n\n def _decideDataDestination(self, wflow, dataIn, numNodes):\n \"\"\"\n Given a global list of blocks and the campaign configuration,\n decide which blocks have to be transferred and to 
where.\n :param wflow: workflow object\n :param dataIn: dictionary with a summary of the data to be placed\n :param numNodes: amount of nodes/RSEs that can receive data\n :return: yield a block list, the total chunk size and a node index\n \"\"\"\n # FIXME: implement multiple copies (MaxCopies > 1)\n blockList = []\n dsetName = dataIn[\"name\"]\n\n ### NOTE: data placement done in a block basis\n if dataIn[\"type\"] == \"primary\":\n # Except for DQMHarvest workflows, which must have a data placement of the\n # whole dataset within the same location\n if wflow.getReqType() == \"DQMHarvest\":\n numNodes = 1\n # if there is no parent data, just make one big rule for all the primary data\n # against all RSEs available for the workflow (intersection with PU data\n if not wflow.getParentBlocks():\n numNodes = 1\n listBlockSets, listSetsSize = wflow.getChunkBlocks(numNodes)\n if not listBlockSets:\n self.logger.warning(\" found 0 primary/parent blocks for dataset: %s, moving on...\", dsetName)\n yield blockList, 0, 0\n for idx, blocksSet in enumerate(listBlockSets):\n self.logger.info(\"Have a chunk of %d blocks (%s GB) for dataset: %s\",\n len(blocksSet), gigaBytes(listSetsSize[idx]), dsetName)\n yield blocksSet, listSetsSize[idx], idx\n ### NOTE: data placement done in a dataset basis\n elif dataIn[\"type\"] == \"secondary\":\n # secondary datasets are transferred as a whole, until better days...\n dsetSize = wflow.getSecondarySummary()\n dsetSize = dsetSize[dsetName]['dsetSize']\n # randomly pick one of the PNNs to put the whole pileup dataset in\n idx = randint(0, numNodes - 1)\n self.logger.info(\"Have whole PU dataset: %s (%s GB)\", dsetName, gigaBytes(dsetSize))\n yield blockList, dsetSize, idx\n\n def createTransferDoc(self, reqName, transferRecords):\n \"\"\"\n Enrich the records returned from the data placement logic, wrap them up\n in a single document and post it to CouchDB\n :param reqName: the workflow name\n :param transferRecords: list of dictionaries records, or empty if no input at all\n :return: True if operation is successful, else False\n \"\"\"\n doc = newTransferDoc(reqName, transferRecords)\n # Use the update/put method, otherwise it will fail if the document already exists\n if self.reqmgrAux.updateTransferInfo(reqName, doc):\n return True\n self.logger.error(\"Failed to create transfer document in CouchDB. 
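# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The helpers _getPNNsFromPSNs and _diskPNNs keep only disk endpoints: T3 sites,
# T2_CH_CERNBOX and anything ending in _Tape/_MSS/_Export are dropped. The same
# filter applied to a hypothetical PNN list:
pnnList = ['T1_US_FNAL_Disk', 'T1_US_FNAL_Tape', 'T3_US_Baylor',
           'T2_CH_CERNBOX', 'T0_CH_CERN_MSS', 'T2_DE_DESY']
diskPNNs = {pnn for pnn in pnnList
            if pnn != 'T2_CH_CERNBOX'
            and not pnn.startswith('T3_')
            and not pnn.endswith(('_Tape', '_MSS', '_Export'))}
print(sorted(diskPNNs))   # ['T1_US_FNAL_Disk', 'T2_DE_DESY']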
Will retry again later.\")\n return False\n\n def _getPNNsFromPSNs(self, psnList):\n \"\"\"\n Given a list/set of PSNs, return a set of valid PNNs.\n Note that T3, Tape and a few other PNNs are never returned.\n \"\"\"\n pnns = set()\n for psn in psnList:\n for pnn in self.psn2pnnMap.get(psn, []):\n if pnn == \"T2_CH_CERNBOX\" or pnn.startswith(\"T3_\"):\n pass\n elif pnn.endswith(\"_Tape\") or pnn.endswith(\"_MSS\") or pnn.endswith(\"_Export\"):\n pass\n else:\n pnns.add(pnn)\n return pnns\n\n def _getPSNsFromPNNs(self, pnnList):\n \"\"\"\n Given a list/set of PNNs, return a set of valid PSNs.\n Note that T3 sites are never returned.\n \"\"\"\n psns = set()\n for pnn in pnnList:\n for psn in self.pnn2psnMap.get(pnn, []):\n if psn.startswith(\"T3_\"):\n pass\n else:\n psns.add(psn)\n return psns\n\n def _diskPNNs(self, pnnList):\n \"\"\"\n Provided a list of PNN locations, return another list of\n PNNs without mass storage and T3 sites\n :param pnnList: list of PNN strings\n :return: a set of strings with filtered out PNNs\n \"\"\"\n diskPNNs = set()\n for pnn in pnnList:\n if pnn == \"T2_CH_CERNBOX\" or pnn.startswith(\"T3_\"):\n pass\n elif pnn.endswith(\"_Tape\") or pnn.endswith(\"_MSS\") or pnn.endswith(\"_Export\"):\n pass\n else:\n diskPNNs.add(pnn)\n return diskPNNs\n","sub_path":"src/python/WMCore/MicroService/MSTransferor/MSTransferor.py","file_name":"MSTransferor.py","file_ext":"py","file_size_in_byte":48955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"184031358","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport base64\nfrom wsgiref.simple_server import make_server\nfrom wsgiref.simple_server import WSGIRequestHandler\nimport sopdscfg\nfrom wsgiref.util import setup_testing_defaults\nimport traceback\n\nimport os\nimport sys\nimport functools\nimport mimetypes\nimport urllib.request\nimport xml.sax\nimport re\nimport jinja2\nimport time\nimport base64\n\nfrom pprint import pprint\n\nmime = mimetypes.MimeTypes()\n\n\nclass OpdsDocument:\n def __init__(self, url, user, password):\n \"\"\"\n A simple function to converts XML data into native Python object.\n \"\"\"\n\n non_id_char = re.compile('[^_0-9a-zA-Z]')\n def _name_mangle(name):\n return non_id_char.sub('_', name)\n\n class DataNode(object):\n def __init__(self):\n self._attrs = {} # XML attributes and child elements\n self.data = None # child text data\n\n def __len__(self):\n # treat single element as a list of 1\n return 1\n\n def __getitem__(self, key):\n if isinstance(key, str):\n return self._attrs.get(key,None)\n else:\n return [self][key]\n\n def __contains__(self, name):\n return self._attrs.has_key(name)\n\n def __nonzero__(self):\n return bool(self._attrs or self.data)\n\n def __getattr__(self, name):\n if name.startswith('__'):\n # need to do this for Python special methods???\n raise AttributeError(name)\n return self._attrs.get(name,None)\n\n def _add_xml_attr(self, name, value):\n if name in self._attrs:\n # multiple attribute of the same name are represented by a list\n children = self._attrs[name]\n if not isinstance(children, list):\n children = [children]\n self._attrs[name] = children\n children.append(value)\n else:\n self._attrs[name] = value\n\n def __str__(self):\n return self.data or ''\n\n def __repr__(self):\n items = sorted(self._attrs.items())\n if self.data:\n items.append(('data', self.data))\n return '{%s}' % ', '.join(['%s:%s' % (k,repr(v)) for k,v in items])\n\n class TreeBuilder(xml.sax.handler.ContentHandler):\n def 
__init__(self):\n self.stack = []\n self.root = DataNode()\n self.current = self.root\n self.text_parts = []\n\n def startElement(self, name, attrs):\n self.stack.append((self.current, self.text_parts))\n self.current = DataNode()\n self.text_parts = []\n # xml attributes --> python attributes\n for k, v in attrs.items():\n self.current._add_xml_attr(_name_mangle(k), v)\n\n def endElement(self, name):\n text = ''.join(self.text_parts).strip()\n if text:\n self.current.data = text\n if self.current._attrs:\n obj = self.current\n else:\n # a text only node is simply represented by the string\n obj = text or ''\n self.current, self.text_parts = self.stack.pop()\n self.current._add_xml_attr(_name_mangle(name), obj)\n\n def characters(self, content):\n self.text_parts.append(content)\n\n self.variables = []\n\n req = urllib.request.Request(url)\n if user != '':\n auth = base64.b64encode((\"%s:%s\" % (user, password)).encode('UTF-8'))\n req.add_header('Authorization', \"Basic %s\" % auth.decode('latin-1'))\n\n resp = urllib.request.urlopen(req)\n self.status = resp.getcode()\n\n builder = TreeBuilder()\n xml.sax.parseString(resp.read(), builder)\n self.variables = builder.root._attrs\n self.variables['feed_kind'] = self._getFeedKind()\n\n\n def _getFeedKind(self):\n for link in self.variables['feed']['link']:\n if link['rel'] != 'self':\n continue\n\n type = \"%s\" % link['type'].lower()\n b = type.rfind(\"kind=\");\n if b < 0:\n continue\n b += 5\n\n e = type.find(\";\", b)\n if e < 0:\n return type[b:]\n else:\n return type[b:e]\n\n\n\nclass WebResponse:\n #********************************************\n #\n #********************************************\n def __init__(self, config, environ, start_response):\n self.config = config\n self.environ = environ\n self.start_response = start_response\n\n self.opdsProto = 'http'\n self.opdsPort = config.PORT\n if config.BIND_ADDRESS == '0.0.0.0':\n self.opdsHost = 'localhost'\n else:\n self.opdsHost = config.BIND_ADDRESS\n\n if ((self.opdsHost == 'localhost') or\n (self.opdsHost.startsWith(\"127.\"))):\n self.opdsHost = self.environ['HTTP_HOST'].replace(\":%d\" % config.WEB_PORT, '')\n\n self.url = self.environ['PATH_INFO']\n self.templatePath = os.path.join(config.WEB_TEMPLATES_DIR, config.WEB_THEME)\n self.parseQueryString(environ['QUERY_STRING'])\n\n\n self.opdsUrl= \"%s://%s:%d\" % (\n self.opdsProto,\n self.opdsHost,\n self.opdsPort)\n\n\n #********************************************\n #\n #********************************************\n def parseQueryString(self, quersString):\n self.query = urllib.parse.parse_qs(quersString, keep_blank_values=True)\n for key in self.query:\n self.query[key] = list(map(lambda x: urllib.parse.unquote(x), self.query[key]))\n\n\n #********************************************\n #\n #********************************************\n def send404(self):\n headers = [('Content-type', 'text/html; charset=utf-8')]\n\n self.start_response('404 Not Found', headers)\n\n return [((\"\\n\"\n \"\\n\"\n \"404 Not Found\\n\"\n \"\\n\"\n \"
<body>\\n\"\n \"<h1>Not Found</h1>\\n\"\n \"<p>The requested URL %s was not found on this server.</p>\\n\"\n \"<hr>\\n\"\n \"<address>SimpleOPDS web server at %s</address>\\n\"\n \"</body></html>\\n\") % (self.environ['PATH_INFO'], self.environ['HTTP_HOST'])\n ).encode(\"utf-8\")]\n\n\n #********************************************\n #\n #********************************************\n def sendHttpError(self, error):\n self.start_response('%s %s' % (error.code, error.msg), error.hdrs.items())\n return [(\"%s %s\" % (error.code, error.msg)).encode(\"utf-8\")]\n\n\n #********************************************\n #\n #********************************************\n def sendError(self, title, message):\n headers = [('Content-type', 'text/html; charset=utf-8')]\n\n self.start_response('200 OK', headers)\n\n return [((\"<html>\\n\"\n \"<head>\\n\"\n \"<title>%s</title>\\n\"\n \"</head>\\n\"\n \"<body>\\n\"\n \"<h1>%s</h1>\\n\"\n \"<p>%s.</p>\\n\"\n \"<hr>\\n\"\n \"<address>SimpleOPDS web server at %s</address>\\n\"\n \"</body></html>\\n\") % (\n title,\n title,\n message.replace('\\n', '<br>
\\n'),\n self.environ['HTTP_HOST'])\n ).encode(\"utf-8\")]\n\n\n #********************************************\n #\n #********************************************\n def process(self):\n\n if self.url.startswith('_'):\n return self.send404()\n\n if self.url == '/env.html':\n return self.sendEnv()\n\n url = self.url\n\n if os.path.isdir(self.templatePath + url):\n if url.endswith(\"/\"):\n url += \"index.html\"\n else:\n url += \"/index.html\"\n\n if os.path.isfile(self.templatePath + url):\n if url.endswith(\".html\"):\n return self.sendTemplate(url)\n else:\n return self.sendStatic(url)\n\n return self.send404()\n\n\n #********************************************\n #\n #********************************************\n def sendEnv(self):\n setup_testing_defaults(self.environ)\n status = '200 OK'\n headers = [('Content-type', 'text/plain; charset=utf-8')]\n\n self.start_response(status, headers)\n\n ret = [(\"%s: %s\\n\" % (key, value)).encode(\"utf-8\")\n for key, value in self.environ.items()]\n\n return ret\n\n\n #********************************************\n #\n #********************************************\n def sendStatic(self, url):\n fileName = self.templatePath + url\n\n (mimeType, mimeEncoding) = mime.guess_type(fileName)\n\n if mimeType == None:\n mimeType = 'application/octet-stream'\n\n if mimeEncoding == None:\n mimeEncoding = 'binary'\n\n f = open(fileName, 'rb')\n\n headers = [('Content-type', mimeType),\n ('Content-Length', \"%d\" % os.path.getsize(fileName)),\n ('Cache-control', \"private, max-age = 360000\")\n ]\n\n lines = f.readlines()\n f.close()\n\n self.start_response('200 OK', headers)\n return lines;\n\n\n #********************************************\n #\n #********************************************\n def sendTemplate(self, url):\n variables = {}\n\n variables['env'] = {}\n variables['env']['query']=self.query\n variables['env']['query_string']=self.environ['QUERY_STRING']\n variables['env']['path_info']=self.environ['PATH_INFO']\n variables['env']['http_host']=self.environ['HTTP_HOST']\n\n variables['env']['opds'] = {}\n\n variables['env']['opds']['host'] = self.opdsHost\n variables['env']['opds']['port'] = \"%d\" % self.opdsPort\n variables['env']['opds']['url'] = self.opdsUrl\n variables['env']['opds']['query'] = (\"?\" + self.environ['QUERY_STRING'], '') [self.environ['QUERY_STRING'] == \"\"]\n\n try:\n jinjaEnv = jinja2.Environment(\n loader=jinja2.FileSystemLoader(self.templatePath),\n undefined = jinja2.DebugUndefined\n )\n\n jinjaEnv.globals['opdsRequest'] = self.opdsRequest\n jinjaEnv.globals['getEntryLink'] = self.jinja_GetEntryLink\n jinjaEnv.globals['getDownloadLinks'] = self.jinja_GetDownloadLinks\n jinjaEnv.globals['prepareSearchLink'] = self.jinjaPrepareSearchLink\n jinjaEnv.globals['parseSearchLink'] = self.jinjaParseSearchLink\n jinjaEnv.globals['urlQuote'] = urllib.parse.quote\n jinjaEnv.globals['urlUnquote'] = urllib.parse.unquote\n\n jinjaEnv.filters['dateTimeFormat'] = self.jinjaDateTimeFormat\n jinjaEnv.filters['strToDateTime'] = self.jinjaStrToDateTime\n\n jinjaTemplate = jinjaEnv.get_template(url)\n\n res = jinjaTemplate.render(variables)\n\n except urllib.error.HTTPError as e:\n return self.sendHttpError(e)\n\n except jinja2.TemplateNotFound as e:\n traceback.print_exc()\n self.templateError()\n return self.sendError(\"Template error\", 'template \"%s/%s\" not found.' 
% (self.templatePath, e))\n\n except (jinja2.TemplateSyntaxError,\n jinja2.UndefinedError,\n TypeError,\n ValueError\n ) as e:\n traceback.print_exc()\n return self.sendError(\"Template error\", \"%s\" % e)\n\n headers = [('Content-type', 'text/html; charset=utf-8')]\n self.start_response('200 OK', headers)\n\n return [res.encode('utf-8')]\n\n #********************************************\n #\n #********************************************\n def getUserPass(self):\n if 'HTTP_AUTHORIZATION' in self.environ:\n (type, b64) = self.environ['HTTP_AUTHORIZATION'].split(' ')\n auth = base64.b64decode(b64.encode('latin-1')).decode('UTF-8')\n return auth.split(\":\")\n\n return ('', '')\n\n\n #********************************************\n #\n #********************************************\n def opdsRequest(self, request):\n url = \"%s://%s:%d%s\" % ( self.opdsProto,\n self.opdsHost,\n self.opdsPort,\n request)\n\n (user, password) = self.getUserPass()\n opds = OpdsDocument(url, user, password)\n return opds.variables\n\n\n #********************************************\n #\n #********************************************\n def jinjaPrepareSearchLink(self, template,\n searchTerms,\n count = '0',\n startIndex = '0',\n language = '*',\n inputEncoding = 'UTF-8',\n outputEncoding = 'UTF-8'\n ):\n\n def repl(template, key, value):\n if isinstance(value, list):\n value = value[0]\n\n template = template.replace('{%s}' % key, urllib.parse.quote(value))\n template = template.replace('{%s?}' % key, urllib.parse.quote(value))\n return template\n\n if isinstance(template, list):\n template = template[0]\n\n res = template\n res = repl(res, 'count', count)\n res = repl(res, 'startIndex', startIndex)\n res = repl(res, 'language', language)\n res = repl(res, 'inputEncoding', inputEncoding)\n res = repl(res, 'outputEncoding', outputEncoding)\n res = repl(res, 'searchTerms', searchTerms)\n\n return res\n\n\n #********************************************\n #\n #********************************************\n def jinjaParseSearchLink(self, link):\n href = urllib.parse.unquote(link['href'])\n if href.startswith('?'):\n href = href[1:]\n\n res = []\n fixedItems =[]\n notFixedItems =[]\n for h in href.split('&'):\n (key, val) = h.split('=')\n\n item = {}\n res.append(item)\n\n item['name'] = key\n item['fixed'] = not val.startswith('{')\n item['required'] = not val.endswith('?}')\n item['template'] = val\n\n if item['fixed']:\n item['value'] = val\n fixedItems.append(item)\n else:\n item['value'] = ''\n notFixedItems.append(item)\n\n def isQueryForItem(item):\n for f in fixedItems:\n if (not f['name'] in self.query or\n self.query[f['name']][0] != f['value']):\n return False\n return True\n\n for item in notFixedItems:\n if isQueryForItem(item):\n item['value'] = self.query[item['name']][0]\n\n return res\n\n\n #********************************************\n #\n #********************************************\n def jinja_GetEntryLink(self, entry):\n result = \"\"\n for link in entry['link']:\n result = link\n\n return result\n\n\n #********************************************\n #\n #********************************************\n def jinja_GetDownloadLinks(self, entry, types = [\"application/fb2\", \"application/fb2+zip\", \"application/epub+zip\", \"application/mobi+zip\"]):\n\n result = []\n\n for request in types:\n for link in entry['link']:\n if link['rel'] != \"http://opds-spec.org/acquisition\":\n continue\n\n if link['type'] == request:\n link.href = self.opdsUrl + link.href\n result.append(link)\n\n return 
result\n\n\n #********************************************\n #\n #********************************************\n def jinjaStrToDateTime(self, value, format='%Y-%m-%dT%H:%M:%SZ'):\n return time.strptime(value, format)\n\n\n #********************************************\n #\n #********************************************\n def jinjaDateTimeFormat(self, value, format='%H:%M / %d-%m-%Y'):\n return time.strftime(format, value)\n\n\n\n#************************************************\n#\n#************************************************\nclass WebServer:\n\n #********************************************\n #\n #********************************************\n def __init__(self, config):\n self.config = config\n\n\n #********************************************\n #\n #********************************************\n def serve(self, environ, start_response):\n response = WebResponse(self.config, environ, start_response)\n return response.process()\n\n\n #********************************************\n #\n #********************************************\n def start(self):\n try:\n httpd = make_server(self.config.WEB_BIND_ADDRESS,\n self.config.WEB_PORT,\n self.serve)\n\n print('Started Simple OPDS WEB server on port ', self.config.WEB_PORT)\n httpd.serve_forever()\n\n except KeyboardInterrupt:\n print('^C received, shutting down the web server')\n httpd.socket.close()\n\n\n\n#************************************************\n#\n#************************************************\ndef start_server(config=None):\n if not config:\n config = sopdscfg.cfgreader()\n\n server = WebServer(config)\n server.start()\n\n\n#************************************************\n#\n#************************************************\nif __name__ == \"__main__\":\n start_server()\n","sub_path":"py/sopdsweb.py","file_name":"sopdsweb.py","file_ext":"py","file_size_in_byte":18399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"475682311","text":"\"\"\"Main functions and routes for Airbnb app.\"\"\"\n\nimport os\nfrom flask import Flask, request, render_template, redirect, url_for, flash\nfrom werkzeug.utils import secure_filename\n\nfrom .wrangler import wrangle_image, predict\nfrom .stuff import AMENITIES\nfrom .models import DB, User, Property\n\n\ndef create_app():\n \"\"\"Construct app and its routes.\"\"\"\n app = Flask(__name__)\n\n UPLOAD_FOLDER = \"airbnb_app/static/images/\"\n app.config[\"UPLOAD_FOLDER\"] = UPLOAD_FOLDER\n app.config[\"SECRET_KEY\"] = 'secret-key-goes-here'\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = os.getenv('DATABASE_URL')\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n DB.init_app(app)\n\n @app.route(\"/\")\n def root():\n return render_template(\"base.html\", title=\"Home\")\n\n @app.route(\"/upload\")\n def upload():\n return render_template(\"upload.html\", title=\"Upload\",\n amenities=AMENITIES)\n\n @app.route(\"/upload\", methods=[\"POST\"])\n def upload_post():\n \"\"\"\n Evaluate user input to return estimate.\n \"\"\"\n if request.method == \"POST\":\n email = request.form.get(\"email\")\n if not email:\n flash(\"Please provide an email address\")\n return redirect(url_for(\"upload\"))\n\n name = request.form.get(\"name\")\n if not name:\n flash(\"Please provide a name for your property\")\n return redirect(url_for(\"upload\"))\n\n amens = request.form.getlist(\"amenities\")\n if not amens:\n flash(\"Please select at least one amenity\")\n return redirect(url_for(\"upload\"))\n\n desc = request.form.get(\"description\")\n if not desc:\n flash(\"Please provide a description for your property\")\n return redirect(url_for(\"upload\"))\n\n img = request.files.get(\"file\", False)\n if not img:\n flash(\"Please provide an image\")\n return redirect(url_for(\"upload\"))\n else:\n filename = secure_filename(img.filename)\n orig_dir = os.path.join(app.config[\"UPLOAD_FOLDER\"],\n str(filename))\n new_dir = \"images/resized/\"\n img.save(orig_dir)\n\n path = wrangle_image(orig_dir, new_dir)\n\n price = predict(path, desc, amens)\n\n DB.create_all()\n user = (User.query.get(email)) or User(id=email)\n DB.session.add(user)\n\n new_input = Property(user_id=email,\n name=name,\n amenities=amens,\n description=desc,\n image=filename,\n price=price)\n\n DB.session.add(new_input)\n\n DB.session.commit()\n\n return render_template(\"upload.html\", price=price)\n\n @app.route(\"/listings\", methods=[\"POST\"])\n # URL converters supply email and property_name from the path on GET requests.\n @app.route(\"/listings/<email>/<property_name>\", methods=[\"GET\"])\n def search_post(email=None, property_name=None):\n if request.method == \"GET\":\n user = User.query.get(email)\n properties = user.property\n for property in properties:\n if property.name == property_name:\n return render_template(\"property.html\", title=\"Listing\",\n property=property)\n return str(user.property.name == property_name)\n email = request.form.get(\"search\")\n user = User.query.get(email)\n if user:\n return render_template(\"listings.html\", title=\"Listings\",\n user=user, email=email)\n else:\n return render_template(\"listings.html\", title=\"Listings\",\n properties=\"\", email=email)\n\n return app\n","sub_path":"airbnb_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"313268684","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 18 12:05:47 2021\n\n@author: agbod\n\"\"\"\n\ntry:\n import pyi_splash\n pyi_splash.update_text('Loading complete...')\n pyi_splash.close()\nexcept Exception:\n pass\n\nfrom aiohttp.client_exceptions import ClientConnectorError\nfrom asyncio import new_event_loop, set_event_loop, Queue, LifoQueue, create_task, gather, sleep\nfrom argparse import ArgumentParser\nfrom asyncio.exceptions import TimeoutError\nfrom pandas import Series, DataFrame\nfrom numpy import array, vectorize\nfrom numpy import double as npdouble\nfrom matplotlib import rcParams\nfrom matplotlib.pyplot import Figure, gcf\nfrom matplotlib.dates import DateFormatter, MinuteLocator\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom time import time, strftime, localtime, daylight, timezone, altzone\nfrom time import sleep as tsleep\nimport websockets.legacy.client\nfrom threading import Thread\nimport sys\nimport os\nfrom os.path import abspath, join\nfrom queue import Queue as qQueue\nfrom datetime import datetime\nfrom nest_asyncio import apply\nfrom tkinter import X, BOTH, TOP, BOTTOM, LEFT, END, HORIZONTAL, WORD, Y, RIGHT, SINGLE, VERTICAL\nfrom PIL import ImageTk, Image\nfrom binance import Client, AsyncClient, BinanceSocketManager\nfrom binance.helpers import round_step_size\nfrom binance.exceptions import BinanceRequestException, BinanceAPIException\nfrom tkinter.ttk import Combobox\nfrom tkinter import StringVar, IntVar, Canvas, Tk, Entry, Button, Frame, Label\nfrom tkinter import OptionMenu, Listbox, Scale, Scrollbar, scrolledtext, Radiobutton, messagebox\nfrom subprocess import STARTUPINFO, STARTF_USESHOWWINDOW, Popen, PIPE\n\ndef resource_path(relative_path):\n try:\n base_path = sys._MEIPASS\n 
except Exception:\n base_path = abspath(\".\")\n\n return join(base_path, relative_path)\n\nFILE_NAME = resource_path('TradeRecords.txt')\nlogoImg = resource_path('Logo.ico')\nnameImg = resource_path('NameCropped.png')\nwelcomeImg = resource_path('Welcome.png')\n\nparser = ArgumentParser(prog='AutoTrader', add_help=False)\nparser.add_argument('-r', '--restart', action='store_true', default=False)\nparser.add_argument('-t', '--useTestnet', action='store_true', default=False)\nparser.add_argument('-a', '--all', nargs='*')\nparser.add_argument('-b', '--bases', nargs='*')\nparser.add_argument('-o', '--openPositions', nargs='*', action='append', default=[])\nargs = parser.parse_args()\nrcParams['font.size'] = 10\n\nmyFmt = DateFormatter('%H:%M')\nmns = MinuteLocator()\n\nif not args.restart:\n useTestnet = ''\n API_KEY = ''\n SECRET_KEY = ''\n sl = 0.8\n tp = 1.0\n trade_allocation = ''\n useClosingTime = 0\n startTime = 0\n closingTime = 0\n strategy = ''\n quote = 'USDT'\n trade_count = qQueue()\n trade_count.put_nowait(1)\n bases = []\n curr_open_positions = qQueue()\n curr_open_positions.put_nowait([])\nelse:\n useTestnet = args.useTestnet\n API_KEY = args.all[0]\n SECRET_KEY = args.all[1]\n sl = float(args.all[2])\n tp = float(args.all[3])\n trade_allocation = float(args.all[4])\n useClosingTime = int(args.all[5])\n startTime = int(args.all[6])\n closingTime = int(args.all[7])\n strategy = int(args.all[8])\n quote = args.all[9]\n trade_count = qQueue()\n trade_count.put_nowait(int(args.all[10]))\n bases = args.bases\n curr_open_positions = qQueue()\n l = []\n dictionary = {}\n for sublist in args.openPositions:\n dictionary = {'stop-loss': float(sublist[0]), \n 'take-profit': float(sublist[1]), \n 'qty': float(sublist[2]), \n 'id': int(sublist[3]), \n 'time': sublist[4], \n 'base': sublist[5]\n }\n l.append(dictionary) \n curr_open_positions.put_nowait(l)\n\nbase = 'BTC'\npair = base + quote  # keep the quote chosen above (default 'USDT', or args.all[9] on restart)\nclient = ''\nfilters = {}\nhourBoxEntry = \"00\"\nminBoxEntry = \"00\"\noffset = ''\nstartBaseBalance = {}\nstartQuoteBalance = ''\ncurrentBaseBalance = {}\ncurrentQuoteBalance = ''\nchart_num = ''\nclose = False\nmaxQuote = ''\nmin_trade_allocation = ''\n\nclass TheWindow:\n def __init__(self):\n self.root = Tk()\n self.root.title(\"AutoTrader\")\n self.root.iconbitmap(logoImg)\n \n self.root.geometry(\"800x400\")\n self.root.configure(bg = \"#80D5FF\")\n \n self.imgs = [0, 0]\n i = Image.open(nameImg)\n j = i.resize((round(i.size[0] * 0.4), round(i.size[1] * 0.4)))\n self.imgs[0] = ImageTk.PhotoImage(j)\n i = Image.open(welcomeImg)\n j = i.resize((round(i.size[0] * 0.8), round(i.size[1] * 0.8)))\n self.imgs[1] = ImageTk.PhotoImage(j)\n \n self.root.resizable(False, False)\n\n self.run()\n\n def run(self):\n if not args.restart:\n self.initGUI()\n else:\n self.initGUI2()\n \n def initGUI(self):\n introFrame = Frame(\n self.root,\n bg = \"#80D5FF\",\n height = 400,\n width = 800,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n introFrame.place(x = 0, y = 0)\n \n label1 = Label(introFrame,\n image=self.imgs[1],\n height = 400,\n width = 400,\n bg=\"#80D5FF\"\n )\n label1.place(\n x=200,\n y=0\n )\n \n next_button = Button(\n introFrame,\n text=\"next\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.destroyWelcomeFrame([introFrame]),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n next_button.place(\n x=700,\n y=320\n )\n \n def initGUI2(self):\n introFrame = Frame(\n self.root,\n bg 
= \"#80D5FF\",\n height = 400,\n width = 800,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n introFrame.place(x = 0, y = 0)\n \n label1 = Label(introFrame,\n image=self.imgs[1],\n height = 400,\n width = 400,\n bg=\"#80D5FF\"\n )\n label1.place(\n x=200,\n y=0\n )\n \n next_button = Button(\n introFrame,\n text=\"next\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.destroyWelcomeFrame2([introFrame]),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n next_button.place(\n x=700,\n y=320\n )\n \n def destroyWelcomeFrame(self, prevFrames):\n for frame in prevFrames:\n frame.destroy()\n self.testnet()\n \n def destroyWelcomeFrame2(self, prevFrames):\n global client\n global filters\n global offset\n global startBaseBalance\n global startQuoteBalance\n global currentBaseBalance\n global currentQuoteBalance\n global chart_num\n global maxQuote\n global min_trade_allocation\n\n try:\n client = Client(API_KEY, SECRET_KEY, testnet=useTestnet)\n client.get_account()\n except ClientConnectorError:\n messagebox.showerror(\n title=\"Could not create client.\",\n message=\"Please check connection. \")\n return\n except TimeoutError:\n messagebox.showerror(\n title=\"Could not create client.\",\n message=\"Connection timeout. \")\n return\n except Exception as e:\n messagebox.showerror(\n title=\"Could not create client.\",\n message=str(e))\n return\n \n if useClosingTime:\n if localtime().tm_isdst and daylight:\n offset = -altzone\n else:\n offset = -timezone\n\n for b in bases:\n bal = client.get_asset_balance(asset=b)\n bal = float(bal['free'])\n startBaseBalance[b] = bal\n startQuoteBalance = client.get_asset_balance(asset=quote)\n startQuoteBalance = float(startQuoteBalance['free'])\n currentBaseBalance = startBaseBalance\n currentQuoteBalance = startQuoteBalance\n \n if strategy == 0:\n chart_num = 1\n elif strategy == 1:\n chart_num = 1\n elif strategy == 2:\n chart_num = 1\n elif strategy == 3:\n chart_num = 2\n elif strategy == 4:\n chart_num = 3\n elif strategy == 5:\n chart_num = 4\n elif strategy == 6:\n chart_num = 3\n \n maxQuote = []\n min_trade_allocation = []\n for b in bases:\n p = b + quote\n data = client.get_symbol_info(p)\n f = {}\n f['lotStepSize'] = 1.0\n f['minQty'] = 0.0\n f['maxQty'] = 1000000000000000.0\n f['stepSize'] = 1.0\n f['minNotional'] = 1.0\n f['tickSize'] = 1.0\n f['maxPrecision'] = 10 ** -(data['baseAssetPrecision'])\n all_filters = data['filters']\n for dictionary in all_filters:\n if dictionary['filterType'] == 'LOT_SIZE':\n try:\n f['lotStepSize'] = float(dictionary['stepSize'])\n except Exception:\n f['lotStepSize'] = 1.0\n if dictionary['filterType'] == 'MARKET_LOT_SIZE':\n try:\n f['minQty'] = float(dictionary['minQty'])\n except Exception:\n f['minQty'] = 0.0\n try:\n f['maxQty'] = float(dictionary['maxQty'])\n except Exception:\n f['maxQty'] = 1000000000000000.0\n try:\n f['stepSize'] = float(dictionary['stepSize'])\n except Exception:\n f['stepSize'] = 1.0\n if dictionary['filterType'] == 'MIN_NOTIONAL':\n try:\n f['minNotional'] = float(dictionary['minNotional'])\n except Exception:\n f['minNotional'] = 1.0\n if dictionary['filterType'] == 'PRICE_FILTER':\n try:\n f['tickSize'] = float(dictionary['tickSize'])\n except Exception:\n f['tickSize'] = 1.0\n filters[p] = f\n temp = client.get_ticker(symbol=p)\n temp = float(temp[\"lastPrice\"])\n if temp == 0:\n temp = 1\n maxQuote.append(temp * filters[p]['maxQty'] * (100 / 104))\n 
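# NOTE: the 100/104 and 100/96 factors appear to keep orders about 4% inside the exchange's max-quantity and min-notional limits, presumably as headroom for fees and price movement.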
min_trade_allocation.append(filters[p]['minNotional'] * (100 / 96))\n \n maxQuote = min(maxQuote)\n min_trade_allocation = max(min_trade_allocation)\n \n for frame in prevFrames:\n frame.destroy()\n self.root.geometry(\"1000x500\")\n self.root.configure(bg='#C4C4C4')\n self.mainWindow()\n\n def testnet(self):\n infoFrame = Frame(\n self.root,\n bg = \"#80D5FF\",\n height = 400,\n width = 200,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n infoFrame.place(x = 0, y = 0)\n \n useTestnetFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 400,\n width = 600,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n useTestnetFrame.place(x = 200, y = 0)\n prevFrames = [infoFrame, useTestnetFrame]\n \n button_1 = Button(\n useTestnetFrame,\n text=\"Yes\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.clientKeys(True, prevFrames),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n button_1.place(\n x=150.0,\n y=150,\n width=100.0,\n height=60.0\n )\n \n button_2 = Button(\n useTestnetFrame,\n text=\"No\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.clientKeys(False, prevFrames),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n button_2.place(\n x=350.0,\n y=150,\n width=100.0,\n height=60.0\n )\n \n name = Label(\n infoFrame,\n image=self.imgs[0],\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(25), \"bold\")\n )\n name.place(\n x=1,\n y=30\n )\n \n info = Label(\n infoFrame,\n text=\"The testnet is a test environment \\n\"\n \"for the Binance network that allows \\n\"\n \"you to trial this program with simulated \\n\"\n \"funds, although the prices are not \\n\"\n \"always an accurate reflection of \\n\"\n \"real-time prices. \\n\\n\", \n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n \n \n useTestnetQuestion = Label(\n useTestnetFrame,\n text=\"Do you want to use a Testnet?\",\n bg=\"white\",\n fg=\"black\",\n font=(\"Roboto\", int(20))\n )\n useTestnetQuestion.place(\n x=100,\n y=50\n )\n \n def clientKeys(self, val, prevFrames):\n global useTestnet\n useTestnet = val\n \n infoFrame = Frame(\n self.root,\n bg = \"#80D5FF\",\n height = 400,\n width = 200,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n infoFrame.place(x = 0, y = 0)\n \n prevFrames[0].destroy()\n prevFrames[1].destroy()\n \n clientDetailsFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 400,\n width = 600,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n clientDetailsFrame.place(x = 200, y = 0)\n \n name = Label(\n infoFrame,\n image=self.imgs[0],\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(25), \"bold\")\n )\n name.place(\n x=1,\n y=30\n )\n \n if val:\n text1 = \"Binance Testnet API key\"\n text2 = \"Binance Testnet secret key\"\n info = Label(\n infoFrame,\n text=\"If you don't already have Testnet API \\n\"\n \"keys, head to testnet.binance.vision, \\n\"\n \"login with Github credentials and \\n\"\n \"click on generate HMAC_SHA256 Key, \\n\"\n \"then copy the keys into the spaces \\n\"\n \"provided. \\n\\n\"\n \"Be sure to save the keys to a secure \\n\"\n \"location for future ease of access. \\n\\n\"\n \"Please check that your internet \\n\"\n \"connection is working properly. 
\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n else:\n text1 = \"Binance API key\"\n text2 = \"Binance secret key\"\n info = Label(\n infoFrame,\n text=\"If you don't already have Binance API \\n\"\n \"keys, register an account with \\n\"\n \"Binance or login to your existing \\n\"\n \"one, then click 'API Management' \\n\"\n \"from the user center icon. Enter \\n\"\n \"a label for your API and click \\n\"\n \"'Create API'. Ensure the 'Enable \\n\"\n \"Reading' and 'Enable Spot and \\n\"\n \"Margin Trading' restrictions are \\n\"\n \"selected. \\n\\n\"\n \"Be sure to save the keys to a secure \\n\"\n \"location for future ease of access. \\n\\n\"\n \"Please check that your internet \\n\"\n \"connection is working properly. \",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n \n label1 = Label(\n clientDetailsFrame, \n text=text1,\n bg=\"white\",\n fg=\"black\",\n font=(\"Roboto\", int(18))\n )\n label1.place(x=30, y=70)\n API_KEY_entry = Entry(\n clientDetailsFrame, \n bd=4, \n bg=\"#DADADA\", \n highlightthickness=0,\n font=(\"Roboto\", int(15))\n )\n API_KEY_entry.place(x=30, y=120, width=500, height=40)\n API_KEY_entry.focus()\n \n label2 = Label(\n clientDetailsFrame, \n text=text2,\n bg=\"white\",\n fg=\"black\",\n font=(\"Roboto\", int(18))\n )\n label2.place(x=30, y=170)\n SECRET_KEY_entry = Entry(\n clientDetailsFrame, \n bd=4, \n bg=\"#DADADA\", \n highlightthickness=0, \n font=(\"Roboto\", int(15)),\n show=\"*\"\n )\n SECRET_KEY_entry.place(x=30, y=220, width=500, height=40)\n \n prevFrames = [infoFrame, clientDetailsFrame]\n keys = [API_KEY_entry, SECRET_KEY_entry]\n \n button_1 = Button(\n clientDetailsFrame,\n text=\"Create client\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.validateClient(prevFrames, keys),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n button_1.place(\n x=380,\n y=300\n )\n \n button_2 = Button(\n clientDetailsFrame,\n text=\"prev\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.destroyWelcomeFrame(prevFrames),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n button_2.place(\n x=30,\n y=300\n )\n \n def validateClient(self, prevFrames, keys):\n global client\n global API_KEY\n global SECRET_KEY\n \n API_KEY = keys[0].get()\n SECRET_KEY = keys[1].get()\n \n try:\n client = Client(API_KEY, SECRET_KEY, testnet=useTestnet)\n client.get_account()\n except ClientConnectorError:\n messagebox.showerror(\n title=\"Could not create client.\",\n message=\"Please check connection. \")\n return\n # 'except A or B' only ever catches A; a tuple is required to catch both exception types.\n except (BinanceRequestException, BinanceAPIException) as e:\n messagebox.showerror(\n title=\"Could not create client.\",\n message=str(e) + \"\\nPlease enter valid keys. \")\n return\n except TimeoutError:\n messagebox.showerror(\n title=\"Could not create client.\",\n message=\"Connection timeout. 
\")\n return\n except Exception as e:\n messagebox.showerror(\n title=\"Could not create client.\",\n message=str(e))\n return\n else:\n self.destroyClientKeysFrame(prevFrames)\n \n def destroyClientKeysFrame(self, prevFrames):\n for frame in prevFrames:\n frame.destroy()\n self.setPairs()\n \n def setPairs(self):\n infoFrame = Frame(\n self.root,\n bg = \"#80D5FF\",\n height = 400,\n width = 200,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n infoFrame.place(x = 0, y = 0)\n \n pairsFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 400,\n width = 600,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n pairsFrame.place(x = 200, y = 0)\n \n name = Label(\n infoFrame,\n image=self.imgs[0],\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(25), \"bold\")\n )\n name.place(\n x=1,\n y=30\n )\n \n info = Label(\n infoFrame,\n text=\"Enter or select a quote currency \\n\"\n \"and up to 10 base currencies that \\n\"\n \"form valid Binance pairs. \\n\\n\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n \n quote_label = Label(\n pairsFrame, \n text=\"Quote symbol: \",\n bg=\"white\",\n fg=\"black\",\n font=(\"Roboto\", int(10))\n )\n quote_label.place(x=260, y=120)\n \n quoteChoice = StringVar(pairsFrame) \n quoteDropdown = Combobox(pairsFrame, textvariable=quoteChoice, width=6)\n if useTestnet:\n quoteDropdown[\"values\"] = (\"USDT\", \"BUSD\", \"BTC\", \"BNB\")\n else:\n quoteDropdown[\"values\"] = (\"USDT\", \"USDC\", \"BUSD\", \"TUSD\", \"PAX\", \"BTC\", \"ETH\", \"BNB\")\n \n quoteDropdown.place(x=347, y=120)\n quoteDropdown.set(quote)\n \n base_label = Label(\n pairsFrame, \n text=\"Base symbol: \",\n bg=\"white\",\n fg=\"black\",\n font=(\"Roboto\", int(10))\n )\n base_label.place(x=265, y=170)\n \n baseChoice = StringVar(pairsFrame) \n baseDropdown = Combobox(pairsFrame, textvariable=baseChoice, width=6)\n if useTestnet:\n baseDropdown[\"values\"] = (\"BTC\", \"ETH\", \"BNB\", \"LTC\", \"TRX\", \"XRP\")\n else:\n baseDropdown[\"values\"] = (\"BTC\", \"ETH\", \"BNB\", \"LTC\", \"TRX\", \"XRP\",\n \"NEO\", \"QTUM\", \"EOS\", \"SNT\", \"BNT\", \"DOGE\", \n \"GAS\", \"HSR\", \"OAX\", \"DNT\", \"ICN\", \"MANA\", \n \"WTC\", \"LRC\", \"YOYO\", \"OMG\", \"ZRX\", \"MATIC\", \n \"SNGLS\", \"BQX\", \"KNC\", \"FUN\", \"SNM\", \"IOTA\", \n \"LINK\", \"XVG\", \"SALT\", \"MDA\", \"MTL\", \"SUB\", \n \"ETC\", \"MTH\", \"ENG\", \"DNT\", \"ZEC\", \"AST\", \n \"DASH\", \"BTG\", \"EVX\", \"REQ\", \"VIB\", \"HSR\", \n \"POWR\", \"ARK\", \"DGD\", \"ADA\")\n \n baseDropdown.place(x=347, y=170)\n baseDropdown.set(base)\n \n selectButton = Button(\n pairsFrame,\n text=\"select\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.addPair(baseChoice, quoteChoice, pairsListbox, selectButton, removeButton, quoteDropdown),\n relief=\"ridge\"\n )\n selectButton.place(\n x=420,\n y=170, \n width=40,\n height=20\n )\n\n removeButton = Button(\n pairsFrame,\n text=\"remove\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.removePair(pairsListbox, selectButton, removeButton, quoteDropdown),\n relief=\"ridge\"\n )\n removeButton.place(\n x=270,\n y=250, \n width=50,\n height=20\n )\n\n pairsListbox = Listbox(pairsFrame, selectmode=SINGLE, bg='#DADADA', bd=4)\n pairsListbox.place(x=100, y=100, width=100, height=200)\n for b in bases:\n 
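# Re-add pairs chosen earlier so the selection survives 'prev' navigation and restarts.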
pairsListbox.insert(\"end\", b + quote)\n if bases == []:\n removeButton.configure(state='disabled')\n else:\n quoteDropdown.configure(state='disabled')\n if len(bases) == 10:\n selectButton.configure(state='disabled')\n \n prevFrames = [infoFrame, pairsFrame]\n \n button_1 = Button(\n pairsFrame,\n text=\"next\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.validatePairs(prevFrames),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n button_1.place(\n x=440,\n y=300\n )\n\n def addPair(self, baseVar, quoteVar, listBox, addButton, removeButton, quoteBox):\n global base\n global quote\n global bases\n \n base = baseVar.get()\n quote = quoteVar.get()\n pair = base + quote\n \n data = client.get_symbol_info(pair)\n if data == None:\n messagebox.showerror(\n title=\"Error\",\n message=pair + \" is not a valid symbol pair. \")\n return\n elif base in bases:\n messagebox.showerror(\n title=\"Error\",\n message=\"You have already selected \" + pair + \". \")\n return\n else:\n listBox.insert(\"end\", pair)\n if len(listBox.curselection()) == 0:\n listBox.activate(\"end\")\n listBox.select_set(\"end\")\n removeButton.configure(state='normal')\n bases.append(base)\n \n if listBox.size() >= 1:\n quoteBox.configure(state='disabled')\n if listBox.size() >= 10:\n addButton.configure(state='disabled')\n \n def removePair(self, listBox, addButton, removeButton, quoteBox):\n global bases\n \n temp = listBox.curselection()\n if len(temp) != 0:\n listBox.delete(temp[0])\n bases.remove(bases[temp[0]])\n if listBox.size() == 0:\n quoteBox.configure(state='normal')\n else:\n listBox.activate(\"end\")\n listBox.select_set(\"end\")\n\n if listBox.size() < 10:\n addButton.configure(state='normal')\n if len(listBox.curselection()) == 0:\n removeButton.configure(state='disabled')\n \n def validatePairs(self, prevFrames):\n if bases == []:\n messagebox.showerror(\n title=\"Error\",\n message=\"Please select at least one pair. \")\n return\n self.destroyPairsFrame(prevFrames)\n\n def destroyPairsFrame(self, prevFrames):\n for frame in prevFrames:\n frame.destroy()\n self.settings()\n\n def settings(self):\n infoFrame = Frame(\n self.root,\n bg = \"#80D5FF\",\n height = 400,\n width = 200,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n infoFrame.place(x = 0, y = 0)\n \n settingsFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 400,\n width = 600,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n settingsFrame.place(x = 200, y = 0)\n \n name = Label(\n infoFrame,\n image=self.imgs[0],\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(25), \"bold\")\n )\n name.place(\n x=1,\n y=30\n )\n \n info = Label(\n infoFrame,\n text=\"Lot size is a fixed amount of the \\n\"\n \"quote currency to be entered into \\n\"\n \"each trade. \\n\\n\"\n \"Closing time is the time of day the \\n\"\n \"application automatically closes all \\n\"\n \"its trades and exits within 24 hours. 
\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n \n sl_label = Label(\n settingsFrame, \n text=\"Stop-loss percentage\",\n bg=\"white\",\n fg=\"black\",\n font=(\"Roboto\", int(10))\n )\n sl_label.place(x=20, y=70)\n \n slScale = Scale(\n settingsFrame, \n from_=0.02, \n to=2.00, \n resolution=0.02, \n orient=HORIZONTAL, \n length=200\n )\n slScale.set(sl)\n slScale.place(x=20, y=100)\n \n tp_label = Label(\n settingsFrame, \n text=\"Take-profit percentage\",\n bg=\"white\",\n fg=\"black\",\n font=(\"Roboto\", int(10))\n )\n tp_label.place(x=300, y=70)\n \n tpScale = Scale(\n settingsFrame, \n from_=0.02, \n to=2.00, \n resolution=0.02, \n orient=HORIZONTAL, \n length=200\n )\n tpScale.set(tp)\n tpScale.place(x=300, y=100)\n \n lotSizeLabel = Label(\n settingsFrame, \n text=\"Lot Size (quote currency)\",\n bg=\"white\",\n fg=\"black\",\n font=(\"Roboto\", int(10))\n )\n lotSizeLabel.place(x=20, y=170)\n \n LotSizeEntry = Entry(\n settingsFrame, \n bd=2, \n width=16\n )\n LotSizeEntry.place(x=20, y=200)\n LotSizeEntry.insert(0, str(trade_allocation))\n LotSizeEntry.focus()\n \n closingTimeLabel = Label(\n settingsFrame, \n text=\"Closing Time [HH:MM]\",\n bg=\"white\",\n fg=\"black\",\n font=(\"Roboto\", int(10))\n )\n closingTimeLabel.place(x=300, y=170)\n \n HourChoice = StringVar(settingsFrame)\n HourBox = Combobox(settingsFrame, textvariable=HourChoice, state=\"readonly\", width=4)\n HourBox[\"values\"] = tuple([f\"{i:02d}\" for i in range(0, 24)])\n HourBox.place(x=300, y=200)\n HourBox.set(hourBoxEntry)\n MinChoice = StringVar(settingsFrame)\n MinBox = Combobox(settingsFrame, textvariable=MinChoice, state=\"readonly\", width=4)\n MinBox[\"values\"] = tuple([f\"{i:02d}\" for i in range(0, 60)])\n MinBox.place(x=350, y=200)\n MinBox.set(minBoxEntry)\n enableButton = Button(\n settingsFrame,\n text=\"Enable\" if not useClosingTime else \"Disable\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=1,\n highlightthickness=0,\n command=lambda: self.enableClosingTime(HourBox, MinBox, enableButton),\n font=(\"Roboto\", int(8)),\n relief=\"ridge\"\n )\n enableButton.place(\n x=400,\n y=200\n )\n if not useClosingTime:\n HourBox.configure(state='disabled')\n MinBox.configure(state='disabled')\n \n prevFrames = [infoFrame, settingsFrame]\n setting_vars = [slScale, tpScale, LotSizeEntry, HourChoice, MinChoice]\n \n button_1 = Button(\n settingsFrame,\n text=\"next\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.validateSettings(prevFrames, setting_vars),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n button_1.place(\n x=440,\n y=300\n )\n \n button_2 = Button(\n settingsFrame,\n text=\"prev\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.destroyClientKeysFrame(prevFrames),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n button_2.place(\n x=30,\n y=300\n )\n\n def enableClosingTime(self, HourBox, MinBox, enableButton):\n global useClosingTime\n useClosingTime = int(not useClosingTime)\n \n if useClosingTime:\n HourBox.configure(state='normal')\n MinBox.configure(state='normal')\n enableButton.configure(text=\"Disable\")\n else:\n HourBox.configure(state='disabled')\n MinBox.configure(state='disabled')\n enableButton.configure(text=\"Enable\")\n \n def validateSettings(self, prevFrames, setting_vars):\n global base\n 
global filters\n global sl\n global tp\n global hourBoxEntry\n global minBoxEntry\n global trade_allocation\n global startTime\n global closingTime\n global offset\n global startBaseBalance\n global startQuoteBalance\n global currentBaseBalance\n global currentQuoteBalance\n global maxQuote\n global min_trade_allocation\n \n sl = setting_vars[0].get()\n tp = setting_vars[1].get()\n hourBoxEntry = setting_vars[3].get()\n minBoxEntry = setting_vars[4].get()\n try:\n trade_allocation = float(setting_vars[2].get())\n except ValueError:\n messagebox.showerror(\n title=\"Error\",\n message=\"Enter a number for Lot Size. \")\n return\n except Exception as e:\n messagebox.showerror(\n title=\"Error\",\n message=e)\n return\n \n if useClosingTime:\n if localtime().tm_isdst and daylight:\n offset = -altzone\n else:\n offset = -timezone\n \n startTime = ((time() % 86400) + offset) % 86400\n closingTime = hourBoxEntry + \":\" + minBoxEntry\n closingTime = datetime.strptime(closingTime, '%H:%M')\n closingTime = (closingTime - datetime(1900, 1, 1)).total_seconds()\n \n if ((time() % 86400) + offset) % 86400 < closingTime:\n closingTime = closingTime - startTime\n else:\n closingTime = 86400 - startTime + closingTime\n\n for b in bases:\n bal = client.get_asset_balance(asset=b)\n bal = float(bal['free'])\n startBaseBalance[b] = bal\n startQuoteBalance = client.get_asset_balance(asset=quote)\n startQuoteBalance = float(startQuoteBalance['free'])\n currentBaseBalance = startBaseBalance\n currentQuoteBalance = startQuoteBalance\n \n maxQuote = []\n min_trade_allocation = []\n for b in bases:\n p = b + quote\n data = client.get_symbol_info(p)\n f = {}\n f['lotStepSize'] = 1.0\n f['minQty'] = 0.0\n f['maxQty'] = 1000000000000000.0\n f['stepSize'] = 1.0\n f['minNotional'] = 1.0\n f['tickSize'] = 1.0\n f['maxPrecision'] = 10 ** -(data['baseAssetPrecision'])\n all_filters = data['filters']\n for dictionary in all_filters:\n if dictionary['filterType'] == 'LOT_SIZE':\n try:\n f['lotStepSize'] = float(dictionary['stepSize'])\n except Exception:\n f['lotStepSize'] = 1.0\n if dictionary['filterType'] == 'MARKET_LOT_SIZE':\n try:\n f['minQty'] = float(dictionary['minQty'])\n except Exception:\n f['minQty'] = 0.0\n try:\n f['maxQty'] = float(dictionary['maxQty'])\n except Exception:\n f['maxQty'] = 1000000000000000.0\n try:\n f['stepSize'] = float(dictionary['stepSize'])\n except Exception:\n f['stepSize'] = 1.0\n if dictionary['filterType'] == 'MIN_NOTIONAL':\n try:\n f['minNotional'] = float(dictionary['minNotional'])\n except Exception:\n f['minNotional'] = 1.0\n if dictionary['filterType'] == 'PRICE_FILTER':\n try:\n f['tickSize'] = float(dictionary['tickSize'])\n except Exception:\n f['tickSize'] = 1.0\n filters[p] = f\n temp = client.get_ticker(symbol=p)\n temp = float(temp[\"lastPrice\"])\n if temp == 0:\n temp = 1\n maxQuote.append(temp * filters[p]['maxQty'] * (100 / 104))\n min_trade_allocation.append(filters[p]['minNotional'] * (100 / 96))\n\n maxQuote = min(maxQuote)\n min_trade_allocation = max(min_trade_allocation)\n \n if trade_allocation < min_trade_allocation:\n messagebox.showerror(\n title=\"Error\",\n message=\"Trade allocation is too small. \\n\"\n \"minimum value is \" + str(min_trade_allocation))\n return\n elif trade_allocation > startQuoteBalance:\n messagebox.showerror(\n title=\"Error\",\n message=\"Trade allocation is too large. 
\\n\"\n \"You do not have enough \" + quote)\n return\n elif trade_allocation > maxQuote:\n messagebox.showerror(\n title=\"Error\",\n message=\"Trade allocation is too large. \\n\"\n \"Maximum value is\" + str(maxQuote))\n return\n \n base = bases[0]\n \n self.destroySettingsFrame(prevFrames)\n \n def destroySettingsFrame(self, prevFrames):\n for frame in prevFrames:\n frame.destroy()\n self.chooseStrategy()\n \n def chooseStrategy(self): \n infoFrame = Frame(\n self.root,\n bg = \"#80D5FF\",\n height = 400,\n width = 200,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n infoFrame.place(x = 0, y = 0)\n \n strategyFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 400,\n width = 600,\n bd = 0,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n strategyFrame.place(x = 200, y = 0)\n \n name = Label(\n infoFrame,\n image=self.imgs[0],\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(25), \"bold\")\n )\n name.place(\n x=1,\n y=30\n )\n \n strategyChoice = IntVar(strategyFrame) \n info = Label(\n infoFrame,\n text=\"Enter and exit trades \\n\"\n \"manually. \\n\\n\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n option0 = Radiobutton(\n strategyFrame, \n text=\"Manual Trading\", \n variable=strategyChoice, \n command=lambda: self.strategyLabels(strategyChoice, infoFrame),\n value=0, \n width=60, \n anchor=\"w\"\n )\n option0.place(x=50, y=60)\n option1 = Radiobutton(\n strategyFrame, \n text=\"Simple Moving Average Strategy\", \n variable=strategyChoice, \n command=lambda: self.strategyLabels(strategyChoice, infoFrame),\n value=1, \n width=60, \n anchor=\"w\"\n )\n option1.place(x=50, y=90)\n option2 = Radiobutton(\n strategyFrame, \n text=\"Exponential Moving Average Strategy\", \n variable=strategyChoice, \n command=lambda: self.strategyLabels(strategyChoice, infoFrame),\n value=2, \n width=60, \n anchor=\"w\"\n )\n option2.place(x=50, y=120)\n option3 = Radiobutton(\n strategyFrame, \n text=\"MACD Strategy\", \n variable=strategyChoice,\n command=lambda: self.strategyLabels(strategyChoice, infoFrame),\n value=3, \n width=60, \n anchor=\"w\"\n )\n option3.place(x=50, y=150)\n option4 = Radiobutton(\n strategyFrame, \n text=\"MACD + RSI Strategy\", \n variable=strategyChoice,\n command=lambda: self.strategyLabels(strategyChoice, infoFrame),\n value=4, \n width=60, \n anchor=\"w\"\n )\n option4.place(x=50, y=180)\n option5 = Radiobutton(\n strategyFrame, \n text=\"MACD + RSI + Stochastic Strategy\", \n variable=strategyChoice,\n command=lambda: self.strategyLabels(strategyChoice, infoFrame),\n value=5, \n width=60, \n anchor=\"w\"\n )\n option5.place(x=50, y=210)\n option6 = Radiobutton(\n strategyFrame, \n text=\"Moving Average + MACD + RSI Strategy\", \n variable=strategyChoice,\n command=lambda: self.strategyLabels(strategyChoice, infoFrame),\n value=6, \n width=60, \n anchor=\"w\"\n )\n option6.place(x=50, y=240)\n option0.select()\n\n prevFrames = [infoFrame, strategyFrame]\n button_1 = Button(\n strategyFrame,\n text=\"Finish\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.clearWindow(strategyChoice),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n button_1.place(\n x=420,\n y=300\n )\n \n button_2 = Button(\n strategyFrame,\n text=\"prev\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: 
self.destroyPairsFrame(prevFrames),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n button_2.place(\n x=30,\n y=300\n )\n \n def strategyLabels(self, choice, infoFrame):\n for w in infoFrame.winfo_children():\n w.destroy()\n\n name = Label(\n infoFrame,\n text=\"AutoTrader\",\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(25), \"bold\")\n )\n name.place(\n x=12,\n y=30\n )\n\n if choice.get() == 0:\n info = Label(\n infoFrame,\n text=\"Enter and exit trades \\n\"\n \"manually. \\n\\n\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n elif choice.get() == 1:\n info = Label(\n infoFrame,\n text=\"This strategy places a buy \\n\"\n \"order when the current 20 MA \\n\"\n \"crosses the 50 MA upwards or if \\n\"\n \"the price dips below the 20 MA \\n\"\n \"during a perfect uptrend. \\n\"\n \"20 MA > 50 MA > 100 MA > 200 MA \\n\\n\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n elif choice.get() == 2:\n info = Label(\n infoFrame,\n text=\"This strategy places a buy \\n\"\n \"order when the current 12 EMA \\n\"\n \"crosses the 26 EMA upwards or if \\n\"\n \"the price dips below the 12 EMA \\n\"\n \"in a perfect uptrend. \\n\"\n \"12 EMA > 26 EMA > 50 EMA > 100 EMA\\n\\n\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n elif choice.get() == 3:\n info = Label(\n infoFrame,\n text=\"This strategy places a buy \\n\"\n \"order when the MACD line \\n\"\n \"crosses the Signal line upwards \\n\"\n \"if the price is above the 100 \\n\"\n \"EMA. \\n\\n\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n elif choice.get() == 4:\n info = Label(\n infoFrame,\n text=\"This strategy places a buy \\n\"\n \"order when the MACD line \\n\"\n \"crosses the signal line upwards \\n\"\n \"if the RSI is below 60. \\n\\n\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n elif choice.get() == 5:\n info = Label(\n infoFrame,\n text=\"This strategy places a buy \\n\"\n \"order when the RSI crosses 50 \\n\"\n \"upwards if the MACD is greater \\n\"\n \"than the signal line and the \\n\"\n \"stochastic indicator is less than \\n\"\n \"80. \\n\\n\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n elif choice.get() == 6:\n info = Label(\n infoFrame,\n text=\"This strategy places a buy \\n\"\n \"order when the MACD line \\n\"\n \"crosses the signal line upwards \\n\"\n \"if the RSI is below 60 and the \\n\"\n \"20 MA is greater than the 50 MA. 
\\n\\n\",\n justify=LEFT,\n bg=\"#80D5FF\",\n fg=\"black\",\n font=(\"Roboto\", int(8))\n )\n info.place(\n x=4,\n y=90\n )\n \n def clearWindow(self, choice):\n global strategy\n global chart_num\n\n strategy = choice.get()\n \n if strategy == 0:\n chart_num = 1\n elif strategy == 1:\n chart_num = 1\n elif strategy == 2:\n chart_num = 1\n elif strategy == 3:\n chart_num = 2\n elif strategy == 4:\n chart_num = 3\n elif strategy == 5:\n chart_num = 4\n elif strategy == 6:\n chart_num = 3\n\n for widgets in self.root.winfo_children():\n widgets.destroy()\n self.root.geometry(\"1000x500\")\n self.root.configure(bg='#C4C4C4')\n self.mainWindow()\n \n def mainWindow(self):\n balanceFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 180,\n width = 180,\n bd = 4,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n balanceFrame.pack_propagate(False)\n balanceFrame.place(x = 10, y = 10) \n balanceLabel = Label(balanceFrame, text='Available Balance', bd=0, font=('Roboto', int(10)))\n balanceLabel.pack(fill=X, side=TOP)\n balanceText = scrolledtext.ScrolledText(balanceFrame, bd=0, font=('Roboto', int(9)), cursor='arrow', wrap=WORD)\n balanceText.insert('end', f'Starting {quote} Balance: \\n{startQuoteBalance} \\n')\n balanceText.pack(fill=BOTH, side=BOTTOM)\n balanceText.configure(state='disabled')\n \n slidersFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 280,\n width = 180,\n bd = 4,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n slidersFrame.place(x = 10, y = 210)\n slidersFrame.pack_propagate(False)\n slidersLabel = Label(slidersFrame, text='Settings', bd=0, font=('Roboto', int(10)))\n slidersLabel.pack(fill=X, side=TOP)\n slScale = Scale(\n slidersFrame, \n from_=0.02, \n to=2.00, \n resolution=0.02, \n orient=HORIZONTAL, \n length=150, \n label=\"Stop-loss percentage\"\n )\n slScale.set(sl)\n slScale.place(x=10, y=20) \n tpScale = Scale(\n slidersFrame, \n from_=0.02, \n to=2.00, \n resolution=0.02, \n orient=HORIZONTAL, \n length=150, \n label=\"Take-profit percentage\"\n )\n tpScale.set(tp)\n tpScale.place(x=10, y=100)\n \n lotSizeLabel = Label(\n slidersFrame, \n text=f\"Lot Size ({quote})\",\n bg=\"white\",\n fg=\"black\",\n font=(\"Roboto\", int(9))\n )\n lotSizeLabel.place(x=10, y=180)\n LotSizeEntry = Entry(\n slidersFrame, \n bd=1, \n width=16\n )\n LotSizeEntry.place(x=10, y=210)\n LotSizeEntry.insert(0, str(trade_allocation))\n LotSizeButton = Button(\n slidersFrame,\n text=\"set\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.validateAllocation(LotSizeEntry),\n relief=\"ridge\"\n )\n LotSizeButton.place(\n x=125,\n y=210, \n width=40,\n height=20\n )\n \n chartFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 230,\n width = 580,\n bd = 4,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n chartFrame.pack_propagate(False)\n chartFrame.place(x = 210, y = 10)\n chartLabel = Label(chartFrame, text='Chart', font=('Roboto', int(10)), bd=0)\n chartLabel.pack(fill=X, side=TOP)\n chartScrollables = {}\n for b in bases:\n chartScrollables[b] = V_ScrollableFrame(chartFrame)\n chartScrollables[bases[0]].canvas.pack(side=LEFT)\n chartScrollables[bases[0]].scrollbar.pack(side=RIGHT, fill=Y)\n gcf().autofmt_xdate()\n self.createCharts(chartScrollables)\n \n tradesFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 230,\n width = 380,\n bd = 4,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n tradesFrame.pack_propagate(False)\n tradesFrame.place(x = 210, y = 260)\n 
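# Scrolled, read-only log; refresh_frames() appends an entry here each time a trade completes.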
tradesLabel = Label(tradesFrame, text='Trade Records', font=('Roboto', int(10)), bd=0)\n tradesLabel.pack(fill=X, side=TOP)\n tradesText = scrolledtext.ScrolledText(tradesFrame, bd=0, cursor='arrow', wrap=WORD)\n tradesText.pack(fill=BOTH, side=BOTTOM)\n tradesText.configure(state='disabled')\n \n openTradesFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 230,\n width = 180,\n bd = 4,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n openTradesFrame.pack_propagate(False)\n openTradesFrame.place(x = 610, y = 260)\n openTradesLabel = Label(openTradesFrame, text='Open Positions', font=('Roboto', int(10)), bd=0)\n openTradesLabel.pack(fill=X, side=TOP)\n openTradesScrollbar = Scrollbar(openTradesFrame)\n openTradesScrollbar.pack(fill=Y, side=RIGHT)\n openTradesListbox = Listbox(openTradesFrame, yscrollcommand=openTradesScrollbar.set, bd=0, selectmode=SINGLE)\n openTradesListbox.pack(fill=BOTH, expand=True)\n openTradesScrollbar.config(command=openTradesListbox.yview)\n\n pricesFrame = Frame(\n self.root,\n bg = \"#FFFFFF\",\n height = 280,\n width = 180,\n bd = 4,\n highlightthickness = 0,\n relief = \"ridge\"\n )\n pricesFrame.pack_propagate(False)\n pricesFrame.place(x = 810, y = 10)\n pricesLabel = Label(pricesFrame, text='Live Prices', font=('Roboto', int(10)), bd=0)\n pricesLabel.pack(fill=X, side=TOP)\n pricesTexts = {}\n for b in bases:\n pricesTexts[b] = scrolledtext.ScrolledText(pricesFrame, bd=0, cursor='arrow')\n pricesTexts[b].configure(state='disabled')\n \n pricesTexts[bases[0]].pack(fill=BOTH, side=BOTTOM)\n\n baseChoice = StringVar(self.root)\n baseChoice.set(bases[0])\n baseMenu = OptionMenu(self.root, \n baseChoice,\n *bases, \n command=lambda _: self.displaySelected(baseChoice, chartScrollables, pricesTexts)\n )\n baseMenu.configure(bg=\"#C4C4C4\")\n baseMenu.configure(fg=\"black\")\n baseMenu.configure(borderwidth=2)\n baseMenu.configure(highlightthickness=0)\n baseMenu.configure(font=(\"Roboto\", int(10)))\n baseMenu.configure(relief=\"ridge\")\n baseMenu.place(\n x=850,\n y=300,\n width=80,\n height=20\n )\n \n buyButton = Button(\n self.root,\n text=\"Buy\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.thread.manualBuy(baseChoice.get()),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n buyButton.place(\n x=850,\n y=330, \n width=80,\n height=40\n )\n\n sellButton = Button(\n self.root,\n text=\"Sell\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.manualSellHandler(sellButton, openTradesListbox),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n sellButton.place(\n x=850,\n y=390, \n width=80,\n height=40\n )\n sellButton.configure(state='disabled')\n \n reloadButton = Button(\n self.root,\n text=\"Reload\",\n bg=\"#C4C4C4\",\n fg=\"black\",\n activebackground=\"#969696\",\n borderwidth=2,\n highlightthickness=0,\n command=lambda: self.reload(reloadButton, openTradesListbox),\n font=(\"Roboto\", int(18)),\n relief=\"ridge\"\n )\n reloadButton.place(\n x=850,\n y=450, \n width=80,\n height=40\n )\n\n frames = {'prices': pricesTexts, \n 'balances': balanceText, \n 'trades': tradesText, \n 'chart': chartScrollables, \n 'sliders': slidersFrame, \n 'positions': openTradesListbox, \n 'sell': sellButton}\n sliders = {'tp': tpScale, 'sl': slScale}\n self.startAsync(frames, sliders)\n \n def displaySelected(self, var, chartScrollables, pricesTexts):\n global base\n \n new = var.get()\n 
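# Hide the chart and price feed for the previously selected base, then show the new one.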
chartScrollables[base].canvas.pack_forget()\n chartScrollables[base].scrollbar.pack_forget()\n pricesTexts[base].pack_forget()\n chartScrollables[new].canvas.pack(side=LEFT)\n chartScrollables[new].scrollbar.pack(side=RIGHT, fill=Y)\n pricesTexts[new].pack(side=BOTTOM, fill=BOTH)\n base = new\n \n def validateAllocation(self, entry):\n global trade_allocation\n \n try:\n allocation = float(entry.get())\n except ValueError:\n entry.delete(0, END)\n entry.insert(0, str(trade_allocation))\n messagebox.showerror(\n title=\"Error\",\n message=\"Enter a number for Lot Size. \")\n return\n except Exception as e:\n entry.delete(0, END)\n entry.insert(0, str(trade_allocation))\n messagebox.showerror(\n title=\"Error\",\n message=e)\n return\n \n if allocation < min_trade_allocation:\n entry.delete(0, END)\n entry.insert(0, str(trade_allocation))\n messagebox.showerror(\n title=\"Error\",\n message=\"Trade allocation is too small. \\n\"\n \"minimum value is \" + str(min_trade_allocation))\n return\n elif allocation > currentQuoteBalance:\n entry.delete(0, END)\n entry.insert(0, str(trade_allocation))\n messagebox.showerror(\n title=\"Error\",\n message=\"Trade allocation is too large. \\n\"\n \"You do not have enough \" + quote)\n return\n elif allocation > maxQuote:\n entry.delete(0, END)\n entry.insert(0, str(trade_allocation))\n messagebox.showerror(\n title=\"Error\",\n message=\"Trade allocation is too large. \\n\"\n \"Maximum value is\" + str(maxQuote))\n return\n else:\n trade_allocation = allocation\n return\n\n def startAsync(self, frames, sliders):\n self.thread = AsyncioThread(self)\n self.thread.start()\n self.root.protocol('WM_DELETE_WINDOW', lambda: self.on_closing())\n try:\n self.root.after(200, lambda: self.refresh_frames(frames, sliders))\n except Exception as e:\n self.root.protocol('WM_DELETE_WINDOW', lambda: sys.exit())\n messagebox.showerror(\n title=\"Error\",\n message=str(e) + \"\\nAutoTrader will now restart.\")\n restartProgram()\n\n \n def reload(self, button, tradesListbox):\n button.configure(state='disabled')\n temp = self.thread\n self.thread = AsyncioThread(self)\n while tradesListbox.size() != 0:\n tradesListbox.delete(\"end\")\n self.thread.start()\n temp.kill()\n temp.join()\n del(temp)\n button.configure(state='normal')\n \n def on_closing(self):\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit? 
\"):\n closeProgram()\n \n def manualSellHandler(self, button, tradesListbox):\n button.configure(state='disabled')\n self.thread.sellQ.put_nowait(tradesListbox.curselection()[0])\n tsleep(1)\n if not close and not tradesListbox.curselection() == ():\n button.configure(state='normal')\n else:\n button.configure(state='disabled')\n \n def createCharts(self, frames):\n func = lambda i: datetime.fromtimestamp(int(i) / 1000)\n self.vecfunc = vectorize(func)\n \n self.axis = {}\n self.canvas = {}\n for b in bases:\n fig = Figure()\n fig.set_figheight(1.8 * chart_num)\n fig.suptitle(b + quote)\n self.axis[b] = fig.subplots(chart_num, 1, squeeze=False)\n self.canvas[b] = FigureCanvasTkAgg(fig, frames[b].frame)\n self.canvas[b].get_tk_widget().pack(fill=X)\n fig.tight_layout(pad=1.0, w_pad=1.5, h_pad=1.2)\n self.canvas[b].draw()\n \n def updateChart(self, df, b): \n x = df['Close Time'].to_numpy()\n x = x[-60:]\n x = self.vecfunc(x)\n \n self.axis[b][0, 0] = plotPrice(df, x, self.axis[b][0, 0])\n if strategy == 1:\n y1 = df['SMA_20'].to_numpy()\n y2 = df['SMA_50'].to_numpy()\n self.axis[b][0, 0].plot(x, y1[-60:], \"-b\", label=\"20 S.M.A.\")\n self.axis[b][0, 0].plot(x, y2[-60:], \"-r\", label=\"50 S.M.A.\")\n self.axis[b][0, 0].legend(loc=\"upper right\", fontsize=\"x-small\")\n elif strategy == 2:\n y1 = df['EMA_12'].to_numpy()\n y2 = df['EMA_26'].to_numpy()\n self.axis[b][0, 0].plot(x, y1[-60:], \"-b\", label=\"12 E.M.A.\")\n self.axis[b][0, 0].plot(x, y2[-60:], \"-r\", label=\"26 E.M.A.\")\n self.axis[b][0, 0].legend(loc=\"upper right\", fontsize=\"x-small\")\n elif strategy == 3:\n y1 = df['EMA_100'].to_numpy()\n self.axis[b][0, 0].plot(x, y1[-60:], \"-b\", label=\"100 E.M.A.\")\n self.axis[b][0, 0].legend(loc=\"upper right\", fontsize=\"x-small\")\n self.axis[b][1, 0] = plotMACD(df, x, self.axis[b][1, 0])\n elif strategy == 4:\n self.axis[b][0, 0].legend(loc=\"upper right\", fontsize=\"x-small\")\n self.axis[b][1, 0] = plotMACD(df, x, self.axis[b][1, 0])\n self.axis[b][2, 0] = plotRSI(df, x, self.axis[b][2, 0])\n elif strategy == 5:\n self.axis[b][0, 0].legend(loc=\"upper right\", fontsize=\"x-small\")\n self.axis[b][1, 0] = plotMACD(df, x, self.axis[b][1, 0])\n self.axis[b][2, 0] = plotRSI(df, x, self.axis[b][2, 0])\n self.axis[b][3, 0] = plotStochastic(df, x, self.axis[b][3, 0])\n elif strategy == 6:\n y1 = df['SMA_20'].to_numpy()\n y2 = df['SMA_50'].to_numpy()\n self.axis[b][0, 0].plot(x, y1[-60:], \"-b\", label=\"20 S.M.A.\")\n self.axis[b][0, 0].plot(x, y2[-60:], \"-r\", label=\"50 S.M.A.\")\n self.axis[b][0, 0].legend(loc=\"upper right\", fontsize=\"x-small\")\n self.axis[b][1, 0] = plotMACD(df, x, self.axis[b][1, 0])\n self.axis[b][2, 0] = plotRSI(df, x, self.axis[b][2, 0])\n self.canvas[b].draw()\n self.root.update()\n tsleep(0.1)\n fun = lambda x: x.clear()\n vectorize(fun)(self.axis[b])\n \n def refresh_frames(self, frames, sliders):\n global tp\n global sl\n \n while True:\n while not self.thread.price.empty():\n [price, p] = self.thread.price.get_nowait()\n b = p.removesuffix(quote)\n frames['prices'][b].configure(state='normal')\n frames['prices'][b].insert(\"end\", str(price) + '\\n')\n frames['prices'][b].see(\"end\")\n frames['prices'][b].yview(\"end\")\n frames['prices'][b].configure(state='disabled')\n if not self.thread.trades.empty():\n frames['balances'].configure(state='normal')\n frames['balances'].delete(1.0, END)\n frames['balances'].insert('end', \n f'Starting {quote} Balance: \\n{startQuoteBalance} \\n\\n'\n f'Current {quote} Balance: \\n{currentQuoteBalance} 
\\n'\n )\n for b in bases:\n frames['balances'].insert('end', \n f'Current {b} Balance: \\n{currentBaseBalance[b]} \\n'\n )\n frames['balances'].configure(state='disabled')\n frames['trades'].configure(state='normal')\n frames['trades'].insert(\"end\", self.thread.trades.get_nowait())\n frames['trades'].see(\"end\")\n frames['trades'].yview(\"end\")\n frames['trades'].configure(state='disabled')\n while not self.thread.data_frame_figure.empty():\n [data, p] = self.thread.data_frame_figure.get_nowait()\n b = p.removesuffix(quote)\n self.updateChart(data, b)\n if not self.thread.open_positions_display.empty():\n pos = self.thread.open_positions_display.get_nowait()\n if pos[0] == 'add':\n frames['positions'].insert(\"end\", f\"[{pos[2]}] {pos[1]} {pos[3]}\")\n elif pos[0] == 'del':\n frames['positions'].delete(pos[1])\n if not self.thread.errors.empty():\n e = self.thread.errors.get_nowait()\n messagebox.showerror(\n title=\"Error\",\n message=str(e) + \"\\nAutoTrader will now restart.\")\n self.root.protocol('WM_DELETE_WINDOW', lambda: sys.exit())\n restartProgram()\n if not close and not frames['positions'].curselection() == ():\n frames['sell'].configure(state='normal')\n else:\n frames['sell'].configure(state='disabled')\n if not close:\n tp = sliders['tp'].get()\n sl = sliders['sl'].get()\n self.root.update()\n tsleep(0.1)\n\nclass AsyncioThread(Thread):\n def __init__(self, theWindow):\n self.asyncio_loop = new_event_loop()\n set_event_loop(self.asyncio_loop)\n \n self.theWindow = theWindow\n Thread.__init__(self)\n self.daemon = True\n self.killed = False\n\n def start(self):\n self.__run_backup = self.run\n self.run = self.__run\n Thread.start(self)\n \n def __run(self):\n sys.settrace(self.globaltrace)\n self.__run_backup()\n self.run = self.__run_backup\n \n def globaltrace(self, frame, event, arg):\n if event == 'call':\n return self.localtrace\n else:\n return None\n \n def localtrace(self, frame, event, arg):\n if self.killed:\n if event == 'line':\n raise SystemExit()\n return self.localtrace\n \n def kill(self):\n while self.open_positions.empty():\n tsleep(0.1)\n if not self.open_positions.empty():\n self.open_positions.get_nowait()\n self.open_positions.put_nowait([])\n try:\n self.client.close_connection()\n except:\n pass\n try:\n self.asyncio_loop.stop()\n except:\n pass\n try:\n self.asyncio_loop.close()\n except:\n pass\n self.killed = True\n \n def run(self):\n self.asyncio_loop.run_until_complete(self.trader()) #, self.asyncio_loop)\n \n async def trader(self):\n self.open_positions = Queue()\n self.price = LifoQueue()\n self.price_buy = LifoQueue()\n self.price_sell = LifoQueue()\n self.data = Queue()\n self.data_frame = Queue()\n self.data_frame_figure = Queue()\n self.signals = Queue()\n self.trades = Queue()\n self.errors = Queue()\n self.open_positions_display = Queue()\n self.stopQ = Queue()\n self.sellQ = Queue()\n \n try:\n self.client = await AsyncClient.create(API_KEY, SECRET_KEY, testnet=useTestnet)\n except Exception as e:\n self.asyncio_loop.call_soon_threadsafe(self.errors.put_nowait, str(e))\n\n temp = curr_open_positions.get()\n curr_open_positions.put_nowait(temp)\n self.stopQ.put_nowait(1)\n \n await self.open_positions.put(temp)\n for t in temp:\n self.asyncio_loop.call_soon_threadsafe(self.open_positions_display.put_nowait, \n ['add', t['qty'], t['time'], t['base']])\n\n try:\n self.bm = BinanceSocketManager(self.client)\n except Exception as e:\n self.asyncio_loop.call_soon_threadsafe(self.errors.put_nowait, str(e))\n \n 
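# Record the opening balance in the trade log, then launch every trading coroutine and gather them.\n        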
self.save_to_records(startQuoteBalance, 'open')\n self.tasks = [create_task(self.check_closing_time()),\n create_task(self.kline_data()), \n create_task(self.update_data()),\n create_task(self.generate_signals()), \n create_task(self.place_buy_order()),\n create_task(self.place_sell_order()), \n create_task(self.manualSell()), \n create_task(self.close_positions())]\n\n try:\n await gather(*self.tasks)\n except Exception:\n self.theWindow.root.protocol('WM_DELETE_WINDOW', lambda: sys.exit())\n \n return\n\n async def check_closing_time(self):\n global close\n if useClosingTime:\n while time_diff() < closingTime and time_diff() < 86400:\n await sleep(10)\n close = True\n\n async def kline_data(self):\n streams = []\n for b in bases:\n s = b + quote + '@kline_1m'\n streams.append(s.lower())\n \n ms = self.bm.multiplex_socket(streams)\n myKeys = ['s', 't', 'o', 'h', 'l', 'c', 'v', 'T', 'q', 'n', 'V', 'Q']\n async with ms as mscm:\n while not close:\n try:\n a = await mscm.recv()\n except Exception as e:\n self.asyncio_loop.call_soon_threadsafe(self.errors.put_nowait, str(e))\n else:\n a = a['data']\n res = a['k']\n self.asyncio_loop.call_soon_threadsafe(self.price.put_nowait, [float(res['c']), res['s']])\n self.asyncio_loop.call_soon_threadsafe(self.price_buy.put_nowait, [float(res['c']), res['s']])\n self.asyncio_loop.call_soon_threadsafe(self.price_sell.put_nowait, [float(res['c']), res['s']])\n if res['x']:\n candle = [res[x] for x in myKeys]\n self.asyncio_loop.call_soon_threadsafe(self.data.put_nowait, candle)\n return\n\n async def update_data(self):\n dfs = await self.get_historical_data()\n for b in bases:\n p = b + quote\n df = dfs[p]\n self.data_frame_figure.put_nowait([df, p])\n while not close:\n new_row = await self.data.get()\n p = new_row[0]\n new_row.remove(p)\n if strategy == 1:\n new_row = new_row + [0, 0, 0, 0]\n elif strategy == 2:\n new_row = new_row + [0, 0, 0, 0]\n elif strategy == 3:\n new_row = new_row + [0, 0, 0, 0, 0]\n elif strategy == 4:\n new_row = new_row + [0, 0, 0, 0, 0]\n elif strategy == 5:\n new_row = new_row + [0, 0, 0, 0, 0, 0, 0]\n elif strategy == 6:\n new_row = new_row + [0, 0, 0, 0, 0, 0, 0]\n new_row = array(new_row).astype(npdouble)\n dfs = self.update_df(p, dfs, new_row)\n df = dfs[p]\n self.data_frame.put_nowait([df, p])\n self.data_frame_figure.put_nowait([df, p])\n return\n\n async def get_historical_data(self):\n headings = ['Open Time', 'Open', 'High', 'Low', 'Close',\n 'Volume', 'Close Time', 'Quote Asset Volume',\n 'Number of Trades', 'Taker buy Base Asset Volume',\n 'Taker buy Quote Asset Volume', 'Ignore']\n dfs = {}\n for b in bases:\n p = b + quote\n klines = array(await self.client.get_historical_klines(p, AsyncClient.KLINE_INTERVAL_1MINUTE, '1 day ago UTC', limit=1000)).astype(npdouble)\n df = DataFrame.from_records(klines, columns=headings)\n try:\n df.drop(['Ignore'], axis=1, inplace=True)\n except Exception:\n pass\n df.dropna(inplace=True)\n if strategy == 1:\n df['SMA_20'] = df['Close'].rolling(window=20).mean()\n df['SMA_50'] = df['Close'].rolling(window=50).mean()\n df['SMA_100'] = df['Close'].rolling(window=100).mean()\n df['SMA_200'] = df['Close'].rolling(window=200).mean()\n elif strategy == 2:\n df['EMA_12'] = df['Close'].ewm(span=12, adjust=False).mean()\n df['EMA_26'] = df['Close'].ewm(span=26, adjust=False).mean()\n df['EMA_50'] = df['Close'].ewm(span=50, adjust=False).mean()\n df['EMA_100'] = df['Close'].ewm(span=100, adjust=False).mean()\n elif strategy == 3:\n df['EMA_12'] = df['Close'].ewm(span=12, 
adjust=False).mean()\n                df['EMA_26'] = df['Close'].ewm(span=26, adjust=False).mean()\n                df['EMA_100'] = df['Close'].ewm(span=100, adjust=False).mean()\n                df['MACD'] = df['EMA_12'] - df['EMA_26']\n                df['Signal Line'] = df['MACD'].ewm(span=9, adjust=False).mean()\n            elif strategy == 4:\n                df['EMA_12'] = df['Close'].ewm(span=12, adjust=False).mean()\n                df['EMA_26'] = df['Close'].ewm(span=26, adjust=False).mean()\n                df['MACD'] = df['EMA_12'] - df['EMA_26']\n                df['Signal Line'] = df['MACD'].ewm(span=9, adjust=False).mean()\n                delta = df['Close'].diff()\n                up = delta.clip(lower=0)\n                down = -1 * delta.clip(upper=0)\n                ema_up = up.ewm(com=13, adjust=False).mean()\n                ema_down = down.ewm(com=13, adjust=False).mean()\n                rs = ema_up / ema_down\n                df['RSI'] = 100 - (100 / (1 + rs))\n            elif strategy == 5:\n                df['EMA_12'] = df['Close'].ewm(span=12, adjust=False).mean()\n                df['EMA_26'] = df['Close'].ewm(span=26, adjust=False).mean()\n                df['MACD'] = df['EMA_12'] - df['EMA_26']\n                df['Signal Line'] = df['MACD'].ewm(span=9, adjust=False).mean()\n                delta = df['Close'].diff()\n                up = delta.clip(lower=0)\n                down = -1 * delta.clip(upper=0)\n                ema_up = up.ewm(com=13, adjust=False).mean()\n                ema_down = down.ewm(com=13, adjust=False).mean()\n                rs = ema_up / ema_down\n                df['RSI'] = 100 - (100 / (1 + rs))\n                high_14 = df['High'].rolling(14).max()\n                low_14 = df['Low'].rolling(14).min()\n                df['%K'] = (df['Close'] - low_14) * 100 / (high_14 - low_14)\n                df['%D'] = df['%K'].rolling(3).mean()\n            elif strategy == 6:\n                df['SMA_20'] = df['Close'].rolling(window=20).mean()\n                df['SMA_50'] = df['Close'].rolling(window=50).mean()\n                df['EMA_12'] = df['Close'].ewm(span=12, adjust=False).mean()\n                df['EMA_26'] = df['Close'].ewm(span=26, adjust=False).mean()\n                df['MACD'] = df['EMA_12'] - df['EMA_26']\n                df['Signal Line'] = df['MACD'].ewm(span=9, adjust=False).mean()\n                delta = df['Close'].diff()\n                up = delta.clip(lower=0)\n                down = -1 * delta.clip(upper=0)\n                ema_up = up.ewm(com=13, adjust=False).mean()\n                ema_down = down.ewm(com=13, adjust=False).mean()\n                rs = ema_up / ema_down\n                df['RSI'] = 100 - (100 / (1 + rs))\n            dfs[p] = df\n        return dfs\n\n    def update_df(self, p, dfs, new_row):\n        df = dfs[p]\n        new_row = Series(new_row, index=df.columns)\n        if new_row[0] != df.iloc[-1, 0]:\n            df = df.append(new_row, ignore_index=True)\n        else:\n            df.iloc[-1, :] = new_row\n        if strategy == 1:\n            df['SMA_20'][-80:] = df['Close'][-80:].rolling(window=20).mean()\n            df['SMA_50'][-110:] = df['Close'][-110:].rolling(window=50).mean()\n            df['SMA_100'][-160:] = df['Close'][-160:].rolling(window=100).mean()\n            df['SMA_200'][-260:] = df['Close'][-260:].rolling(window=200).mean()\n        elif strategy == 2:\n            df['EMA_12'][-200:] = df['Close'][-200:].ewm(span=12, adjust=False).mean()\n            df['EMA_26'][-200:] = df['Close'][-200:].ewm(span=26, adjust=False).mean()\n            df['EMA_50'][-200:] = df['Close'][-200:].ewm(span=50, adjust=False).mean()\n            df['EMA_100'][-200:] = df['Close'][-200:].ewm(span=100, adjust=False).mean()\n        elif strategy == 3:\n            df['EMA_12'][-200:] = df['Close'][-200:].ewm(span=12, adjust=False).mean()\n            df['EMA_26'][-200:] = df['Close'][-200:].ewm(span=26, adjust=False).mean()\n            df['EMA_100'][-200:] = df['Close'][-200:].ewm(span=100, adjust=False).mean()\n            df['MACD'].iloc[-1] = df['EMA_12'].iloc[-1] - df['EMA_26'].iloc[-1]\n            df['Signal Line'][-80:] = df['MACD'][-80:].ewm(span=9, adjust=False).mean()\n        elif strategy == 4:\n            df['EMA_12'][-100:] = df['Close'][-100:].ewm(span=12, adjust=False).mean()\n            df['EMA_26'][-100:] = df['Close'][-100:].ewm(span=26, adjust=False).mean()\n            
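# Only the newest row's MACD needs recomputing; the EMA tails above were just refreshed over the last 100 rows.\n            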
df['MACD'].iloc[-1] = df['EMA_12'].iloc[-1] - df['EMA_26'].iloc[-1]\n            df['Signal Line'][-80:] = df['MACD'][-80:].ewm(span=9, adjust=False).mean()\n            delta = df['Close'][-80:].diff()\n            up = delta.clip(lower=0)\n            down = -1 * delta.clip(upper=0)\n            ema_up = up.ewm(com=13, adjust=False).mean()\n            ema_down = down.ewm(com=13, adjust=False).mean()\n            rs = ema_up / ema_down\n            df['RSI'][-80:] = 100 - (100 / (1 + rs))\n        elif strategy == 5:\n            df['EMA_12'][-100:] = df['Close'][-100:].ewm(span=12, adjust=False).mean()\n            df['EMA_26'][-100:] = df['Close'][-100:].ewm(span=26, adjust=False).mean()\n            df['MACD'].iloc[-1] = df['EMA_12'].iloc[-1] - df['EMA_26'].iloc[-1]\n            df['Signal Line'][-80:] = df['MACD'][-80:].ewm(span=9, adjust=False).mean()\n            delta = df['Close'][-80:].diff()\n            up = delta.clip(lower=0)\n            down = -1 * delta.clip(upper=0)\n            ema_up = up.ewm(com=13, adjust=False).mean()\n            ema_down = down.ewm(com=13, adjust=False).mean()\n            rs = ema_up / ema_down\n            df['RSI'][-80:] = 100 - (100 / (1 + rs))\n            high_14 = df['High'][-80:].rolling(14).max()\n            low_14 = df['Low'][-80:].rolling(14).min()\n            df['%K'][-80:] = (df['Close'][-80:] - low_14) * 100 / (high_14 - low_14)\n            df['%D'][-80:] = df['%K'][-80:].rolling(3).mean()\n        elif strategy == 6:\n            df['SMA_20'][-80:] = df['Close'][-80:].rolling(window=20).mean()\n            df['SMA_50'][-110:] = df['Close'][-110:].rolling(window=50).mean()\n            df['EMA_12'][-100:] = df['Close'][-100:].ewm(span=12, adjust=False).mean()\n            df['EMA_26'][-100:] = df['Close'][-100:].ewm(span=26, adjust=False).mean()\n            df['MACD'].iloc[-1] = df['EMA_12'].iloc[-1] - df['EMA_26'].iloc[-1]\n            df['Signal Line'][-80:] = df['MACD'][-80:].ewm(span=9, adjust=False).mean()\n            delta = df['Close'][-80:].diff()\n            up = delta.clip(lower=0)\n            down = -1 * delta.clip(upper=0)\n            ema_up = up.ewm(com=13, adjust=False).mean()\n            ema_down = down.ewm(com=13, adjust=False).mean()\n            rs = ema_up / ema_down\n            df['RSI'][-80:] = 100 - (100 / (1 + rs))\n        dfs[p] = df\n        return dfs\n\n    async def generate_signals(self):\n        while not close:\n            [df, p] = await self.data_frame.get()\n            if strategy == 0:\n                pass\n            elif strategy == 1:\n                if self.strat1(df):\n                    self.asyncio_loop.call_soon_threadsafe(self.signals.put_nowait, p)\n            elif strategy == 2:\n                if self.strat2(df):\n                    self.asyncio_loop.call_soon_threadsafe(self.signals.put_nowait, p)\n            elif strategy == 3:\n                if self.strat3(df):\n                    self.asyncio_loop.call_soon_threadsafe(self.signals.put_nowait, p)\n            elif strategy == 4:\n                if self.strat4(df):\n                    self.asyncio_loop.call_soon_threadsafe(self.signals.put_nowait, p)\n            elif strategy == 5:\n                if self.strat5(df):\n                    self.asyncio_loop.call_soon_threadsafe(self.signals.put_nowait, p)\n            elif strategy == 6:\n                if self.strat6(df):\n                    self.asyncio_loop.call_soon_threadsafe(self.signals.put_nowait, p)\n        return\n    \n    def strat1(self, df):\n        price = df['Close']\n        ma_20 = df['SMA_20']\n        ma_50 = df['SMA_50']\n        ma_100 = df['SMA_100']\n        ma_200 = df['SMA_200']\n        if ma_20.iloc[-1] > ma_50.iloc[-1] and ma_20.iloc[-2] < ma_50.iloc[-2]:\n            return True\n        if ma_20.iloc[-1] > ma_50.iloc[-1] and ma_50.iloc[-1] > ma_100.iloc[-1] and ma_100.iloc[-1] > ma_200.iloc[-1]:\n            if price.iloc[-1] < ma_20.iloc[-1] and price.iloc[-1] > ma_50.iloc[-1] and (ma_20.iloc[-1] - price.iloc[-1]) / price.iloc[-1] > tp / 100:\n                return True\n        return False\n    \n    def strat2(self, df):\n        price = df['Close']\n        ma_12 = df['EMA_12']\n        ma_26 = df['EMA_26']\n        ma_50 = df['EMA_50']\n        ma_100 = df['EMA_100']\n        if ma_12.iloc[-1] > ma_26.iloc[-1] and ma_12.iloc[-2] < ma_26.iloc[-2]:\n            return True\n        if ma_12.iloc[-1] > ma_26.iloc[-1] and ma_26.iloc[-1] > 
ma_50.iloc[-1] and ma_50.iloc[-1] > ma_100.iloc[-1]:\n            if price.iloc[-1] < ma_12.iloc[-1] and price.iloc[-1] > ma_26.iloc[-1] and (ma_12.iloc[-1] - price.iloc[-1]) / price.iloc[-1] > tp / 100:\n                return True\n        return False\n    \n    def strat3(self, df):\n        price = df['Close']\n        macd = df['MACD']\n        signalLine = df['Signal Line']\n        ma_100 = df['EMA_100']\n        dif = (price.iloc[-10:] - ma_100.iloc[-10:]) >= 0\n        if macd.iloc[-1] > signalLine.iloc[-1] and macd.iloc[-2] < signalLine.iloc[-2] and dif.all():\n            return True\n        return False\n\n    def strat4(self, df):\n        macd = df['MACD']\n        signalLine = df['Signal Line']\n        rsi = df['RSI']\n        if macd.iloc[-1] > signalLine.iloc[-1] and macd.iloc[-2] < signalLine.iloc[-2] and rsi.iloc[-1] < 60:\n            return True\n        return False\n\n    def strat5(self, df):\n        macd = df['MACD']\n        signalLine = df['Signal Line']\n        rsi = df['RSI']\n        k = df['%K']\n        d = df['%D']\n        if macd.iloc[-1] > signalLine.iloc[-1] and rsi.iloc[-2] < 50 and rsi.iloc[-1] > 50 and k.iloc[-1] < 80 and d.iloc[-1] < 80:\n            return True\n        return False\n\n    def strat6(self, df):\n        ma_20 = df['SMA_20']\n        ma_50 = df['SMA_50']\n        macd = df['MACD']\n        signalLine = df['Signal Line']\n        rsi = df['RSI']\n        if macd.iloc[-1] > signalLine.iloc[-1] and macd.iloc[-2] < signalLine.iloc[-2] and rsi.iloc[-1] < 60:\n            if ma_20.iloc[-1] > ma_50.iloc[-1]:\n                return True\n        return False\n\n    def manualBuy(self, b):\n        self.asyncio_loop.call_soon_threadsafe(self.signals.put_nowait, b + quote)\n\n    async def manualSell(self):\n        while not close:\n            if not self.sellQ.empty():\n                i = self.sellQ.get_nowait()\n                sell_list = await self.open_positions.get()\n                await self.open_positions.put(sell_list)\n                try:\n                    s = sell_list[i]\n                except IndexError:\n                    pass\n                else:\n                    try:\n                        data = await self.client.get_symbol_ticker(symbol=s['base'] + quote)\n                    except Exception as e:\n                        self.asyncio_loop.call_soon_threadsafe(self.errors.put_nowait, str(e))\n                    else:\n                        current_price = float(data['price'])\n                        await self.sell(s['qty'], current_price, s['id'], s['base'] + quote, i)\n            await sleep(0.5)\n    \n    async def place_buy_order(self):\n        while not close:\n            p = await self.signals.get()\n            try:\n                balance = await self.client.get_asset_balance(asset=quote)\n            except Exception as e:\n                self.asyncio_loop.call_soon_threadsafe(self.errors.put_nowait, str(e))\n            else:\n                data = await self.client.get_symbol_ticker(symbol=p)\n                current_price = float(data['price'])\n                balance = float(balance['free'])\n                quantity = trade_allocation / current_price\n                if balance < min_trade_allocation:\n                    pass\n                elif balance < trade_allocation:\n                    pass\n                else:\n                    if trade_allocation >= filters[p]['minNotional']:\n                        quantity = trade_allocation / current_price\n                    else:\n                        quantity = (filters[p]['minNotional'] * 1.001) / current_price\n                    if quantity > filters[p]['maxQty']:\n                        quantity = filters[p]['maxQty'] * 0.97\n                    count = trade_count.get()\n                    trade_count.put_nowait(count)\n                    res = await self.buy(quantity, current_price, count, p)\n                    if res:\n                        count = trade_count.get()\n                        count += 1\n                        trade_count.put_nowait(count)\n        return\n    \n    async def buy(self, qty, current_price, count, p):\n        if filters[p]['stepSize'] == 0:\n            qty = round_step_size(qty, filters[p]['lotStepSize'])\n        else:\n            qty = round_step_size(qty, filters[p]['stepSize'])\n        \n        try:\n            order = await self.client.order_market_buy(\n                symbol=p,\n                quantity=qty)\n        except Exception as e:\n            self.asyncio_loop.call_soon_threadsafe(self.errors.put_nowait, str(e))\n            return False\n        else:\n            while True:\n                await sleep(0.5)\n                try:\n                    trade_status = await self.client.get_order(symbol=p, 
orderId=order['orderId'])\n except Exception as ex:\n self.asyncio_loop.call_soon_threadsafe(self.errors.put_nowait, str(ex))\n return False\n else:\n if float(trade_status['executedQty']) == qty:\n await self.save_trades('buy', qty, current_price, count, p.removesuffix(quote))\n data = {'stop-loss': (1 - (sl / 100)) * current_price, \n 'take-profit': (1 + (tp / 100)) * current_price, \n 'qty': qty, \n 'id': count, \n 'time': strftime('%H:%M'), \n 'base': p.removesuffix(quote)\n }\n sell_list = await self.open_positions.get()\n sell_list.append(data)\n await self.open_positions.put(sell_list)\n curr_open_positions.get()\n curr_open_positions.put_nowait(sell_list)\n self.asyncio_loop.call_soon_threadsafe(self.open_positions_display.put_nowait, \n ['add', qty, strftime('%H:%M'), p.removesuffix(quote)])\n return True\n \n async def place_sell_order(self): \n while not close:\n [current_price, p] = await self.price_sell.get()\n sell_list = await self.open_positions.get()\n await self.open_positions.put(sell_list)\n \n for s in sell_list:\n if p == s['base'] + quote and (current_price < s['stop-loss'] or current_price > s['take-profit']):\n await self.sell(s['qty'], current_price, s['id'], p, sell_list.index(s))\n return\n \n async def sell(self, qty, current_price, count, p, idx):\n temp = curr_open_positions.get()\n \n try:\n order = await self.client.order_market_sell(\n symbol=p,\n quantity=qty)\n except Exception as e:\n self.asyncio_loop.call_soon_threadsafe(self.errors.put_nowait, str(e))\n return False\n else:\n await sleep(0.5)\n try:\n await self.client.get_order(symbol=p, orderId=order['orderId'])\n except Exception as ex:\n self.asyncio_loop.call_soon_threadsafe(self.errors.put_nowait, str(ex))\n return False\n else:\n temp.remove(temp[idx])\n curr_open_positions.put_nowait(temp)\n self.asyncio_loop.call_soon_threadsafe(self.open_positions_display.put_nowait, \n ['del', idx])\n await self.open_positions.get()\n await self.open_positions.put(temp)\n await self.save_trades('sell', qty, current_price, count, p.removesuffix(quote))\n return True\n \n async def close_positions(self):\n while not close:\n await sleep(0.5)\n await self.stopQ.get()\n res = await self.open_positions.get()\n self.open_positions.put_nowait(res)\n if not res == []:\n try:\n self.asyncio_loop.call_soon_threadsafe(self.trades.put_nowait, 'Closing positions. \\n\\n')\n prices = {}\n for b in bases:\n data = await self.client.get_symbol_ticker(symbol=b + quote)\n price = float(data['price'])\n prices[b] = price\n \n for i in res:\n await self.sell(i['qty'], prices[i['base']], i['id'], i['base'] + quote, res.index(i))\n\n closing_balance = await self.client.get_asset_balance(asset=quote)\n closing_balance = float(closing_balance['free'])\n self.save_to_records(closing_balance, 'close')\n except Exception:\n self.save_to_records(currentQuoteBalance, 'close')\n self.stopQ.put_nowait(1)\n return\n \n self.save_to_records(currentQuoteBalance, 'close')\n self.stopQ.put_nowait(1)\n\n def save_to_records(self, balance, trade_period):\n try:\n file = open(FILE_NAME, 'a')\n self.asyncio_loop.call_soon_threadsafe(self.trades.put_nowait, strftime('%c') + '\\n\\n')\n if useTestnet:\n file.write('Testnet \\n')\n if trade_period == 'open':\n file.write(strftime('%c') + '\\n')\n file.write(f'Opening balance: {balance} {quote}. \\n')\n elif trade_period == 'close':\n file.write(f'Closing balance: {balance} {quote}. 
\\n\\n')\n finally:\n file.close()\n \n async def save_trades(self, trade_type, qty, price, count, b):\n global currentBaseBalance\n global currentQuoteBalance\n \n temp = await self.client.get_asset_balance(asset=b)\n currentBaseBalance[b] = float(temp['free']) \n currentQuoteBalance = await self.client.get_asset_balance(asset=quote)\n currentQuoteBalance = float(currentQuoteBalance['free'])\n\n try:\n file = open(FILE_NAME, 'a')\n file.write(strftime('%H:%M') + '\\n')\n if trade_type == 'buy':\n file.write(f'Trade {count} \\n')\n file.write(f'Bought {qty} {b} at {price} {quote} per {b} ({qty * price} {quote}). \\n')\n self.asyncio_loop.call_soon_threadsafe(self.trades.put_nowait, strftime(\"%H:%M\") + '\\n')\n self.asyncio_loop.call_soon_threadsafe(self.trades.put_nowait, f'Trade {count} \\n')\n self.asyncio_loop.call_soon_threadsafe(self.trades.put_nowait, \n f'Bought {qty} {b} at {price} {quote} per {b} ({round(qty * price, 4)} {quote}) \\n\\n')\n elif trade_type == 'sell':\n file.write(f'Closed trade {count} \\n')\n file.write(f'Sold {qty} {b} at {price} {quote} per {b} ({qty * price} {quote}). \\n')\n self.asyncio_loop.call_soon_threadsafe(self.trades.put_nowait, strftime(\"%H:%M\") + '\\n')\n self.asyncio_loop.call_soon_threadsafe(self.trades.put_nowait, f'Closed trade {count} \\n')\n self.asyncio_loop.call_soon_threadsafe(self.trades.put_nowait, \n f'Sold {qty} {b} at {price} {quote} per {b} ({round(qty * price, 4)} {quote}) \\n\\n')\n finally:\n file.close()\n \ndef plotPrice(df, x, ax):\n y = df['Close'].to_numpy()\n ax.plot(x, y[-60:], \"-g\", label=\"Price\")\n ax.xaxis.set_major_formatter(myFmt)\n ax.xaxis.set_minor_locator(mns)\n ax.set_title(\"Price\")\n return ax\n\ndef plotMACD(df, x, ax):\n y1 = df['MACD'].to_numpy()\n y2 = df['Signal Line'].to_numpy()\n ax.plot(x, y1[-60:], \"-b\", label=\"MACD\")\n ax.plot(x, y2[-60:], \"-r\", label=\"Signal Line\")\n ax.plot(x, [0]*len(x), color='k', linestyle='--')\n ax.xaxis.set_major_formatter(myFmt)\n ax.xaxis.set_minor_locator(mns)\n ax.legend(loc=\"upper right\", fontsize=\"x-small\")\n ax.set_title(\"MACD\")\n return ax\n\ndef plotRSI(df, x, ax):\n y = df['RSI'].to_numpy()\n ax.plot(x, y[-60:], \"-b\", label=\"RSI\")\n ax.plot(x, [30]*len(x), color='k', linestyle='--')\n ax.plot(x, [70]*len(x), color='k', linestyle='--')\n ax.xaxis.set_major_formatter(myFmt)\n ax.xaxis.set_minor_locator(mns)\n ax.legend(loc=\"upper right\", fontsize=\"x-small\")\n ax.set_title(\"RSI\")\n return ax\n\ndef plotStochastic(df, x, ax):\n y1 = df['%K'].to_numpy()\n y2 = df['%D'].to_numpy()\n ax.plot(x, y1[-60:], \"-b\", label=\"%K\")\n ax.plot(x, y2[-60:], \"-r\", label=\"%D\")\n ax.plot(x, [20]*len(x), color='k', linestyle='--')\n ax.plot(x, [80]*len(x), color='k', linestyle='--')\n ax.xaxis.set_major_formatter(myFmt)\n ax.xaxis.set_minor_locator(mns)\n ax.legend(loc=\"upper right\", fontsize=\"x-small\")\n ax.set_title(\"Stochastic Indicator\")\n return ax\n \ndef closeProgram():\n global close\n\n close = True\n \n tsleep(1)\n while window.thread.stopQ.empty():\n tsleep(0.5)\n try:\n window.thread.client.close_connection()\n except:\n pass\n try:\n window.thread.asyncio_loop.stop()\n except:\n pass\n try:\n window.thread.asyncio_loop.close()\n except:\n pass\n window.root.quit()\n window.root.destroy()\n try:\n sys.exit()\n except SystemExit:\n pass\n\ndef restartProgram():\n global close\n\n close = True\n \n try:\n window.thread.client.close_connection()\n except:\n pass\n try:\n window.thread.asyncio_loop.stop()\n except:\n pass\n try:\n 
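# The event loop may already be stopped or closed; failures here are safe to ignore while restarting.\n        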
window.thread.asyncio_loop.close()\n except:\n pass\n temp = trade_count.get()\n command = 'AutoTrader -r '\n if useTestnet:\n command = command + '-t '\n command = command + f'-a {API_KEY} {SECRET_KEY} {sl} {tp} {trade_allocation} {useClosingTime} {startTime} {closingTime} {strategy} {quote} {temp} '\n command = command + '-b '\n for b in bases:\n command = command + f'{b} '\n temp = curr_open_positions.get()\n for t in temp:\n command = command + '-o '\n for i in t.values():\n command = command + f'{i} '\n window.root.quit()\n window.root.destroy()\n os.system(command)\n os.system('exit')\n try:\n sys.exit()\n except SystemExit:\n pass\n \nclass V_ScrollableFrame:\n def __init__(self, container):\n self.canvas = Canvas(container, width=550, height=230)\n self.scrollbar=Scrollbar(container, orient=VERTICAL, command=self.canvas.yview)\n self.frame = Frame(self.canvas, \n bg = \"#FFFFFF\",\n bd=0\n )\n self.frame.bind('', \n lambda e: self.canvas.configure(scrollregion=self.canvas.bbox('all')\n )\n )\n self.canvas.create_window((0, 0), window=self.frame, anchor='nw', width=550)\n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n\ndef time_diff():\n currTime = ((time() % 86400) + offset) % 86400\n if currTime > startTime:\n return currTime - startTime\n else:\n return 86400 - startTime + currTime\n\ndef popen(cmd: str) -> str:\n \"\"\"For pyinstaller -w\"\"\"\n startupinfo = STARTUPINFO()\n startupinfo.dwFlags |= STARTF_USESHOWWINDOW\n process = Popen(cmd,startupinfo=startupinfo, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n return process.stdout.read()\n\nif __name__ == '__main__':\n apply()\n window = TheWindow()\n window.root.mainloop()\n try:\n sys.exit()\n except SystemExit:\n pass\n","sub_path":"v3/appv3_single_async_thread.py","file_name":"appv3_single_async_thread.py","file_ext":"py","file_size_in_byte":98084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"409534806","text":"import requests\nimport os\nimport logging\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef get_photo_list():\n url = \"https://api.flickr.com/services/rest/?method=flickr.interestingness.getList&api_key=%s&format=json&nojsoncallback=1\" % os.environ[\"FLICKR_API_KEY\"]\n\n res = requests.get(url).json()\n\n return res\n\n\ndef download_photo(_photo):\n photo_url = \"https://farm%s.staticflickr.com/%s/%s_%s.jpg\" % (_photo[\"farm\"], _photo[\"server\"], _photo[\"id\"], _photo[\"secret\"])\n\n photo_res = requests.get(photo_url)\n\n return photo_res.content\n\n\ndef save_photo(_photo, _photo_content):\n with open(os.path.join(os.path.realpath(os.path.dirname(__file__)), \"..\", \"downloaded\", _photo[\"id\"] + \".jpg\"), \"wb\") as photo_file:\n photo_file.write(_photo_content)\n\n\nif __name__ == \"__main__\":\n photos_list_res = get_photo_list()\n\n photo_list = photos_list_res[\"photos\"][\"photo\"]\n\n with ThreadPoolExecutor() as e:\n future_photos = {e.submit(download_photo, photo): photo for photo in photo_list}\n\n for future in as_completed(future_photos):\n photo_content = future.result()\n save_photo(future_photos[future], photo_content)\n\n","sub_path":"section-1/ioperf/ioperf_refactored.py","file_name":"ioperf_refactored.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"237304909","text":"# \"C:\\Program Files (x86)\\Python_3_5_1\\Python.exe\" 
\"C:\\Users\\Xylozi\\Documents\\Python\\Projects\\EU4\\Monarch History Gen\\monarch_history_gen.py\"\n\nimport os\nimport sys\nimport random\nimport time\n\ndef getRandomNumber( min, max ):\n return random.randrange( min, max )\n\ndef getScriptPath():\n \"\"\" Returns current path of script \"\"\"\n return os.path.dirname(os.path.realpath(sys.argv[0]))\n\ndef getRandomEntry( list ):\n \"\"\"Returns a random entry within the passed list\"\"\"\n return list[random.randint(0, len(list)-1)]\n \ndef getRandomGender():\n rand = random.randrange( 0, 100 )\n if( rand < 75 ):\n return \"male\"\n else:\n return \"female\"\n \ndef getBirthDate( date ):\n year = date.split( \".\" )[0]\n birthYear = int(year) - random.randrange( 18, 90 )\n birthYear = str(birthYear) + \".1.1\"\n return birthYear\n \ndef getDeathDate( date ):\n year = date.split( \".\" )[0]\n birthYear = int(year) + random.randrange( 18, 90 )\n birthYear = str(birthYear) + \".1.1\"\n return birthYear\n \ndef getNames( file ):\n \"\"\"Returns a list containing all the names found in the passed file, with newlines stripped\"\"\"\n tempList = []\n filepath = getScriptPath() + \"/\" + file\n \n with open( filepath, \"rt\" ) as sourceFile:\n text = sourceFile.readlines()\n \n for i in range( len( text ) ):\n tempList.append( text[i].strip( \"\\n\" ) )\n \n return tempList\n \ndef writeMonarch( file, name, surname, birth_date, adm, dip, mil, date, gender ):\n \"\"\"Writes the monarch script code to a file\"\"\"\n file.write( date + \"= {\\n\" )\n file.write( \"\\t\" + \"monarch = {\\n\")\n file.write( \"\\t\\t\" + \"name = \\\"\" + name + \"\\\"\\n\" )\n file.write( \"\\t\\t\" + \"dynasty = \\\"\" + surname + \"\\\"\\n\" )\n file.write( \"\\t\\t\" + \"birth_date = \" + birth_date + \"\\n\" )\n file.write( \"\\t\\t\" + \"adm = \" + str(adm) + \"\\n\" )\n file.write( \"\\t\\t\" + \"dip = \" + str(dip) + \"\\n\" )\n file.write( \"\\t\\t\" + \"mil = \" + str(mil) + \"\\n\" )\n \n if gender == \"female\":\n file.write( \"\\t\\t\" + \"female = yes\\n\" )\n \n file.write( \"\\t}\\n\" )\n \ndef writeHeir( file, name, surname, birth_date, death_date, adm, dip, mil, date, gender, claim ):\n \"\"\"Writes the heir script code to a file\"\"\"\n file.write( \"\\t\" + \"heir = {\\n\")\n file.write( \"\\t\\t\" + \"name = \\\"\" + name + \"\\\"\\n\" )\n file.write( \"\\t\\t\" + \"monarch_name = \\\"\" + name + \"\\\"\\n\" )\n file.write( \"\\t\\t\" + \"dynasty = \\\"\" + surname + \"\\\"\\n\" )\n file.write( \"\\t\\t\" + \"birth_date = \" + birth_date + \"\\n\" )\n file.write( \"\\t\\t\" + \"death_date = \" + death_date + \"\\n\" )\n file.write( \"\\t\\t\" + \"adm = \" + str(adm) + \"\\n\" )\n file.write( \"\\t\\t\" + \"dip = \" + str(dip) + \"\\n\" )\n file.write( \"\\t\\t\" + \"mil = \" + str(mil) + \"\\n\" )\n file.write( \"\\t\\t\" + \"claim = \" + str(claim) + \"\\n\" )\n \n if gender == \"female\":\n file.write( \"\\t\\t\" + \"female = yes\\n\" )\n \n file.write( \"\\t}\\n\" )\n file.write( \"}\\n\\n\" )\n \ndef main():\n script_path = getScriptPath()\n \n dates = [ \"2282.1.1\" ]\n \n maleNames = getNames( \"male_forenames.txt\" )\n femaleNames = getNames( \"female_forenames.txt\" )\n surnames = getNames( \"surnames.txt\" )\n \n print( getRandomEntry( surnames ) )\n \n os.chdir( script_path )\n \n output = open( \"output.txt\", \"w+\" )\n \n for i in range( len( surnames ) ):\n currentSurname = surnames[i]\n \n output.write( \"\\n### \" + currentSurname + \"\\n\\n\" )\n \n for j in range( len( dates ) ):\n currentDate = dates[j]\n \n gender = 
getRandomGender()\n \n if gender == \"male\":\n writeMonarch( output, getRandomEntry(maleNames), currentSurname, getBirthDate( currentDate ), getRandomNumber( 0, 6 ), getRandomNumber( 0, 6 ), getRandomNumber( 0, 6 ), currentDate, gender )\n else:\n writeMonarch( output, getRandomEntry(femaleNames), currentSurname, getBirthDate( currentDate ), getRandomNumber( 0, 6 ), getRandomNumber( 0, 6 ), getRandomNumber( 0, 6 ), currentDate, gender )\n \n gender = getRandomGender()\n \n if gender == \"male\":\n writeHeir( output, getRandomEntry(maleNames), currentSurname, getBirthDate( currentDate ), getDeathDate( currentDate ), getRandomNumber( 0, 6 ), getRandomNumber( 0, 6 ), getRandomNumber( 0, 6 ), currentDate, gender, getRandomNumber( 25, 100 ) )\n else:\n writeHeir( output, getRandomEntry(femaleNames), currentSurname, getBirthDate( currentDate ), getDeathDate( currentDate ), getRandomNumber( 0, 6 ), getRandomNumber( 0, 6 ), getRandomNumber( 0, 6 ), currentDate, gender, getRandomNumber( 25, 100 ) )\n \n \n output.close()\n\t\n os.system(\"pause\")\n \nif __name__ == \"__main__\":\n main()\n \n","sub_path":"Monarch History Gen/monarch_history_gen.py","file_name":"monarch_history_gen.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"359224286","text":"\n# pip install pillow\n# pip install pyscreenshot\n\n\nimport pyscreenshot as ImageGrab\n\ndef grab_screen_area(x1, y1, x2, y2):\n # Grab part of the screen\n im = ImageGrab.grab(bbox=(x1, y1, x2, y2))\n\n # Save the file\n im.save(\"sshot_part.png\")\n\n # Show image in a window\n # im.show()\n\ndef grab_full_screen():\n # Grab the screen\n im = ImageGrab.grab()\n\n # Save the file\n im.save(\"sshot.png\")\n\n # Show image in a window\n # im.show()\n\nif __name__ == '__main__':\n # freeze_support()\n\n grab_full_screen()\n grab_screen_area(10, 10, 510, 510)\n","sub_path":"client_tools/svc/sshot.py","file_name":"sshot.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"365665869","text":"# -*- encoding: utf-8 -*-\nimport ast\nimport json\nfrom datetime import datetime\nfrom itertools import islice\nfrom pprint import pprint\n\nimport requests\nfrom esipy import App\nfrom esipy import EsiClient\nfrom esipy import EsiSecurity\nfrom esipy.exceptions import APIException\n\nfrom flask import Flask\nfrom flask import redirect\nfrom flask import render_template\nfrom flask import request\nfrom flask import session\nfrom flask import url_for\n\nfrom flask_login import LoginManager\nfrom flask_login import UserMixin\nfrom flask_login import current_user\nfrom flask_login import login_required\nfrom flask_login import login_user\nfrom flask_login import logout_user\n\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport config\nimport hashlib\nimport hmac\nimport logging\nimport random\nimport time\n\n# logger stuff\nlogger = logging.getLogger(__name__)\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n)\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.DEBUG)\nconsole.setFormatter(formatter)\nlogger.addHandler(console)\n\n# init app and load conf\napp = Flask(__name__)\napp.config.from_object(config)\ncache_timer = 0\n# init db\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\n# init flask login\nlogin_manager = 
LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\n\n### This is a proper change - different branch\n\n# -----------------------------------------------------------------------\n# Database models\n# -----------------------------------------------------------------------\nclass User(db.Model, UserMixin):\n    # our ID is the character ID from EVE API\n    character_id = db.Column(\n        db.BigInteger,\n        primary_key=True,\n        autoincrement=False\n    )\n    character_owner_hash = db.Column(db.String(255))\n    character_name = db.Column(db.String(200))\n    character_contacts_id = db.Column(db.String(20000))\n\n    # SSO Token stuff\n    access_token = db.Column(db.String(100))\n    access_token_expires = db.Column(db.DateTime())\n    refresh_token = db.Column(db.String(100))\n\n    def get_id(self):\n        \"\"\" Required for flask-login \"\"\"\n        return self.character_id\n\n    def get_sso_data(self):\n        \"\"\" Little \"helper\" function to get formatted data for esipy security\n        \"\"\"\n        return {\n            'access_token': self.access_token,\n            'refresh_token': self.refresh_token,\n            'expires_in': (\n                self.access_token_expires - datetime.utcnow()\n            ).total_seconds()\n        }\n\n    def update_token(self, token_response):\n        \"\"\" helper function to update token data from SSO response \"\"\"\n        self.access_token = token_response['access_token']\n        self.access_token_expires = datetime.fromtimestamp(\n            time.time() + token_response['expires_in'],\n        )\n        if 'refresh_token' in token_response:\n            self.refresh_token = token_response['refresh_token']\n\n\nclass Contact(db.Model):\n    character_id = db.Column(db.BigInteger, primary_key=True, autoincrement=False\n    )\n    character_contact_id = db.Column(db.BigInteger)\n    character_contact_standing = db.Column(db.String(100))\n\n    def update_standing(self, character_id, standing):\n        if self.character_contact_id == character_id:\n            self.character_contact_standing = standing\n            print(\"Successfully changed {} standing to {}\".format(character_id, standing))\n\n\nclass Killmails(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    solar_system = db.Column(db.Integer)\n    killmail_time = db.Column(db.String(20000, convert_unicode=True))\n    attackers = db.Column(db.String(20000))\n    zkb = db.Column(db.String(20000))\n    victim = db.Column(db.String(20000))\n    killmail_id = db.Column(db.Integer)\n\n    def insert_killmails(self):\n        pass\n\n    def parse_killmails(self):\n        pass\n\n\n# -----------------------------------------------------------------------\n# Flask Login requirements\n# -----------------------------------------------------------------------\n@login_manager.user_loader\ndef load_user(character_id):\n    \"\"\" Required user loader for Flask-Login \"\"\"\n    return User.query.get(character_id)\n\n\n# -----------------------------------------------------------------------\n# ESIPY Init\n# -----------------------------------------------------------------------\n# create the app\nesiapp = App.create(config.ESI_SWAGGER_JSON)\n\n# init the security object\nesisecurity = EsiSecurity(app=esiapp, redirect_uri=config.ESI_CALLBACK, client_id=config.ESI_CLIENT_ID, secret_key=config.ESI_SECRET_KEY)\n\n# init the client\nesiclient = EsiClient(security=esisecurity, cache=None, headers={'User-Agent': config.ESI_USER_AGENT})\n\n\n# -----------------------------------------------------------------------\n# Login / Logout Routes\n# -----------------------------------------------------------------------\ndef generate_token():\n    \"\"\"Generates a non-guessable OAuth token\"\"\"\n    
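# Draw 40 characters from SystemRandom, then digest them with HMAC-SHA256 keyed by the app secret.\n    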
chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n    rand = random.SystemRandom()\n    random_string = ''.join(rand.choice(chars) for _ in range(40))\n    return hmac.new(\n        config.SECRET_KEY.encode(),\n        random_string.encode(),\n        hashlib.sha256\n    ).hexdigest()\n\n\n@app.route('/sso/login')\ndef login():\n    \"\"\" this redirects the user to the EVE SSO login \"\"\"\n    token = generate_token()\n    session['token'] = token\n    return redirect(esisecurity.get_auth_uri(\n        scopes=['esi-wallet.read_character_wallet.v1 esi-characters.read_contacts.v1 esi-characters.write_contacts.v1'],\n        state=token,\n    ))\n\n\n@app.route('/sso/logout')\n@login_required\ndef logout():\n    logout_user()\n    return redirect(url_for(\"index\"))\n\n\n@app.route('/callback')\ndef callback():\n    \"\"\" This is where the user comes after he logged in SSO \"\"\"\n    # get the code from the login process\n    code = request.args.get('code')\n    token = request.args.get('state')\n\n    # compare the state with the saved token for CSRF check\n    sess_token = session.pop('token', None)\n    if sess_token is None or token is None or token != sess_token:\n        return 'Login EVE Online SSO failed: Session Token Mismatch', 403\n\n    # now we try to get tokens\n    try:\n        auth_response = esisecurity.auth(code)\n    except APIException as e:\n        return 'Login EVE Online SSO failed: %s' % e, 403\n\n    # we get the character informations\n    cdata = esisecurity.verify()\n\n    # if the user is already authed, we log him out\n    if current_user.is_authenticated:\n        logout_user()\n\n    # now we check in database, if the user exists\n    # actually we'd have to also check with character_owner_hash, to be\n    # sure the owner is still the same, but that's an example only...\n    try:\n        user = User.query.filter(\n            User.character_id == cdata['CharacterID'],\n        ).one()\n\n    except NoResultFound:\n        user = User()\n        user.character_id = cdata['CharacterID']\n\n    user.character_owner_hash = cdata['CharacterOwnerHash']\n    user.character_name = cdata['CharacterName']\n    user.update_token(auth_response)\n\n    # now the user is ready, so update/create it and log the user\n    try:\n        db.session.merge(user)\n        db.session.commit()\n\n        login_user(user)\n        session.permanent = True\n\n    except:\n        logger.exception(\"Cannot login the user - uid: %d\" % user.character_id)\n        db.session.rollback()\n        logout_user()\n\n    return redirect(url_for(\"index\"))\n\n\ndef chk_contacts(contacts_list, current_user):\n    iop_start = time.time()\n    get_contacts = esiapp.op['get_characters_character_id_contacts'](character_id=current_user.character_id)\n    current_contacts = esiclient.request(get_contacts)\n    if current_contacts is not None:\n        for contact in current_contacts.data:\n            if ('contact_type' in contact) and (contact['contact_type'] == 'character'):\n                for x in contacts_list[:]:\n                    if x == contact['contact_id']:\n                        contacts_list.remove(x)\n    if len(contacts_list) > 0:\n        if len(contacts_list) > 100:\n            popping_cucks = list(chunk(contacts_list, 100))\n            for x in popping_cucks:\n                x = list(set(x))\n                x = sorted(x)\n                set_contacts = esiapp.op['post_characters_character_id_contacts'](character_id=current_user.character_id, contact_ids=x, standing=float(-10),)\n                r = esiclient.request(set_contacts)\n\n        else:\n            for x in contacts_list:\n                set_contacts = esiapp.op['post_characters_character_id_contacts'] (\n                    character_id=current_user.character_id,\n                    contact_ids=x,\n                    standing=(-10)\n                )\n                r = esiclient.request(set_contacts)\n                for x in r:\n                    print(x)\n\n    pprint('Nothing in Contact List')\n\n\ndef chunk(l, n):\n    \"\"\"Yield successive n-sized chunks from l.\"\"\"\n    
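# e.g. list(chunk([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]\n    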
for i in range(0, len(l), n):\n        yield l[i:i + n]\n\n# -----------------------------------------------------------------------\n# Index Routes\n# -----------------------------------------------------------------------\n@app.route('/updatecontacts', methods=['POST'])\ndef updateContacts():\n    print('Updating Contacts')\n    return ''\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n    \"\"\"\n    Fill this in later\n    :return:\n    \"\"\"\n    wallet = None\n    ganked_kills = None\n    global cache_timer\n    code = False\n    list_of_all_attackers = []\n    npc_id = []\n    # if the user is authed, get the wallet content !\n    if current_user.is_authenticated:\n        # give the token data to esisecurity, it will check alone\n        # if the access token needs some update\n        esisecurity.update_token(current_user.get_sso_data())\n\n        op = esiapp.op['get_characters_character_id_wallet'](\n            character_id=current_user.character_id\n        )\n        # op2 = esiapp.op['get_characters_character_id_contacts'](character_id=current_user.character_id)\n        wallet = esiclient.request(op)\n        # contact_list = esiclient.request(op2)\n        # if contact_list is not None:\n        #     for x in contact_list.data:\n        #         if x['contact_type'] not in ('corporation', 'alliance'):\n        #             op3 = esiapp.op['get_characters_names'](character_ids=str(x['contact_id']))\n        #             character_name = esiclient.request(op3).data[0]['character_name']\n        #             x['character_name'] = character_name\n\n        ###############################\n        ### Begin Killmail Parsing ###\n        ###############################\n        # Kill ID url: https://zkillboard.com/api/killID/69334556/\n        # Ganked Url: https://zkillboard.com/api/ganked/\n        ct = time.time()\n\n        if (ct - cache_timer) > float(900):\n            print(ct - cache_timer)\n            ganked_kills = json.loads(requests.get('https://zkillboard.com/api/ganked/').content)\n            km = Killmails()\n            cc = Contact()\n            cache_timer = time.time()\n            for killmail in ganked_kills:\n                try:\n                    km.query.filter_by(killmail_id=killmail['killmail_id']).one()\n                except NoResultFound:\n                    km.attackers = str(killmail['attackers'])\n                    km.killmail_id = int(killmail['killmail_id'])\n                    km.killmail_time = str(killmail['killmail_time'])\n                    km.solar_system = int(killmail['solar_system_id'])\n                    km.victim = str(killmail['victim'])\n                    try:\n                        db.session.merge(km)\n                        db.session.commit()\n                    except:\n                        logger.exception(\"Cannot login the user - uid: %d\" % km.killmail_id)\n                        db.session.rollback()\n                    for index in killmail['attackers']:\n                        if 'alliance_id' in index and 99002775 == index['alliance_id']:\n                            if code:\n                                pass\n                            else:\n                                code = True\n                        elif ('alliance_id' in index and 99002775 != index['alliance_id']) or ('alliance_id' not in index):\n                            try:\n                                npc_id.append(index['character_id'])\n                            except Exception as e:\n                                print(e)\n                    if code:\n                        ## Add npc_id character ID's to contact list with -10 standings\n                        pass\n                    else:\n                        ## remove NPC id's and proceed to next killmail\n                        pass\n            chk_contacts(npc_id, current_user)\n            # for id in npc_id:\n            #     try:\n            #         Contact.query.filter_by(character_contact_id=id).one()\n            #     except NoResultFound:\n            #         cc.character_contact_id = id\n            #         cc.character_contact_standing = (-10)\n            #         try:\n            #             db.session.merge(cc)\n            #             db.session.commit()\n            #         except:\n            #             logger.exception('Cannot insert - {}'.format(cc.character_id))\n            #             db.session.rollback()\n            # for attacker in killmail['attackers']:\n            #     try:\n            #         list_of_all_attackers.append(\n            #             {'character_id': attacker['character_id'],\n            #              'corporation_id': attacker['corporation_id'],\n            #              'alliance_id': attacker['alliance_id']\n            #              }\n            #         )\n            #     except KeyError:\n            #         pass\n\n            print(ct - cache_timer)\n        # kms = Killmails.query.all()\n        # t = 
0\n # ## CODE. Alliance ID: 99002775\n # atk_list = []\n # old_cid = []\n # new_cids = []\n # for x in range(len(kms)):\n # l = len(ast.literal_eval(ast.literal_eval(json.dumps(kms[x].attackers))))\n # for a in range(l):\n # attks = ast.literal_eval(ast.literal_eval(json.dumps(kms[x].attackers)))[a]\n # try:\n # if attks['alliance_id'] == 99002775:\n # code = True\n # pass\n # except KeyError:\n # try:\n # old_cid.append(attks['character_id'])\n # except KeyError:\n # pass\n # if code:\n # for a in range(l):\n # cid = a['character_id']\n # new_cids.append(cid)\n #\n # op5 = esiapp.op['post_characters_character_id_contacts_contact_ids'](character_id='{}'.format(cid))\n # char = esiclient.request(op5)\n #\n #\n #\n #\n # pass\n # ast.literal_eval(ast.literal_eval(json.dumps(kms[x].attackers)))[x]['character_id']\n #\n # pass\n\n\n\n # op5 = esiapp.op['get_characters_character_id_ok'](character_id='{}'.format(cid))\n # try:\n # db.session.merge(km)\n # db.session.commit()\n # except:\n # logger.exception(\"Cannot login the user - uid: %d\" % km.killmail_id)\n # db.session.rollback()\n\n\n return render_template('base.html', **{\n 'wallet': wallet,\n })\n\n\n\nif __name__ == '__main__':\n # for killID in zkill:\n # killmail.append(json.loads(requests.get('https://zkillboard.com/api/killID/[]/'.format(killID)).content))\n app.run(port=4200, host=config.HOST)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"368908445","text":"# Example 1:\n#\n# sequence = ['1', '2', '3', '3', '6', '4', '5', '6', '5']\n# unique = []\n# [unique.append(item) for item in sequence if item not in unique]\n#\n# print(unique)\n\n# the same as above:\n# a = ['1', '2', '3', '3', '6', '4', '5', '6', '5']\n# b = []\n# c = [b.append(i) for i in a if i not in b]\n#\n#\n# print(b)\n\n\n\n# Example 2/can use SET to eliminate the duplicates:\n\nlist1 = ['b', 'c', 'd', 'b', 'c', 'a', 'a']\nlist2 = list(set(list1)) # eliminating duplicate entries. 
set is an unordered collection with no duplicate elements.\nlist2.sort(key=list1.index) # sort using the index or using the (key=len)\nprint(list2)\n","sub_path":"Unique_values_list.py","file_name":"Unique_values_list.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"637196022","text":"import sys\nimport random\nfrom tkinter import *\nimport time\nimport math\n\n#MAP_DIMENSION = 10\nCELL_SIZE = 45\n\n\nclass Create_map(object):\n    def __init__(self, w, Obstacle_number1, Obstacle_cell_Pos1, des_statex, des_statey, MAP_DIMENSION):\n        self.nrows = MAP_DIMENSION\n        self.ncolums = MAP_DIMENSION\n        self.Ob_number = Obstacle_number1\n        self.des_statex = des_statex\n        self.des_statey = des_statey\n        self.w = w\n        self.Create_Obstacles(self.Ob_number, Obstacle_cell_Pos1)\n        self.Draw_map()\n    \n    def Create_Obstacles(self, Ob_number1, Ob_Cell_Pos1):\n        for i in range(Ob_number1):\n            dx1, dx2, dy1, dy2 = self.Convert_to_Ob_Coor(Ob_Cell_Pos1[2*i],Ob_Cell_Pos1[2*i+1])\n            \n            rec1 = self.w.create_rectangle(dx1,dy1,dx2,dy2, fill = \"black\") \n    \n    # create map, destination, and obstacles\n    def Draw_map(self): \n        j = 20\n        for i in range(self.nrows+1):\n            self.w.create_line(20,j, CELL_SIZE*self.ncolums + 20,j, width = 2, fill = \"black\")\n            j += CELL_SIZE\n        j = 20\n        for i in range(self.ncolums+1):\n            self.w.create_line(j, 20, j, CELL_SIZE*self.nrows+20, width = 2, fill = \"black\")\n            j += CELL_SIZE\n        \n        # create a destination square on the map\n        des_x, des_y = self.Convert_to_coordinate(self.des_statex, self.des_statey)\n        self.w.create_rectangle(des_x-8, des_y-8, des_x+8, des_y+8, fill = \"red\")\n\n        #self.Create_Obstacles(w) \n    \n    # convert the grid cell position to a canvas coordinate\n    def Convert_to_coordinate(self, xloc, yloc):\n        return (xloc)*CELL_SIZE +43, (yloc)*CELL_SIZE+43\n    \n    def Convert_to_Ob_Coor(self, xCell, yCell):\n        return (xCell -1)*CELL_SIZE + 20, (xCell -1)*CELL_SIZE + 65, (yCell -1)*CELL_SIZE + 20, (yCell -1)*CELL_SIZE + 65 \n","sub_path":"Init_map.py","file_name":"Init_map.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"459272092","text":"# COMP3311 19T3 Assignment 3\r\n\r\nimport sys\r\nimport cs3311\r\nconn = cs3311.connect()\r\n\r\ntry:\r\n\tincommon = sys.argv[1]\r\nexcept:\r\n\tincommon = 2\r\ncodeDict = {}\r\n\r\ndef addToDict(d, code):\r\n\tletters = code[:4]\r\n\tnumbers = code[4:]\r\n\t#print(\"letters:{} numbers:{}\".format(letters, numbers))\r\n\tif numbers not in d:\r\n\t\td[numbers] = {\r\n\t\t\t'noTime' : 1,\r\n\t\t\t'courses' : [],\r\n\t\t}\r\n\t\td[numbers]['courses'].append(letters)\r\n\telif letters not in d[numbers]['courses']:\r\n\t\td[numbers]['noTime'] += 1\r\n\t\td[numbers]['courses'].append(letters)\r\n\r\ncur = conn.cursor()\r\ncur.execute(\r\n\t\"SELECT DISTINCT code, id \\\r\n\tFROM subjects \\\r\n\tORDER BY code\"\r\n)\r\n\r\nallCode = cur.fetchall()\r\n\r\nfor code in allCode:\r\n\tstringCode, stringId = code\r\n\taddToDict(codeDict, stringCode)\r\n\t\r\nfor key, value in sorted(codeDict.items()):\r\n\tif (int(incommon) == int(value['noTime'])):\r\n\t\tresult = \"{}:\".format(key)\r\n\t\tfor courseNo in value['courses']:\r\n\t\t\tresult += \" 
{}\".format(courseNo)\r\n\t\tprint(\"{}\".format(result))\r\n\r\ncur.close()\r\nconn.close()\r\n","sub_path":"q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"321006193","text":"import sys\nfrom PyQt5.QtWidgets import (QApplication, QWidget)\nfrom PyQt5.Qt import Qt\nimport pytimeparse\nimport datetime \nclass MainWindow(QWidget):\n def __init__(self):\n super().__init__()\n \n def keyPressEvent(self, event):\n try:\n print(chr(event.key()))\n except:\n pass\n\n def test_method(self):\n print('Space key pressed')\n\nif __name__ == '__main__':\n # app = QApplication(sys.argv)\n t = '01:05:23'\n t = datetime.datetime.strptime(t, '%H:%M:%S')\n sec = ((t.hour * 60) + t.minute) * 60 + t.second\n print(datetime.timedelta(seconds=sec))\n\n # s = datetime.timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)\n # print(datetime.timedelta(seconds=s.seconds))\n \n # demo = MainWindow()\n # demo.show()\n # demo.close()\n # sys.exit(app.exec_())","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"172907381","text":"#!/usr/bin/env python\r\n#\r\n# Command line tool to convert simple ESP-IDF Makefile & component.mk files to\r\n# CMakeLists.txt files\r\n#\r\nimport argparse\r\nimport subprocess\r\nimport re\r\nimport os.path\r\nimport glob\r\nimport sys\r\n\r\ndebug = False\r\n\r\ndef get_make_variables(path, makefile=\"Makefile\", expected_failure=False, variables={}):\r\n \"\"\"\r\n Given the path to a Makefile of some kind, return a dictionary of all variables defined in this Makefile\r\n\r\n Uses 'make' to parse the Makefile syntax, so we don't have to!\r\n\r\n Overrides IDF_PATH= to avoid recursively evaluating the entire project Makefile structure.\r\n \"\"\"\r\n variable_setters = [ (\"%s=%s\" % (k,v)) for (k,v) in variables.items() ]\r\n\r\n cmdline = [\"make\", \"-rpn\", \"-C\", path, \"-f\", makefile ] + variable_setters\r\n if debug:\r\n print(\"Running %s...\" % (\" \".join(cmdline)))\r\n\r\n p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n (output, stderr) = p.communicate(\"\\n\")\r\n\r\n if (not expected_failure) and p.returncode != 0:\r\n raise RuntimeError(\"Unexpected make failure, result %d\" % p.returncode)\r\n\r\n if debug:\r\n print(\"Make stdout:\")\r\n print(output)\r\n print(\"Make stderr:\")\r\n print(stderr)\r\n\r\n next_is_makefile = False # is the next line a makefile variable?\r\n result = {}\r\n BUILT_IN_VARS = set([\"MAKEFILE_LIST\", \"SHELL\", \"CURDIR\", \"MAKEFLAGS\"])\r\n\r\n for line in output.decode().split(\"\\n\"):\r\n if line.startswith(\"# makefile\"): # this line appears before any variable defined in the makefile itself\r\n next_is_makefile = True\r\n elif next_is_makefile:\r\n next_is_makefile = False\r\n m = re.match(r\"(?P[^ ]+) :?= (?P.+)\", line)\r\n if m is not None:\r\n if not m.group(\"var\") in BUILT_IN_VARS:\r\n result[m.group(\"var\")] = m.group(\"val\").strip()\r\n\r\n return result\r\n\r\ndef get_component_variables(project_path, component_path):\r\n make_vars = get_make_variables(component_path,\r\n os.path.join(os.environ[\"IDF_PATH\"],\r\n \"make\",\r\n \"component_wrapper.mk\"),\r\n expected_failure=True,\r\n variables = {\r\n \"COMPONENT_MAKEFILE\" : os.path.join(component_path, \"component.mk\"),\r\n \"COMPONENT_NAME\" : 
os.path.basename(component_path),\r\n \"PROJECT_PATH\": project_path,\r\n })\r\n\r\n if \"COMPONENT_OBJS\" in make_vars: # component.mk specifies list of object files\r\n # Convert to sources\r\n def find_src(obj):\r\n obj = os.path.splitext(obj)[0]\r\n for ext in [ \"c\", \"cpp\", \"S\" ]:\r\n if os.path.exists(os.path.join(component_path, obj) + \".\" + ext):\r\n return obj + \".\" + ext\r\n print(\"WARNING: Can't find source file for component %s COMPONENT_OBJS %s\" % (component_path, obj))\r\n return None\r\n\r\n srcs = []\r\n for obj in make_vars[\"COMPONENT_OBJS\"].split(\" \"):\r\n src = find_src(obj)\r\n if src is not None:\r\n srcs.append(src)\r\n make_vars[\"COMPONENT_SRCS\"] = \" \".join(srcs)\r\n else: # Use COMPONENT_SRCDIRS\r\n make_vars[\"COMPONENT_SRCDIRS\"] = make_vars.get(\"COMPONENT_SRCDIRS\", \".\")\r\n\r\n make_vars[\"COMPONENT_ADD_INCLUDEDIRS\"] = make_vars.get(\"COMPONENT_ADD_INCLUDEDIRS\", \"include\")\r\n\r\n return make_vars\r\n\r\n\r\ndef convert_project(project_path):\r\n if not os.path.exists(project_path):\r\n raise RuntimeError(\"Project directory '%s' not found\" % project_path)\r\n if not os.path.exists(os.path.join(project_path, \"Makefile\")):\r\n raise RuntimeError(\"Directory '%s' doesn't contain a project Makefile\" % project_path)\r\n\r\n project_cmakelists = os.path.join(project_path, \"CMakeLists.txt\")\r\n if os.path.exists(project_cmakelists):\r\n raise RuntimeError(\"This project already has a CMakeLists.txt file\")\r\n\r\n project_vars = get_make_variables(project_path, expected_failure=True)\r\n if not \"PROJECT_NAME\" in project_vars:\r\n raise RuntimeError(\"PROJECT_NAME does not appear to be defined in IDF project Makefile at %s\" % project_path)\r\n\r\n component_paths = project_vars[\"COMPONENT_PATHS\"].split(\" \")\r\n\r\n # \"main\" component is made special in cmake, so extract it from the component_paths list\r\n try:\r\n main_component_path = [ p for p in component_paths if os.path.basename(p) == \"main\" ][0]\r\n if debug:\r\n print(\"Found main component %s\" % main_component_path)\r\n main_vars = get_component_variables(project_path, main_component_path)\r\n except IndexError:\r\n print(\"WARNING: Project has no 'main' component, but CMake-based system requires at least one file in MAIN_SRCS...\")\r\n main_vars = { \"COMPONENT_SRCS\" : \"\"} # dummy for MAIN_SRCS\r\n\r\n # Remove main component from list of components we're converting to cmake\r\n component_paths = [ p for p in component_paths if os.path.basename(p) != \"main\" ]\r\n\r\n # Convert components as needed\r\n for p in component_paths:\r\n convert_component(project_path, p)\r\n\r\n # Look up project variables before we start writing the file, so nothing\r\n # is created if there is an error\r\n\r\n main_srcs = main_vars[\"COMPONENT_SRCS\"].split(\" \")\r\n # convert from component-relative to absolute paths\r\n main_srcs = [ os.path.normpath(os.path.join(main_component_path, m)) for m in main_srcs ]\r\n # convert to make relative to the project directory\r\n main_srcs = [ os.path.relpath(m, project_path) for m in main_srcs ]\r\n\r\n project_name = project_vars[\"PROJECT_NAME\"]\r\n\r\n # Generate the project CMakeLists.txt file\r\n with open(project_cmakelists, \"w\") as f:\r\n f.write(\"\"\"\r\n# (Automatically converted from project Makefile by convert_to_cmake.py.)\r\n\r\n# The following four lines of boilerplate have to be in your project's CMakeLists\r\n# in this exact order for cmake to work correctly\r\ncmake_minimum_required(VERSION 3.5)\r\n\r\n\"\"\")\r\n 
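# MAIN_SRCS lists the main component's sources, already converted above to be relative to the project directory.\r\n        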
f.write(\"set(MAIN_SRCS %s)\\n\" % \" \".join(main_srcs))\r\n f.write(\"\"\"\r\ninclude($ENV{IDF_PATH}/tools/cmake/project.cmake)\r\n\"\"\")\r\n f.write(\"project(%s)\\n\" % project_name)\r\n\r\n print(\"Converted project %s\" % project_cmakelists)\r\n\r\ndef convert_component(project_path, component_path):\r\n if debug:\r\n print(\"Converting %s...\" % (component_path))\r\n cmakelists_path = os.path.join(component_path, \"CMakeLists.txt\")\r\n if os.path.exists(cmakelists_path):\r\n print(\"Skipping already-converted component %s...\" % cmakelists_path)\r\n return\r\n v = get_component_variables(project_path, component_path)\r\n\r\n # Look up all the variables before we start writing the file, so it's not\r\n # created if there's an erro\r\n component_srcs = v.get(\"COMPONENT_SRCS\", None)\r\n component_srcdirs = None\r\n if component_srcs is not None:\r\n # see if we should be using COMPONENT_SRCS or COMPONENT_SRCDIRS, if COMPONENT_SRCS is everything in SRCDIRS\r\n component_allsrcs = []\r\n for d in v.get(\"COMPONENT_SRCDIRS\", \"\").split(\" \"):\r\n component_allsrcs += glob.glob(os.path.normpath(os.path.join(component_path, d, \"*.[cS]\")))\r\n component_allsrcs += glob.glob(os.path.normpath(os.path.join(component_path, d, \"*.cpp\")))\r\n abs_component_srcs = [os.path.normpath(os.path.join(component_path, p)) for p in component_srcs.split(\" \")]\r\n if set(component_allsrcs) == set(abs_component_srcs):\r\n component_srcdirs = v.get(\"COMPONENT_SRCDIRS\")\r\n\r\n component_add_includedirs = v[\"COMPONENT_ADD_INCLUDEDIRS\"]\r\n cflags = v.get(\"CFLAGS\", None)\r\n\r\n with open(cmakelists_path, \"w\") as f:\r\n f.write(\"set(COMPONENT_ADD_INCLUDEDIRS %s)\\n\\n\" % component_add_includedirs)\r\n\r\n f.write(\"# Edit following two lines to set component requirements (see docs)\\n\")\r\n f.write(\"set(COMPONENT_REQUIRES \"\")\\n\")\r\n f.write(\"set(COMPONENT_PRIV_REQUIRES \"\")\\n\\n\")\r\n\r\n if component_srcdirs is not None:\r\n f.write(\"set(COMPONENT_SRCDIRS %s)\\n\\n\" % component_srcdirs)\r\n f.write(\"register_component()\\n\")\r\n elif component_srcs is not None:\r\n f.write(\"set(COMPONENT_SRCS %s)\\n\\n\" % component_srcs)\r\n f.write(\"register_component()\\n\")\r\n else:\r\n f.write(\"register_config_only_component()\\n\")\r\n if cflags is not None:\r\n f.write(\"component_compile_options(%s)\\n\" % cflags)\r\n\r\n print(\"Converted %s\" % cmakelists_path)\r\n\r\n\r\ndef main():\r\n global debug\r\n\r\n parser = argparse.ArgumentParser(description='convert_to_cmake.py - ESP-IDF Project Makefile to CMakeLists.txt converter', prog='convert_to_cmake')\r\n\r\n parser.add_argument('--debug', help='Display debugging output',\r\n action='store_true')\r\n\r\n parser.add_argument('project', help='Path to project to convert (defaults to CWD)', default=os.getcwd(), metavar='project path', nargs='?')\r\n\r\n args = parser.parse_args()\r\n debug = args.debug\r\n print(\"Converting %s...\" % args.project)\r\n convert_project(args.project)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"RTOS/tools/cmake/convert_to_cmake.py","file_name":"convert_to_cmake.py","file_ext":"py","file_size_in_byte":9367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"604354447","text":"import pickle as cPickle\nimport numpy as np\nnp.set_printoptions(threshold=np.inf)\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.datasets import mnist, fashion_mnist\nfrom tensorflow.keras.models import Sequential\nfrom 
tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Dense, Dropout, Flatten, Activation\nfrom keras import backend as k\nimport csv\nimport Helper as helper\n\n#============ Multi-GPU ==========\n#import multi_gpu\nfrom tensorflow.keras.utils import multi_gpu_model\n\ndef baseline_model(strategy, tam_imagen, dropout, optimizer, activation, convolutional_layer_1, convolutional_layer_2, pooling_layer_1, pooling_layer_2):\n\t\"\"\"#Create the model\n\tmodel = Sequential()\n\t#First layer, input\n\tmodel.add(convolutional_layer_1)\n\t#model.add(activation)\n\tmodel.add(pooling_layer_1)\n\t#Second layer, intermediate\n\tmodel.add(convolutional_layer_2)\n\t#model.add(activation)\n\tmodel.add(pooling_layer_2)\n\t#ANN fully connected\n\tmodel.add(Flatten())\n\tmodel.add(Dense(128, activation='relu'))\n\tmodel.add(Dropout(rate=dropout))\n\tmodel.add(Dense(10, activation='softmax'))\n\t#============ Multi-GPU ============\n\tmodel = multi_gpu_model(model,gpus=2)\n\t#===================================\n\t#Compile the model\n\tmodel.compile(loss=keras.losses.categorical_crossentropy,\n \t\t\toptimizer=keras.optimizers.Adadelta(),\n \t\t\tmetrics=['accuracy'])\"\"\"\n\twith strategy.scope():\n\t\tmodel = Sequential()\n\t\tmodel.add(convolutional_layer_1)\n\t\tmodel.add(pooling_layer_1)\n\t\tmodel.add(convolutional_layer_2)\n\t\tmodel.add(pooling_layer_2)\n\t\tmodel.add(Flatten())\n\t\tmodel.add(Dense(128, activation='relu'))\n\t\tmodel.add(Dropout(rate=dropout))\n\t\tmodel.add(Dense(10, activation='softmax'))\n\n\t\tmodel.compile(loss=keras.losses.categorical_crossentropy,\n\t\t\t\toptimizer=optimizer,\n\t\t\t\tmetrics=['accuracy'])\n\t#strategy = tf.distribute.MirroredStrategy()\n\t#print('number of devices: {}'.format(strategy.num_replicas_in_sync))\n\treturn model\n\ndef fitModel(datos_imagenes_entrenamiento, datos_target_entrenamiento, tam_imagen, epochs, dropout, optimizer, activation, convolutional_layer_1, convolutional_layer_2, pooling_layer_1, pooling_layer_2):\n\t#batch_size_per_replica = batch_size / strategy.num_replicas_in_sync\n\t#N gpus\n\t#strategy = tf.distribute.MirroredStrategy()\n\t#2 gpus\n\tstrategy = tf.distribute.MirroredStrategy(devices=['/gpu:2','/gpu:3'])\n\t#1 gpu\n\t#strategy = tf.distribute.OneDeviceStrategy(device=\"/gpu:0\")\n\tbatch = 1000\n\tbatch_size = batch * strategy.num_replicas_in_sync \n\t#batch_size = 1000 \n\tbuffer_size = 10000\n\ttamData = len(datos_imagenes_entrenamiento)\n\ttrain_dataset = tf.data.Dataset.from_tensor_slices((datos_imagenes_entrenamiento,datos_target_entrenamiento))\n\ttrain_dataset = train_dataset.shuffle(buffer_size).repeat().batch(batch_size)\n\t#Build the model\n\tmodel = baseline_model(strategy, tam_imagen, dropout, optimizer, activation, convolutional_layer_1, convolutional_layer_2, pooling_layer_1, pooling_layer_2)\n\t#Start training with fit.\n\tprint('Training started:')\n\t#model.fit(datos_imagenes_entrenamiento, datos_target_entrenamiento, batch_size=batch_size, epochs=epochs, verbose=1)\n\tmodel.fit(train_dataset, epochs=epochs,steps_per_epoch=tamData/batch, verbose=1)\n\tprint('Training finished')\n\treturn model\n\n\ndef experimento(serie, epochs, learning_rate, training_rate, optimizer, activation, filter_size, stride, padding, pool, dropout, numero_paso):\n\t#Best image size found in the image-size experiment\n\ttam_imagen = 28\n\t#Best dropout value found in experiments, passed in by the genetic algorithm\n\t#valor_dropout = 0.4\n\t#The data, shuffled and split between train and test sets\n\tnum_clases = 10\n\t(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n\n\tk.clear_session()\n\tif k.image_data_format() == 'channels_first':\n\t\tx_train = x_train.reshape(x_train.shape[0], 1, tam_imagen, tam_imagen)\n\t\tx_test = x_test.reshape(x_test.shape[0], 1, tam_imagen, tam_imagen)\n\t\tinput_shape = (1, tam_imagen, tam_imagen)\n\t\tform = 'channels_first'\n\telse:\n\t\tx_train = x_train.reshape(x_train.shape[0], tam_imagen, tam_imagen, 1)\n\t\tx_test = x_test.reshape(x_test.shape[0], tam_imagen, tam_imagen, 1)\n\t\tinput_shape = (tam_imagen, tam_imagen, 1)\n\t\tform = 'channels_last'\n\n\tx_train = x_train.astype('float32')\n\tx_test = x_test.astype('float32')\n\tx_train /= 255\n\tx_test /= 255\n\n\tprint('x_train shape: ', x_train.shape)\n\tprint(x_train.shape[0], 'train samples.')\n\tprint(x_test.shape[0], 'test samples.')\n\n\ty_train = keras.utils.to_categorical(y_train, num_clases)\n\ty_test = keras.utils.to_categorical(y_test, num_clases)\n\topt = helper.optimizerFactory(optimizer, learning_rate)\n\tact = helper.activationFactory(activation)\n\n\ttype = '2d'\n\tfilter_number = 64\n\tkernel_size = (filter_size, filter_size)\n\tpadd = padding\n\n\tconv1 = helper.convolutionLayerFactory('2d_first', filter_number, kernel_size, input_shape, padding, activation, form)\n\tconv2 = helper.convolutionLayerFactory(type, filter_number, kernel_size, input_shape, padding, activation, form)\n\tpool1 = helper.poolingFactory(pool)\n\tpool2 = helper.poolingFactory(pool)\n\n\tmodel = fitModel(x_train, y_train, tam_imagen, epochs, dropout, opt, act, conv1, conv2, pool1, pool2)\n\tscore = model.evaluate(x_test, y_test, verbose=0)\n\treturn score, model\n","sub_path":"CNN2.py","file_name":"CNN2.py","file_ext":"py","file_size_in_byte":5277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"205843519","text":"from Repository.activity_repository import ActivityRepository, ActivityRepositoryException\r\nimport pickle\r\n\r\n\r\nclass ActivityBinaryFileRepositoryException(ActivityRepositoryException):\r\n    def __init__(self, message):\r\n        super().__init__(message)\r\n\r\n\r\nclass ActivityBinaryFileRepository(ActivityRepository):\r\n    def __init__(self, file_name='activities.pickle'):\r\n        super().__init__()\r\n        self._file_name = file_name\r\n        self._load_from_binary_file()\r\n\r\n    def add_activity(self, new_activity):\r\n        super().add_activity(new_activity)\r\n        self._save_to_binary_file()\r\n\r\n    def remove_activity(self, id_of_activity):\r\n        removed_activity = super().remove_activity(id_of_activity)\r\n        self._save_to_binary_file()\r\n\r\n        return removed_activity\r\n\r\n    def update_activity(self, index_of_activity, new_version_of_activity):\r\n        super().update_activity(index_of_activity, new_version_of_activity)\r\n        self._save_to_binary_file()\r\n\r\n    def _save_to_binary_file(self):\r\n        binary_file = open(self._file_name, 'wb')\r\n        try:\r\n            pickle.dump(self.chronological_list, binary_file)\r\n            binary_file.close()\r\n        except Exception as exception_message:\r\n            raise ActivityBinaryFileRepositoryException(str(exception_message))\r\n\r\n\r\n    def _load_from_binary_file(self):\r\n        \"\"\"\r\n        Load data from file\r\n        We assume file-saved data is valid\r\n        \"\"\"\r\n        binary_file = open(self._file_name, 'rb') # read binary\r\n        try:\r\n            self.chronological_list = pickle.load(binary_file)\r\n            binary_file.close()\r\n        except EOFError:\r\n            return\r\n        except Exception as exception_message:\r\n            raise 
ActivityBinaryFileRepositoryException(str(exception_message))","sub_path":"a10-911-Andrioaie-Daria/Repository/activity_binary_file_repository.py","file_name":"activity_binary_file_repository.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"629336891","text":"\"\"\"Unit and regression tests for pyAirviro.indico's series module.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport datetime\nimport math\nfrom StringIO import StringIO\n\nfrom pkg_resources import resource_stream\nfrom pyAirviro.system import DATE_FORMAT, TIME_FORMAT\nfrom pyAirviro.indico.series import (\n Ts,\n read_ts_from_stream,\n ShipTs,\n read_ship_time_series, read_ship_time_series_raw,\n)\nimport pyAirviro.tests\nfrom pyAirviro.tests.indico import AvIndicoTestCase\n\nDATETIME_FORMAT = DATE_FORMAT + ',' + TIME_FORMAT\n\nMULTI_TS_INSTREAM = \"\"\"\\\nSTN+ 0\n1 MTEMP002 1 0 #Celsius\n2 MWSPD010 1 0\n3 MWDIR010 1 0\n4 MTCCf002 1 0\n5 MDTMP008 1 0\nEOH\n111231,1700, 8.416, 5.67, 350.0, 90.0, -1.1381\n111231,1800, 8.45, 6.0, 350.0, 90.0, -1.1381\n111231,1900, 7.983, 6.44, 338.06, 90.0, -1.13\n111231,2000, 7.51, 7.13, 328.05, 90.0, -1.13\n111231,2100, 7.05, 8.0, 320.0, 90.0, -1.13\n111231,2200, 6.83, 8.63, 323.84, 90.0, -1.13\n111231,2300, 6.58, 9.30, 327.14, 90.0, -1.13\nEOF\n\"\"\"\n\nSHIP_TS_SAMPLE = \"\"\"\\\n140101, 0000, 272771, 4855765, 0.00, 1, 11, 11,\n140101, 0005, 272696, 4855832, 0.34, 1, 11, 11,\n140101, 0010, 272716, 4855806, 0.11, 1, 11, 11,\n140101, 0015, 272802, 4855736, 0.37, 1, 11, 11,\n140101, 0020, 272810, 4855703, 0.11, 1, 11, 11,\n140101, 0025, 272743, 4855789, 0.36, 1, 11, 11,\n140101, 0030, 272683, 4855824, 0.23, 1, 11, 11,\n140101, 0035, , , , , , ,\n140101, 0040, , , , , , ,\n140101, 0045, 272683, 4855824, 0, 3, 11, 11,\n\"\"\"\n\nclass TsTests(AvIndicoTestCase):\n\n \"\"\"Test time-series management.\"\"\"\n\n def test_read_ts_from_stream(self):\n stream = StringIO(MULTI_TS_INSTREAM)\n ts_list = read_ts_from_stream(stream)\n ts0 = ts_list[0]\n ts1 = ts_list[1]\n self.assertEqual(ts0.begin, ts1.begin)\n self.assertEqual(ts0.end, ts1.end)\n self.assertEqual(ts0.data[0]['datetime'],\n datetime.datetime.strptime(\"111231,1700\",\n DATETIME_FORMAT))\n\n self.assertEqual(ts0.data[0]['value'],\n 8.416)\n\n def test_shift_time(self):\n stream = StringIO(MULTI_TS_INSTREAM)\n ts = read_ts_from_stream(stream)[0]\n begin = datetime.datetime.strptime('111231,1800', DATETIME_FORMAT)\n end = datetime.datetime.strptime('111231,2000', DATETIME_FORMAT)\n dt = datetime.timedelta(hours=1)\n\n ts.shift_time(dt,\n begin=begin,\n end=end)\n\n self.assertEqual(ts.data[1]['datetime'], begin + dt)\n self.assertEqual(ts.data[3]['datetime'], end + dt)\n self.assertEqual(ts.data[4]['datetime'], end + dt)\n\n\nclass ShipTsTests(AvIndicoTestCase):\n \"\"\"Test ShipTs class.\"\"\"\n\n def test_read_shiptsget_output(self):\n start_time = datetime.datetime(year=2014, month=1, day=1, hour=0)\n stop_time = datetime.datetime(year=2014, month=1, day=3, hour=0)\n\n ts = ShipTs(begin=start_time,\n end=stop_time,\n res=300,\n mmsi=None)\n\n with resource_stream(__name__, 'data/shiptsget_output.txt') as stream:\n ts.read(instream=stream)\n\n self.assertEqual(ts.data['date'][0], start_time.strftime(\"%y%m%d\"))\n self.assertEqual(ts.data['time'][0], start_time.strftime(\"%H%M\"))\n\n def test_index(self):\n\n start_time = datetime.datetime(year=2014, month=1, day=1, hour=0)\n stop_time = datetime.datetime(year=2014, month=1, day=3, 
hour=0)\n\n ts = ShipTs(begin=start_time,\n end=stop_time,\n res=300,\n mmsi=None)\n\n with resource_stream(__name__, 'data/shiptsget_output.txt') as stream:\n ts.read(instream=stream)\n\n self.assertEqual(\n ts.index(datetime.datetime(year=2014,\n month=1,\n day=2,\n hour=1,\n minute=5)\n ),\n 301\n )\n\n def test_nvals(self):\n start_time = datetime.datetime(year=2014, month=1, day=1, hour=0)\n stop_time = datetime.datetime(year=2014, month=1, day=3, hour=0)\n\n ts = ShipTs(begin=start_time,\n end=stop_time,\n res=300,\n mmsi=None)\n\n ts.read(instream=StringIO(SHIP_TS_SAMPLE))\n\n self.assertEqual(\n ts.nvals(),\n 8\n )\n\n\nclass ShipTimeSeriesTests(pyAirviro.tests.TestCase):\n\n \"\"\"Unit and regression tests for reading ship time series.\"\"\"\n\n def test_read_ship_time_series(self):\n stream = StringIO(SHIP_TS_SAMPLE)\n data = read_ship_time_series(stream)\n self.assertEqual(\n tuple(data[0]),\n ('140101', '0000', 272771., 4855765., 0., 1, 11, 11),\n )\n self.assertEqual(\n tuple(data[1]),\n ('140101', '0005', 272696., 4855832., 0.34, 1, 11, 11),\n )\n self.assertEqual(data[7][0], '140101')\n self.assertEqual(data[7][1], '0035')\n self.assertTrue(math.isnan(data[7][2]))\n self.assertTrue(math.isnan(data[7][3]))\n self.assertTrue(math.isnan(data[7][4]))\n self.assertEqual(data[7][5], -1)\n self.assertEqual(data[7][6], -1)\n self.assertEqual(data[7][7], -1)\n\n def test_read_ship_time_series_raw(self):\n stream = StringIO(SHIP_TS_SAMPLE)\n data = read_ship_time_series_raw(stream)\n self.assertEqual(\n data[0],\n ['140101', '0000', '272771', '4855765', '0.00', '1', '11', '11'],\n )\n self.assertEqual(\n data[1],\n ['140101', '0005', '272696', '4855832', '0.34', '1', '11', '11'],\n )\n self.assertEqual(\n data[7],\n ['140101', '0035', '', '', '', '', '', ''],\n )\n","sub_path":"pyAirviro/tests/indico/test_series.py","file_name":"test_series.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"346553375","text":"epsilon = 0.01\ny = 120.0\nguess = y/2.0\nnum_guesses = 0\n\n# Using guess - (g**2 - y)/(2g)\n\nwhile abs(guess*guess - y) >= epsilon:\n num_guesses += 1\n guess = guess - (((guess**2) - y) / (2*guess))\nprint('numGuesses = ', str(num_guesses))\nprint('Square root of ' + str(y) + ' is about ' + str(guess))\n","sub_path":"day3/newton-raphson.py","file_name":"newton-raphson.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"216413286","text":"# coding: utf-8\n\nimport sys\nimport numpy as np\n\n\nif __name__ == \"__main__\":\n try:\n experiment_id = 0\n seed = int(sys.argv[1])\n features_set = \"set 2\"\n magnitude = float(sys.argv[2])\n experiment_desc = \"two_missing_features_%f_magnitude\" % magnitude\n version = magnitude\n print(\n \"running experiment {expid} with seed {seed}\".format(\n expid=experiment_id, seed=seed\n )\n )\n db_name = sys.argv[3]\n except IndexError:\n print(\n \"\"\"Invalid usage, you should use the following syntax:\n\n python missing_data_experiment.py seed missing_feature_magnitude db_name.db\n\n where both seed is an integer and missing_feature_magnitude is a float.\n In this experiment, missing_feature_magnitude controls the missing\n features RI order of magnitude.\n \n db_name should be the path/name to the database created with \n utils/databaseconfig.py\"\"\"\n )\n\n # Simulation parameters\n n_features = 16\n lags = 49\n n_cases = 2000\n n_intervals = 
750\n effects_str = \"\"\"null_effect = [ce.constant_effect(1)] * 7\n constant_effect = ce.constant_effect(1.5)\n early_effect = ce.bell_shaped_effect(2, 20)\n intermediate_effect = ce.bell_shaped_effect(2, 30, 15, 15)\n late_effects = [ce.increasing_effect(2, curvature_type=1)[::-1],\n ce.increasing_effect(2, curvature_type=1),\n ce.increasing_effect(2, curvature_type=2),\n ce.increasing_effect(2, curvature_type=4)]\n\n missing_effect_1 = ce.constant_effect(%f)\n missing_effect_2 = ce.bell_shaped_effect(%f, 30, 15, 15)\n sim_effects = [*null_effect, constant_effect, early_effect,\n intermediate_effect, *late_effects, missing_effect_1, \n missing_effect_2]\"\"\" % (\n magnitude,\n magnitude,\n )\n hidden_features = [14, 15]\n time_drift = None\n time_drift_str = \"time_drift = lambda t: np.log(8 * np.sin(.01 * t) + 9)\"\n n_corr = n_features\n\n import json\n from pickle import dumps\n from datetime import datetime\n from time import time\n\n # Database\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker\n from utils.database_config import (\n Base,\n Experiment,\n ExperimentResult,\n Simulation,\n ConvSCCSModel,\n )\n\n # Utility functions\n from utils.preprocessing import to_nonparasccs\n from utils.metrics import squared_error, absolute_error,\\\n absolute_percentage_error\n\n # Simulation and model\n from tick.survival import SimuSCCS, ConvSCCS\n from tick.survival.simu_sccs import CustomEffects\n\n # init DB connection\n engine = create_engine(\"sqlite:///\" + db_name)\n Base.metadata.bind = engine\n DBsession = sessionmaker(bind=engine)\n session = DBsession()\n\n # --- Simulate data\n # Setup sim\n n_cols = lags + 1\n ce = CustomEffects(n_cols)\n null_effect = [ce.constant_effect(1)] * 7\n constant_effect = ce.constant_effect(1.5)\n early_effect = ce.bell_shaped_effect(2, 20)\n intermediate_effect = ce.bell_shaped_effect(2, 30, 15, 15)\n late_effects = [\n ce.increasing_effect(2, curvature_type=1)[::-1],\n ce.increasing_effect(2, curvature_type=1),\n ce.increasing_effect(2, curvature_type=2),\n ce.increasing_effect(2, curvature_type=4),\n ]\n\n missing_effect_1 = ce.constant_effect(magnitude)\n missing_effect_2 = ce.bell_shaped_effect(magnitude, 30, 15, 15)\n sim_effects = [\n *null_effect,\n constant_effect,\n early_effect,\n intermediate_effect,\n *late_effects,\n missing_effect_1,\n missing_effect_2,\n ]\n n_features = len(sim_effects)\n coeffs = [np.log(s) for s in sim_effects]\n simu_n_lags = np.repeat(49, n_features).astype(\"uint64\")\n\n n_missing_features = 2\n hidden_features = [n_features - (i + 1) for i in range(n_missing_features)]\n sim = SimuSCCS(\n int(n_cases),\n n_intervals,\n n_features,\n simu_n_lags,\n time_drift=time_drift,\n n_correlations=n_features,\n coeffs=coeffs,\n seed=seed,\n verbose=False,\n hidden_features=hidden_features,\n )\n\n features, censored_features, labels, censoring, coeffs = sim.simulate()\n [coeffs.pop(n_features - i - 1) for i in range(n_missing_features)]\n n_features = n_features - n_missing_features\n n_lags = np.repeat(49, n_features).astype(\"uint64\")\n\n adjacency_matrix = sim.hawkes_exp_kernels.adjacency.tobytes()\n\n # Convert to DataFrame format\n df = to_nonparasccs(censored_features, labels, censoring, lags)\n df[\"indiv\"] = df.index\n df = df.astype(\"int64\")\n\n exposures_frequencies = df.drugid.value_counts()\n\n exp_log = Experiment(\n experiment_id=experiment_id,\n version=version,\n description=experiment_desc,\n features_set=features_set,\n effects=effects_str,\n 
time_drift=time_drift_str,\n n_features=n_features,\n n_intervals=n_intervals,\n n_cases=n_cases,\n sim_n_lags=n_lags,\n sim_n_corr=n_corr,\n sim_coeffs_obj=dumps(coeffs),\n )\n\n session.merge(exp_log)\n\n sim_log = Simulation(\n experiment_id=experiment_id,\n version=version,\n seed=seed,\n sim_adjacency_matrix=dumps(adjacency_matrix),\n features_frequency=dumps(exposures_frequencies),\n )\n\n session.merge(sim_log)\n session.commit()\n\n start = time()\n lrn = ConvSCCS(\n n_lags=n_lags, penalized_features=np.arange(n_features), verbose=False\n )\n C_tv_range = (1, 5)\n C_group_l1_range = (1, 5)\n fitted_coeffs, cv_track = lrn.fit_kfold_cv(\n censored_features,\n labels,\n censoring,\n C_tv_range=C_tv_range,\n C_group_l1_range=C_group_l1_range,\n confidence_intervals=False,\n ) # WARNING: no bootstrap in this simulation\n elapsed_time = time() - start\n\n model_id = \"ConvSCCS\"\n model_log = ConvSCCSModel(\n experiment_id=experiment_id,\n version=version,\n seed=seed,\n model_id=model_id,\n run_time=elapsed_time,\n model_params=str(cv_track.model_params),\n cv_track=dumps(cv_track),\n )\n session.add(model_log)\n\n # Send results to DB\n se_age = -1\n ae_age = -1\n ape_age = -1\n for drug_id in range(n_features):\n # True coefficients\n c = np.exp(coeffs[drug_id])\n # ConvSCCS estimate\n d_fit = np.exp(fitted_coeffs[drug_id])\n\n se_features = squared_error(c, d_fit)\n ae_features = absolute_error(c, d_fit)\n ape_features = absolute_percentage_error(c, d_fit)\n result = ExperimentResult(\n experiment_id=experiment_id,\n version=version,\n seed=seed,\n model_id=model_id,\n drug_id=drug_id,\n insert_date=datetime.now(),\n se_features=se_features,\n se_age=se_age,\n ae_features=ae_features,\n ae_age=ae_age,\n ape_features=ape_features,\n ape_age=ape_age,\n )\n session.add(result)\n\n session.commit()\n\n # --- Close DB connexion\n session.close()\n","sub_path":"experiments_scripts/experiments/two_missing_features_experiment.py","file_name":"two_missing_features_experiment.py","file_ext":"py","file_size_in_byte":7252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"492153342","text":"#!/usr/bin/python\nfrom service.message_define import *\nfrom storage_pool_config import *\n\nclass StoragePoolStatus(object):\n def __init__(self):\n self.name = \"\"\n self.uuid = \"\"\n self.type = StoragePoolConfig.pool_type_slow\n self.page_size = StoragePoolConfig.page_size\n ##list of data node name\n self.nodes = []\n self.available_volume = 0\n self.total_volume = 0\n self.enable = True\n\n @staticmethod\n def packToMessage(msg, data_list):\n name = []\n uuid = []\n pool_type = []\n page_size = []\n nodes = []\n available = []\n total_volume = []\n status = []\n for data in data_list:\n name.append(data.name)\n uuid.append(data.uuid)\n pool_type.append(data.type)\n page_size.append(data.page_size)\n nodes.append(data.nodes)\n available.append(data.available_volume)\n total_volume.append(data.total_volume)\n if data.enable:\n status.append(1)\n else:\n status.append(0)\n\n msg.setStringArray(ParamKeyDefine.name, name)\n msg.setStringArray(ParamKeyDefine.uuid, uuid)\n msg.setUIntArray(ParamKeyDefine.type, pool_type)\n msg.setUIntArray(ParamKeyDefine.size, page_size)\n\n msg.setStringArrayArray(ParamKeyDefine.node_name, nodes)\n msg.setUIntArray(ParamKeyDefine.available, available)\n msg.setUIntArray(ParamKeyDefine.total_volume, total_volume)\n msg.setUIntArray(ParamKeyDefine.status, status)\n\n @staticmethod\n def unpackFromMessage(msg):\n name = 
msg.getStringArray(ParamKeyDefine.name)\n uuid = msg.getStringArray(ParamKeyDefine.uuid)\n pool_type = msg.getUIntArray(ParamKeyDefine.type)\n page_size = msg.getUIntArray(ParamKeyDefine.size)\n\n nodes = msg.getStringArrayArray(ParamKeyDefine.node_name)\n available_volume = msg.getUIntArray(ParamKeyDefine.available)\n total_volume = msg.getUIntArray(ParamKeyDefine.total_volume)\n status = msg.getUIntArray(ParamKeyDefine.status)\n\n result = []\n for i in range(len(name)):\n pool = StoragePoolStatus()\n pool.name = name[i]\n pool.uuid = uuid[i]\n pool.type = pool_type[i]\n pool.page_size = page_size[i]\n pool.nodes = nodes[i]\n pool.available_volume = available_volume[i]\n pool.total_volume = total_volume[i]\n if 1 == status[i]:\n pool.enable = True\n else:\n pool.enable = False\n \n result.append(pool)\n \n return result\n","sub_path":"zctool_v1.25_共享存储/shared/data/storage_pool_status.py","file_name":"storage_pool_status.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"1743057","text":"import os\r\nimport plotly.graph_objs as go\r\nfrom plotly.graph_objs import Scatter\r\nfrom plotly.graph_objects import Line\r\nimport plotly as py\r\nfrom datetime import datetime\r\nimport scipy.io as scio\r\nfrom plotly.subplots import make_subplots\r\nimport numpy as np\r\nimport pandas as pd\r\nimport plotly.express as px\r\npyplot = py.offline.plot\r\n\r\n\r\n\r\n\r\nDistanceData = pd.DataFrame([98.44,\t98.52\t,97.58,\t97.3,\t98.12\t,97.0],\r\n columns=['Distance'],\r\n index=np.arange(5, 35, 5))\r\nfig = go.Figure([\r\n go.Bar(name='User1', x=DistanceData.index, y=[98.44,98.52,97.58,97.3,98.37,97.72], marker_color='#9FBCC2',width=2),\r\n go.Bar(name='User2', x=DistanceData.index, y=[97.86,98.33,97.71,97.5,97.14,92.92], marker_color='rgb(175,199,191)',width=2),\r\n ]\r\n )\r\n\r\n\r\nfig['layout'].update(\r\nheight=520 ,width = 620,\r\nfont=dict(\r\nfamily=\"Time New Roman\", # 所有标题文字的字体\r\nsize = 32 , # 所有标题文字的大小\r\n),\r\n legend=dict(\r\n orientation=\"h\", # 将legend改为横排放置\r\n yanchor=\"bottom\",\r\n y=1.02,\r\n xanchor=\"right\",\r\n x=1,\r\n # bordercolor =\"black\",\r\n # borderwidth = 2,\r\n font=dict(\r\n size=32, # 25\r\n color='black', )\r\n ),\r\n)\r\nfig.update_xaxes(showgrid=True,#将网格去掉\r\n #showline=True,\r\n linewidth=1.5,\r\n linecolor='black', # 将颜色设定为黑色\r\n mirror=True,\r\n gridcolor='#F2F2F2',\r\n\r\n ) # 加上这个 四周都是黑色 ,不加的话只有左下两条线黑色 (就是镜像过去)\r\nfig.update_yaxes(showgrid=True,\r\n #showline=True,\r\n linewidth=1.5,\r\n linecolor='black',\r\n mirror=True,\r\n gridcolor='#F2F2F2',\r\n\r\n )\r\nfig[\"layout\"][\"xaxis\"].update({\"title\": \"Distance (cm)\",\"titlefont\": {\"size\": 32}})# \"titlefont\": {\"color\": \"pink\"}\r\nfig[\"layout\"][\"yaxis\"].update({\"title\": \"Accuracy (%)\",\"titlefont\": {\"size\": 32}})\r\nfig[\"layout\"][\"template\"] = \"simple_white\"\r\n# fig.write_image('images/DiatanceData.eps')\r\nhtml_path = \"../htmls/ImpactofDistance.html\"\r\nfig.write_image('../images/ImpactofDistance.eps')\r\npyplot(fig,filename=html_path)","sub_path":"draw/code/DistanceData.py","file_name":"DistanceData.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"574247794","text":"# Copyright 2020 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"Test the derive and get_vcpus_per_osd methods of the HCI module\"\"\"\n\nimport yaml\n\nfrom tripleo_ansible.ansible_plugins.modules import tripleo_derive_hci_parameters as derive_params\nfrom tripleo_ansible.tests import base as tests_base\n\n\nclass TestTripleoDeriveHciParameters(tests_base.TestCase):\n \"\"\"Test the derive method of the HCI module\"\"\"\n\n def test_derive_positive(self):\n \"\"\"Test the derive method with valid input and confirm expected result\n \"\"\"\n der = derive_params.derive(mem_gb=256, vcpus=4, osds=1,\n average_guest_memory_size_in_mb=2048,\n average_guest_cpu_utilization_percentage=20)\n self.assertFalse(der['failed'])\n self.assertEqual(der['nova_reserved_mem_mb'], 56320)\n self.assertEqual(der['cpu_allocation_ratio'], 3.75)\n\n def test_derive_negative(self):\n \"\"\"Test the derive method with invalid input\n \"\"\"\n der = derive_params.derive(mem_gb=2, vcpus=1, osds=1,\n average_guest_memory_size_in_mb=0,\n average_guest_cpu_utilization_percentage=0)\n self.assertTrue(der['failed'])\n\n def test_vcpu_ratio(self):\n \"\"\"Test the get_vcpus_per_osd method and confirm expected result\n \"\"\"\n\n def mock_ironic():\n \"\"\"Return a dictionary with partial disks section of introspection\n \"\"\"\n return {'data':\n {'inventory':\n {'disks':\n [\n {'by_path': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:5',\n 'name': '/dev/sda',\n 'rotational': True,\n 'wwn': None},\n {'by_path': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:4',\n 'name': '/dev/sdb',\n 'rotational': True,\n 'wwn': None},\n {'by_path': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:3',\n 'name': '/dev/sdc',\n 'rotational': True,\n 'wwn': None},\n {'by_path': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:2',\n 'name': '/dev/sdd',\n 'rotational': True,\n 'wwn': None},\n {'by_path': '/dev/disk/by-path/pci-0000:00:01.1-ata-1',\n 'name': '/dev/sde',\n 'rotational': True,\n 'wwn': None}\n ]\n }\n }\n }\n\n def get_ironic(flavor='hdd'):\n \"\"\"Returns a dictionary which mocks ironic introspection\n data. Uses mock introspection data as the source but then\n applies flavor variations to make it look like the system\n which was introspected has SSD or NVMe SSDs.\n \"\"\"\n ironic = mock_ironic()\n if flavor in 'ssd':\n for dev in ironic['data']['inventory']['disks']:\n dev['rotational'] = False\n if flavor in 'nvme':\n i = 1\n for dev in ironic['data']['inventory']['disks']:\n nvm_name = \"/dev/nvme0n%i\" % i\n dev['name'] = nvm_name\n dev['rotational'] = False\n i += 1\n return ironic\n\n def get_env(flavor='hdd', osds_per_device=1):\n \"\"\"Returns a dictionary which mocks the content of the\n tripleo_environment_parameters CephAnsibleDisksConfig\n where the deployer requests four OSDs using device\n list within ceph-ansible of differing flavor types.\n The flavor may be set to one of hdd, ssd, by_path,\n or nvme and it is also possible to set the\n osds_per_device (usually used with NVMe). 
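# Illustrative sketch only -- not the module's real implementation, which is not shown
# in this test file: a device-classification heuristic consistent with what this test
# expects (see ratio_map below: 1 vCPU per OSD for HDDs or unresolvable by-path names,
# 4 for SSDs, 3 for NVMe). The helper name and exact rules are assumptions.
def guess_vcpus_per_osd(disks, devices):
    by_name = {d['name']: d for d in disks}  # index the Ironic inventory by device name
    if any(dev.startswith('/dev/nvme') for dev in devices):
        return 3  # NVMe flavor
    known = [by_name[dev] for dev in devices if dev in by_name]
    if known and all(not d['rotational'] for d in known):
        return 4  # all-SSD flavor
    return 1  # rotational disks, or by-path names that cannot be matched to the inventory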
Uses mock\n introspection data in molecule to build the device\n list with flavor variations.\n \"\"\"\n ironic = mock_ironic()\n devices = []\n i = 1\n for dev in ironic['data']['inventory']['disks']:\n if flavor in ('hdd', 'ssd'):\n devices.append(dev['name'])\n elif flavor in 'by_path':\n devices.append(dev['by_path'])\n elif flavor in 'nvme':\n nvm_name = \"/dev/nvme0n%i\" % i\n devices.append(nvm_name)\n i += 1\n if i > 4:\n break\n disks_config = {\n \"osd_objectstore\": \"bluestore\",\n \"osd_scenario\": \"lvm\",\n \"devices\": devices\n }\n if osds_per_device > 1:\n disks_config['osds_per_device'] = osds_per_device\n env = {\n \"CephAnsibleDisksConfig\": disks_config\n }\n return env\n\n ratio_map = {\n 'hdd': 1,\n 'ssd': 4,\n 'by_path': 1,\n 'nvme': 3\n }\n for flavor in ratio_map:\n if flavor == 'nvme':\n osds_per_device = 4\n else:\n osds_per_device = 0\n env = get_env(flavor, osds_per_device)\n ironic = get_ironic(flavor)\n num_osds = len(env['CephAnsibleDisksConfig']['devices'])\n vcpu_ratio, vcpu_msg, vcpu_warn = derive_params.get_vcpus_per_osd(ironic,\n env,\n num_osds)\n self.assertEqual(vcpu_ratio, ratio_map[flavor])\n self.assertIsNotNone(vcpu_msg)\n self.assertFalse(vcpu_warn)\n\n def test_derive_without_workload(self):\n \"\"\"Test the derive method without passing the expected average\n guest cpu and mem utilization and confirm expected result\n \"\"\"\n der = derive_params.derive(mem_gb=256, vcpus=56, osds=16)\n self.assertFalse(der['failed'])\n self.assertEqual(der['nova_reserved_mem_mb'], 81920)\n","sub_path":"tripleo_ansible/tests/modules/test_derive_hci_parameters.py","file_name":"test_derive_hci_parameters.py","file_ext":"py","file_size_in_byte":6869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"500009711","text":"\n'''\nGiven an array of words such as [\"cat\", \"dog\", \"dog\"]\nand given an array of patterns such as [\"a\", \"b\", \"b\"]\nreturn True if the words map to patterns\n(such as cat maps to a, dog maps to b, and we return True since all patterns match)\n\nExample:\n[\"cat\", \"dog\", \"dog\"]\n[\"a\", \"b\", \"b\"]\nreturns True\n\n[\"hat\", \"mat\", \"kick\"]\n[\"a\", \"b\", \"a\"]\nreturns False\n'''\n\n#O(n) #O(n) space\ndef encodeString(pattern,s):\n d = {}\n\n if len(s) != len(pattern): \n return False\n \n for i in range(len(pattern)):\n if pattern[i] not in d:\n d[pattern[i]] = s[i]\n else:\n print(d[pattern[i]])\n if d[pattern[i]] != s[i]:\n return False \n return True\n\n # mapp = {} \n # reversemap = {} \n\n # if len(s) != len(pattern): \n # return False \n \n # for i in range(0, len(pattern)): \n # if pattern[i] not in mapp.keys() and s[i] not in reversemap.keys(): \n # mapp[pattern[i]] = s[i]\n # reversemap[s[i]] = pattern[i]\n # else: \n # if mapp[pattern[i]] != s[i] or reversemap[s[i]] != pattern[i]: \n # return False \n # return True\n\n\npattern = [\"a\", \"b\", \"b\"]\ns =[\"cat\",\"dog\", \"dog\"]\na = [\"hat\", \"mat\", \"kick\"]\nd = [\"a\", \"b\", \"a\"]\n\nprint(encodeString(pattern,s)) #true \nprint(encodeString(d,a)) #false \n\n","sub_path":"OA/CapitalOne/encodeString.py","file_name":"encodeString.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"141162063","text":"from PIL import Image\r\nimport numpy as np\r\n\r\n\r\ndef get_mosaic(image, size, gradation):\r\n image_arr = np.array(Image.open(image)).astype(int)\r\n limit = 255 // gradation\r\n image_len = 
len(image_arr)\r\n image_h = len(image_arr[0])\r\n i = 0\r\n while i < image_len:\r\n j = 0\r\n while j < image_h:\r\n segment = image_arr[i: i + size, j: j + size]\r\n sum_c = np.sum(segment)\r\n avg = int(sum_c // (size ** 2))\r\n set_color(int(avg // limit) * limit / 3, image_arr, size, i, j)\r\n j += size\r\n i += size\r\n return Image.fromarray(np.uint8(image_arr))\r\n\r\n\r\ndef set_color(new_c, matrix, size, i, j):\r\n for x in range(i, i + size):\r\n for y in range(j, j + size):\r\n for z in range(3):\r\n matrix[x][y][z] = new_c\r\n\r\n\r\nget_mosaic(input(\"Введите имя файла изображения: \"),\r\n int(input(\"Введите размер мозаики: \")),\r\n int(input(\"Введите размер градации: \"))).save('res.jpg')","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"144036219","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pytest\nfrom example import fizzbuzz\n\n\n@pytest.mark.parametrize(\n \"n, expect\",\n [\n (1, \"1\"),\n (2, \"2\"),\n (3, \"Fizz\"),\n (4, \"4\"),\n (5, \"Buzz\"),\n (6, \"Fizz\"),\n (7, \"7\"),\n (8, \"8\"),\n (9, \"Fizz\"),\n (10, \"Buzz\"),\n (11, \"11\"),\n (12, \"Fizz\"),\n (13, \"13\"),\n (14, \"14\"),\n (15, \"FizzBuzz\"),\n (16, \"16\"),\n ],\n)\ndef test_fizzbuzz(n, expect):\n assert fizzbuzz.fizzbuzz(n) == expect\n\n\nif __name__ == \"__main__\":\n pytest.main([\"-v\", __file__])\n","sub_path":"tests/test_fizzbuzz.py","file_name":"test_fizzbuzz.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"410388171","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 25 13:52:02 2018\r\n\r\n@author: sunka\r\n\"\"\"\r\n\r\nimport pandas as pd\r\n\r\ndataframe = pd.read_csv('concrete.csv')\r\n\r\nprint(dataframe)\r\n\r\n\r\ndf1 = dataframe.iloc[:,1:]\r\ndf2 = dataframe.iloc[:,:8]\r\n\r\n\r\nfrom sklearn import preprocessing\r\n\r\ndf1 = preprocessing.normalize(df1)\r\n\r\ndf2 = preprocessing.normalize(df2) \r\n\r\n\r\nfrom sklearn import model_selection\r\n\r\ntrain_data , test_data , train_target , test_target = model_selection.train_test_split(df1,df2)\r\n\r\n\r\n\r\nfrom sklearn import linear_model\r\n\r\nregression = linear_model.LinearRegression()\r\n\r\nfitting = regression.fit(train_data,train_target)\r\n\r\nresult = regression.predict(test_data)\r\n\r\nprint(result)\r\n\r\n\r\ncoefficient = regression.coef_\r\nintercept = regression.intercept_\r\n\r\nprint(\"The coefficeint is \" + str(coefficient))\r\nprint(\"Intercept is \" + str(intercept))\r\n\r\n\r\n\r\n\r\nfrom sklearn import metrics\r\n\r\nmean_square_error = metrics.mean_squared_error(test_target,result)\r\n\r\nprint(\"Mean square error is \" + str(mean_square_error))\r\n\r\nvarience = metrics.r2_score(test_target,result)\r\n\r\nprint(\"Varience is \" + str(varience))\r\n\r\n\r\n\r\n\r\nfrom matplotlib import pyplot\r\n\r\npyplot.hist(train_data)\r\npyplot.hist(result)\r\n\r\n\r\n# Output\r\n\r\npyplot.scatter(test_target,result)\r\npyplot.title(\"Output\")\r\n\r\npyplot.scatter(result,result-test_target)\r\npyplot.title('Residue')\r\n\r\n\r\n\r\n\r\n","sub_path":"Concrete Data Set/normalize_conco.py","file_name":"normalize_conco.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"326407062","text":"import vk_api, time, random\n\nvk = 
vk_api.VkApi(token=\"c4cf0a0ac196623b06a0799377938038521e702a702134cf7f75d8ddd62f4e25f48c5c1b19109e1c8a4c7\" )\n\nprint(\"VK bot is active\")\n\nwhile True:\n messages = vk.method(\"messages.getConversations\", {\"offset\": 0, \"count\": 20, \"filter\": \"unread\"})\n\n if messages[\"count\"] >= 1:\n\n id = messages[\"items\"][0][\"last_message\"][\"from_id\"]\n text = messages[\"items\"][0][\"last_message\"][\"text\"]\n\n\n if text.lower() == \"привет\":\n vk.method(\"messages.send\", {\"peer_id\": id, \"message\": \"Привет!\", \"random_id\": random.randint(1, 999999)})\n else:\n vk.method(\"messages.send\", {\"peer_id\": id, \"message\": \"Я не понял тебя\",\"random_id\": random.randint(1, 999999)})\n\n time.sleep(1)\n \n\n","sub_path":"first_bot/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"233371693","text":"\"\"\"\n app.py\n\n Starts Flask server.\n\"\"\"\n\n\nfrom flask import Flask, render_template, request\nfrom processing import processingWholesale\nfrom processing import processingRetail\nfrom processing import search\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/dashboard')\ndef dashboard():\n return render_template('dashboard.html')\n\n@app.route('/search', methods=['GET', 'POST'])\ndef send():\n if request.method == 'POST':\n query = request.form['search']\n\n queryHeader = query\n\n wholesalePriceDetails = processingWholesale.aggregatePrices(query)\n retailPriceDetails = processingRetail.aggregatePrices(query)\n\n averageWholesalePrice = wholesalePriceDetails['averageItemPrice']\n averageRetailPrice = retailPriceDetails['averageItemPrice']\n\n priceDifferential = averageRetailPrice - averageWholesalePrice\n percentDifferential = (priceDifferential / averageWholesalePrice) * 100\n\n awpFormatted = '{0:.2f}'.format(averageWholesalePrice)\n arpFormatted = '{0:.2f}'.format(averageRetailPrice)\n pdFormatted = '{0:.2f}'.format(priceDifferential)\n pctFormatted = '{0:.2f}'.format(percentDifferential)\n\n searchDetails = search.performSearch(query)\n\n sentimentScore = searchDetails['sentimentScore']\n sentimentValue = searchDetails['sentimentValue']\n\n articles = searchDetails['articles']\n\n articleTitle1 = articles[0]['name']\n articleDescription1 = articles[0]['description']\n articleUrl1 = articles[0]['url']\n\n articleTitle2 = articles[1]['name']\n articleDescription2 = articles[1]['description']\n articleUrl2 = articles[1]['url']\n\n articleTitle3 = articles[2]['name']\n articleDescription3 = articles[2]['description']\n articleUrl3 = articles[2]['url']\n\n articleTitle4 = articles[3]['name']\n articleDescription4 = articles[3]['description']\n articleUrl4 = articles[3]['url']\n\n articleTitle5 = articles[4]['name']\n articleDescription5 = articles[4]['description']\n articleUrl5 = articles[4]['url']\n\n return render_template('search.html',\n queryHeader=queryHeader,\n awp=awpFormatted,\n arp=arpFormatted,\n pd=pdFormatted,\n pct=pctFormatted,\n sentimentScore=sentimentScore,\n sentimentValue=sentimentValue,\n a1t=articleTitle1,\n a1d=articleDescription1,\n a1u=articleUrl1,\n a2t=articleTitle2,\n a2d=articleDescription2,\n a2u=articleUrl2,\n a3t=articleTitle3,\n a3d=articleDescription3,\n a3u=articleUrl3,\n a4t=articleTitle4,\n a4d=articleDescription4,\n a4u=articleUrl4,\n a5t=articleTitle5,\n a5d=articleDescription5,\n a5u=articleUrl5)\n\n return 
render_template('dashboard.html')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"152485308","text":"\n# payoff matrix:\n# C D\n# -----------\n# C |R, R|S, T|\n# D |T, S|P, P|\n# -----------\n# T > R > P > S\n\nclass MyPlayer:\n '''\n analyzes first 6 moves and tries to guess strategy\n '''\n #also repeatedly checks for random patterns in opponent's moves\n\n def __init__(self, payoff_matrix, number_of_iterations = 0):\n self.matrix = payoff_matrix\n self.max = number_of_iterations\n self.counter = 0 # iteration counter\n self.op_moves = [] # opponent's moves\n self.op_strat = \"\" # opponent's strategy\n\n def move(self):\n\n self.counter += 1\n\n #------------------------------------------------\n # Matrix analysis\n if self.matrix[1][1][1] >= self.matrix[0][0][0]:\n return True\n\n #------------------------------------------------\n # Strategy analysis\n if self.counter == 7:\n self.op_strat = self.initial_guess()\n #print(self.op_strat)\n elif self.counter >= 17 and self.counter % 8 == 1:\n self.op_strat = self.check_for_random()\n #print(self.op_strat)\n\n #------------------------------------------------\n if self.counter == self.max:\n return True\n elif self.counter == 1:\n return False\n elif self.counter == 7:\n\n if self.op_strat == \"COOP\":\n return self.op_moves[-1]\n elif self.op_strat == \"STFT\":\n return False\n elif self.op_strat == \"PAVD\" or self.op_strat == \"ALLD\" or self.op_strat == \"RAND\":\n return True\n\n elif self.counter == 8 and (self.op_strat == \"STFT\" or self.op_strat == \"PAVD\"):\n return False\n elif self.op_strat == \"RAND\":\n return True\n else:\n return self.op_moves[-1]\n\n\n def record_opponents_move(self, opponent_move):\n self.op_moves.append(opponent_move)\n\n\n # analyse first 6 moves\n def initial_guess(self):\n\n if self.op_moves.count(True) <= 1:\n return \"COOP\"\n elif self.op_moves == 3*[True, False]:\n return \"STFT\"\n elif self.op_moves == 2*[True, True, False]:\n return \"PAVD\"\n elif False not in self.op_moves:\n return \"ALLD\"\n else:\n return \"RAND\"\n\n\n # after every 8 moves, check if opponent's strategy is not random\n def check_for_random(self):\n temp = self.op_moves[-8:].count(True)\n return (\"RAND\" if temp >= 2 and temp <= 6 else self.op_strat)\n","sub_path":"pd/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"187479700","text":"# -*- coding: utf-8 -*- \n \nimport sys, os, datetime, time\nfrom PyQt4 import QtGui, QtCore, QtSql, uic, Qt\n\nimport oms6.cxml\nimport oms6.analize\nimport oms6.korrect\nimport oms6.separator\nimport oms6.dbf_join\nfrom oms6.api import *\n#import oms6.dbf_diff\n\n\n\nclass IntDialog(QtGui.QDialog):\n\tdef __init__(self, caption='', parent = None):\n\t\tsuper(IntDialog, self).__init__(parent)\n\n\t\tlayout = QtGui.QVBoxLayout(self)\n\t\tself.label = QtGui.QLabel(self)\n\t\tself.label.setText(caption)\n\t\tself.num = QtGui.QSpinBox(self)\n\t\t#self.num.setLocale(QtCore.QLocale('C'))\n\t\tself.num.setMaximum(999999999)\n\t\tlayout.addWidget(self.label)\n\t\tlayout.addWidget(self.num)\n\n\t\tbuttons = QtGui.QDialogButtonBox(\n\t\t\tQtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel,\n\t\t\tQtCore.Qt.Horizontal, 
self)\n\t\tbuttons.accepted.connect(self.accept)\n\t\tbuttons.rejected.connect(self.reject)\n\t\tlayout.addWidget(buttons)\n\t\n\tdef getVal(self):\n\t\treturn self.num.value()\n\n\t@staticmethod\n\tdef getIntVal(caption,parent = None):\n\t\tdialog = IntDialog(caption,parent)\n\t\tresult = dialog.exec_()\t\t\n\t\tint_val = dialog.getVal()\n\t\treturn (int_val, result == QtGui.QDialog.Accepted)\n\t\t\n\t\t\nclass FloatDialog(QtGui.QDialog):\n\tdef __init__(self, caption='', parent = None):\n\t\tsuper(FloatDialog, self).__init__(parent)\n\n\t\tlayout = QtGui.QVBoxLayout(self)\n\t\tself.label = QtGui.QLabel(self)\n\t\tself.label.setText(caption)\n\t\tself.num = QtGui.QDoubleSpinBox(self)\n\t\tself.num.setLocale(QtCore.QLocale('C'))\n\t\tself.num.setMaximum(float('9999.99'))\n\t\tlayout.addWidget(self.label)\n\t\tlayout.addWidget(self.num)\n\n\t\tbuttons = QtGui.QDialogButtonBox(\n\t\t\tQtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel,\n\t\t\tQtCore.Qt.Horizontal, self)\n\t\tbuttons.accepted.connect(self.accept)\n\t\tbuttons.rejected.connect(self.reject)\n\t\tlayout.addWidget(buttons)\n\t\n\tdef getVal(self):\n\t\treturn self.num.value()\n\n\t@staticmethod\n\tdef getFloatVal(caption,parent = None):\n\t\tdialog = FloatDialog(caption,parent)\n\t\tresult = dialog.exec_()\t\t\n\t\tfloat_val = dialog.getVal()\n\t\treturn (float_val, result == QtGui.QDialog.Accepted)\n\t\t\n\t\t\nclass DateDialog(QtGui.QDialog):\n\tdef __init__(self, caption='', parent = None):\n\t\tsuper(DateDialog, self).__init__(parent)\n\n\t\tlayout = QtGui.QVBoxLayout(self)\n\t\tself.label = QtGui.QLabel(self)\n\t\tself.label.setText(caption)\n\t\tself.date = QtGui.QDateEdit(self)\n\t\tself.date.setCalendarPopup(True)\n\t\tself.date.setDate(QtCore.QDate.currentDate())\n\t\tlayout.addWidget(self.label)\n\t\tlayout.addWidget(self.date)\n\n\t\tbuttons = QtGui.QDialogButtonBox(\n\t\t\tQtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel,\n\t\t\tQtCore.Qt.Horizontal, self)\n\t\tbuttons.accepted.connect(self.accept)\n\t\tbuttons.rejected.connect(self.reject)\n\t\tlayout.addWidget(buttons)\n\n\tdef date_now(self):\n\t\treturn self.date.date()\n\n\t@staticmethod\n\tdef getDateTime(caption,parent = None):\n\t\tdialog = DateDialog(caption,parent)\n\t\tresult = dialog.exec_()\n\t\ttoday = dialog.date_now()\n\t\treturn (today, result == QtGui.QDialog.Accepted)\n\t\t\n######################################################################################################\n\nPATH = 'oms6' + os.sep\n\ndef MSG(header, msg, parent = None ):\n    QtGui.QMessageBox.about(parent, header, msg) \n    \ndef INPUT_DIALOG(caption):\n\ttext, ok = QtGui.QInputDialog.getText(None, 'Input dialog', caption)\n\tif ok:\n\t\treturn text\n\t\t\ndef FILE_DIALOG(path = '.', mask = '*'):\n\tdialog = QtGui.QFileDialog()\n\treturn dialog.getOpenFileName(None, 'Open file dialog', path, mask)\n\t\ndef SCHET(recs, n_scht, dt_scht, tarif, org_plat, MONTH, YEAR, nom_pak, rsmo):\n\treturn oms6.cxml.xml_dump(recs, n_scht, dt_scht, tarif, org_plat, MONTH, YEAR, nom_pak, rsmo, PATH)\n\ndef SCHET_SAVE(ZL_LIST, PERS_LIST, FILENAME1, FILENAME):\n\toms6.cxml.save_xml(ZL_LIST, PATH_XML + FILENAME1 + '.XML', PATH=PATH)\n\toms6.cxml.save_xml(PERS_LIST, PATH_XML + FILENAME + '.XML', PATH=PATH)\n\ndef ANALIZE(recs):\n\treturn oms6.analize.analize(recs, PATH)\n\ndef ANALIZE_SAVE(filename, recs):\n\toms6.analize.save(recs, unicode(filename), PATH)\n\ndef KORRECT(recs, struct):\n\trecs, korrect_struct_dbf = oms6.korrect.korrect(recs, struct, PATH)\n\treturn recs, 
korrect_struct_dbf\n\ndef KORRECT_SAVE(filename, recs, struct):\n\toms6.korrect.save(unicode(filename), recs, struct, PATH)\n\ndef DBF_STRUCT(filename):\n\treturn struct_dbf(unicode(filename))\n\ndef DBF(filename, codepage = 'cp866'):\n\trecs = open_dbf(unicode(filename), str(codepage))\n\trecs = list(recs)\n\treturn recs\n\ndef DATE_DIALOG(caption):\n\tdate, ok = DateDialog.getDateTime(caption)\n\tif ok:\n\t\treturn str(date.toString('yyyy-MM-dd'))\n\t\t\ndef FLOAT_DIALOG(caption):\n\tval, ok = FloatDialog.getFloatVal(caption)\n\tif ok:\n\t\treturn str(val)\n\ndef INT_DIALOG(caption):\n\tval, ok = IntDialog.getIntVal(caption)\n\tif ok:\n\t\treturn str(val)\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"171479674","text":"# Import libraries\nimport cv2\n\n# Create the rectified cluster figure, with all cluster matches\ndef create_rectified_cluster_figure(rectified_matches_image,\n                                    axis):\n    # Create final figure\n    axis.imshow(cv2.cvtColor(rectified_matches_image, cv2.COLOR_BGR2RGB))\n    axis.set_title('Rectified object\\nmatches', size=5)\n    axis.tick_params(labelsize=2, width=0.1, length=1)\n    #axis.set_aspect(1./axis.get_data_ratio()) ","sub_path":"python/functions/create_figures/create_rectified_cluster_figure.py","file_name":"create_rectified_cluster_figure.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"397766866","text":"import re\n\nfrom .stream_url import *\n\ndef fetcher(session, url, check):\n    \n    anime_name = re.match(r'^(?:https?://)?(?:\\S+\\.)?twist\\.moe/a/([^?&/]+)', url).group(1)\n    for index, data in enumerate(get_twistmoe_anime_uri(session, anime_name), 1):\n        if check(index):\n            yield [{'quality': 'unknown', 'stream_url': data.get('stream_url'), 'headers': {'referer': 'https://twist.moe'}}]","sub_path":"core/providers/twistmoe/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"578875816","text":"import io\nimport os\nimport unittest\nimport sys\ntry:\n    # Python 3\n    from contextlib import redirect_stdout\nexcept ImportError:\n    # Python 2\n    from contextlib import contextmanager\n\n    @contextmanager\n    def redirect_stdout(filehandler):\n        stdout = sys.stdout\n        sys.stdout = filehandler\n        yield\n        filehandler.flush()\n        sys.stdout = stdout  # restore stdout\n\nimport requests\n\nfrom kinto_wizard.__main__ import main\n\n\nclass RoundTrip(unittest.TestCase):\n    def setUp(self):\n        self.server = os.getenv(\"SERVER_URL\", \"http://localhost:8888/v1\")\n        self.auth = os.getenv(\"AUTH\", \"user:pass\")\n        self.file = os.getenv(\"FILE\", \"tests/kinto.yaml\")\n        self.original = open(self.file).read()\n        requests.post(self.server + \"/__flush__\")\n\n    def test_round_trip(self):\n        cmd = 'kinto-wizard {} --server={} 
--auth={}'\n load_cmd = cmd.format(\"load {}\".format(self.file),\n self.server, self.auth)\n sys.argv = load_cmd.split(\" \")\n main()\n\n cmd = 'kinto-wizard {} --server={} --auth={} --full'\n load_cmd = cmd.format(\"dump\", self.server, self.auth)\n sys.argv = load_cmd.split(\" \")\n output = io.StringIO()\n with redirect_stdout(output):\n main()\n output.flush()\n assert 'last_modified' in output.getvalue()\n","sub_path":"tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"408741720","text":"'''Data Converter program that translates raw data into tables storing that data\r\n@author Will Thompson\r\n@author Daniel Busis\r\n10/19/2018'''\r\n\r\n\r\nimport sys\r\nimport re\r\nimport csv\r\nimport os\r\n\r\ncategories_list = [\r\n\t\"GDP per capita, PPP (current international $)\",\r\n\t\"GDP per capita (current US$)\",\r\n\t\"GDP, PPP (constant 2011 international $)\",\r\n\t\"GDP (current US$)\",\r\n\t\"Central government debt, total (% of GDP)\",\r\n\t\"Consumer price index (2010 = 100)\",\r\n\t\"Unemployment, total (% of total labor force) (national estimate)\",\r\n\t\"Military expenditure (% of GDP)\",\r\n\t\"Literacy rate, adult total (% of people ages 15 and above)\",\r\n\t\"Literacy rate, adult male (% of males ages 15 and above)\",\r\n\t\"Literacy rate, adult female (% of females ages 15 and above)\",\r\n\t\"Literacy rate, youth male (% of males ages 15-24)\",\r\n\t\"Literacy rate, youth (ages 15-24), gender parity index (GPI)\",\r\n\t\"Literacy rate, youth female (% of females ages 15-24)\",\r\n\t\"Mortality rate, adult, male (per 1,000 male adults)\",\r\n\t\"Mortality rate, adult, female (per 1,000 female adults)\",\r\n\t\"Mortality rate, infant (per 1,000 live births)\",\r\n\t\"School enrollment, secondary, male (% gross)\",\r\n\t\"School enrollment, secondary, female (% gross)\",\r\n\t\"School enrollment, secondary (% gross)\",\r\n\t\"Educational attainment, at least completed short-cycle tertiary, population 25+, total (%) (cumulative)\",\r\n\t\"Educational attainment, at least completed short-cycle tertiary, population 25+, male (%) (cumulative)\",\r\n\t\"Educational attainment, at least completed short-cycle tertiary, population 25+, female (%) (cumulative)\",\r\n\t\"Educational attainment, at least Master's or equivalent, population 25+, total (%) (cumulative)\",\r\n\t\"Educational attainment, at least Master's or equivalent, population 25+, male (%) (cumulative)\",\r\n\t\"Educational attainment, at least Master's or equivalent, population 25+, female (%) (cumulative)\",\r\n\t\"Educational attainment, Doctoral or equivalent, population 25+, total (%) (cumulative)\",\r\n\t\"Educational attainment, Doctoral or equivalent, population 25+, male (%) (cumulative)\",\r\n\t\"Educational attainment, Doctoral or equivalent, population 25+, female (%) (cumulative)\",\r\n\t\"Educational attainment, at least Bachelor's or equivalent, population 25+, total (%) (cumulative)\",\r\n\t\"Educational attainment, at least Bachelor's or equivalent, population 25+, male (%) (cumulative)\",\r\n\t\"Educational attainment, at least Bachelor's or equivalent, population 25+, female (%) (cumulative)\",\r\n\t\"Population, male (% of total)\",\r\n\t\"Population, male\",\r\n\t\"Population, female (% of total)\",\r\n\t\"Population, female\",\r\n\t\"Population, total\"\r\n]\r\n\r\ndef load_from_data_folder(folder):\r\n\tfile_list = 
os.listdir(folder)\r\n\tlist_of_all_countries = []\r\n\t\r\n\tdict_of_stat_ids = {}\r\n\tdict_of_country_ids = {}\r\n\t\r\n\tfor file_name in file_list:\r\n\t\tcur_country_id = len(dict_of_country_ids)\r\n\t\t\r\n\t\tcsv_file = open(folder+'/'+file_name)\r\n\t\treader = csv.reader(csv_file)\r\n\t\tcur_country_data = []\r\n\t\tlist_of_years = []\r\n\t\t\r\n\t\tfor row in reader:\r\n\t\t\tif len(row) < 4:\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tif row[0] == \"Country Name\":\r\n\t\t\t\tlist_of_years = row\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tif len(cur_country_data) == 0:\r\n\t\t\t\tdict_of_country_ids[row[0]] = cur_country_id\r\n\t\t\t\t\r\n\t\t\tcur_data_category = row[2]\r\n\t\t\tif cur_data_category in categories_list:\r\n\t\t\t\tif cur_data_category not in dict_of_stat_ids:\r\n\t\t\t\t\tdict_of_stat_ids[cur_data_category] = len(dict_of_stat_ids)\r\n\t\t\t\t\t\r\n\t\t\t\tcur_category_list = []\r\n\t\t\t\tcur_category_list.append(cur_country_id)\r\n\t\t\t\tcur_category_list.append(dict_of_stat_ids[cur_data_category])\r\n\t\t\t\tcur_category_list.extend(row[4:len(row)-1])\r\n\t\t\t\tcur_country_data.append(cur_category_list)\r\n\t\t\t\t\t\t\r\n\t\tlist_of_all_countries.extend(cur_country_data)\r\n\treturn (list_of_all_countries, dict_of_stat_ids, dict_of_country_ids)\r\n\r\n\r\ndef save_country_tables(list_of_all_countries):\r\n\tfilename = \"data_output/annual_data.csv\"\t\t\r\n\toutput_file = open(filename, 'w')\r\n\twriter = csv.writer(output_file)\r\n\tfor list in list_of_all_countries:\r\n\t\twriter.writerow(list)\r\n\toutput_file.close()\r\n\r\ndef save_stat_id_tables(dict_of_stat_ids):\r\n\tfilename = \"data_output/stat_ids.csv\"\r\n\toutput_file = open(filename, 'w')\r\n\twriter = csv.writer(output_file)\r\n\tfor key in dict_of_stat_ids:\r\n\t\twriter.writerow([dict_of_stat_ids[key], key])\r\n\toutput_file.close()\r\n\r\ndef save_country_id_tables(dict_of_country_ids):\r\n\tfilename = \"data_output/country_ids.csv\"\r\n\toutput_file = open(filename, 'w')\r\n\twriter = csv.writer(output_file)\r\n\tfor key in dict_of_country_ids:\r\n\t\twriter.writerow([dict_of_country_ids[key], key])\r\n\toutput_file.close()\r\n\r\nif __name__ == '__main__':\r\n\tall_countries_list, all_stats_dict, all_countries_dict = load_from_data_folder('source_data')\r\n\tsave_country_tables(all_countries_list)\r\n\tsave_stat_id_tables(all_stats_dict)\r\n\tsave_country_id_tables(all_countries_dict)\r\n\t\r\n#Note: To put tables into SQL, use:\r\n#\\copy data_gdp_usd from 'gdp_usd_data.csv' DELIMITER ',' CSV NULL as ''\r\n","sub_path":"webapp/sql_data_test/me_data_converter_2.py","file_name":"me_data_converter_2.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"121921600","text":"\"\"\"\n# PasswordWallet\n\n@author: Sebastiano Bisacchi\n\nThis file contains all the global settings\nand preferences that the software use.\n\"\"\"\n\nimport os\n\n\n# -------------- Window settings -------------- #\n\nX_RESIZE = False\nY_RESIZE = True\n\nX_SIZE = 400\nY_SIZE = 600\n\nX_SPACING = 300\nY_SPACING = 200\n\n\n# -------------- Path and file settings -------------- #\n\nFILE_NAME = 'pwStorage.pwea'\nBKFILE_NAME = 'pwStorage.pweabk'\n\nUSER_FILE_PATH = None\nFILE_PATH = None\n\n\n# -------------- Encryption/decryption settings -------------- #\n\nENCRYPTION_KEY = None\n\n\n# -------------- Graphic settings -------------- #\n\nASSETS_PATH = os.path.join('.', 'assets')\n\nCPY_ICON = os.path.join(ASSETS_PATH, 'copy.gif')\n\nFONT_NAME 
= 'Arial'\n\nFONT_SIZE = 14\n\nCOLORS_STATES = ['#f44242', '#f46842', '#f48942', '#f4a442', '#f4bc42', '#f4df42', '#cef442', '#5ff442', '#00f218']\nSECURITY_STATES = ['extremely weak', 'very weak', 'weak', 'quite good', 'good', 'really good', 'safety']\n","sub_path":"src/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"498607547","text":"def controllo_corrispondenza(codice,parola):\n dizionario = {}\n lung_parola = len(parola)\n mycodice = list(codice)\n verita = True\n\n for i in range(0,lung_parola):\n carattere = parola[i]\n \n if(carattere not in dizionario.keys()): \n if (mycodice[i] in dizionario.values()):\n verita=False\n else: \n dizionario[carattere] = mycodice[i]\n else:\n if(dizionario[carattere] != mycodice[i]):\n verita=False\n break\n return (verita)\n\ndef decod(pfile, codice):\n result = set()\n myfile = open(pfile,'r',encoding = 'utf-8')\n lista_parole = myfile.readlines()\n \n for parola in lista_parole:\n parola = parola.strip('\\n')\n lung_parola=len(parola)\n lung_codice=len(codice)\n \n if(lung_parola != lung_codice):\n continue\n else:\n possibile_parola_trovata = controllo_corrispondenza(codice,parola)\n \n if(possibile_parola_trovata==True):\n result.add(parola)\n else:\n continue\n return(result)\n \n \n\n\n","sub_path":"students/1814543/homework02/program03.py","file_name":"program03.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"318365746","text":"from abc import ABC, abstractmethod\nimport collections\nfrom typing import Union, Type, Dict, List, Tuple, Optional, Any, Callable\n\nimport torch as th\nimport torch.nn as nn\nimport numpy as np\n\nfrom partner import Partner, PartnerPolicy, PPOPartnerPolicy\nfrom modular_policy import ModularPolicy\n\nclass InteractivePolicy(ModularPolicy):\n def __init__(self, *args, **kwargs):\n super(InteractivePolicy, self).__init__(*args, **kwargs)\n self.partners = None\n\n def set_partners(self, partners: Optional[List[Partner]]=None):\n self.partners = partners\n self.num_partners = len(partners) if partners is not None else 1\n \n def set_PPO_partners(self, partner_model_paths: List[str]):\n self.set_partners(partners=[Partner(PPOPartnerPolicy(pmpath)) for pmpath in partner_model_paths])\n\nclass OptimalPolicy(ModularPolicy, ABC):\n def __init__(self, *args, **kwargs):\n super(OptimalPolicy, self).__init__(*args, **kwargs)\n self.use_optimal_mask = False\n\n @abstractmethod\n def get_mask(self, obs):\n pass\n\n @abstractmethod\n def setup_optimal_mask(self, env):\n pass\n\n def evaluate_actions(self, obs: th.Tensor,\n actions: th.Tensor,\n partner_idx: int) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n optimal_mask = self.get_mask(obs) if self.use_optimal_mask else None\n return super(OptimalPolicy, self).evaluate_actions(obs=obs, actions=actions, partner_idx=partner_idx, action_mask=optimal_mask)\n\n def get_action_dist_from_obs(self, obs: th.Tensor, partner_idx: int) -> th.Tensor:\n optimal_mask = self.get_mask(obs) if self.use_optimal_mask else None\n return super(OptimalPolicy, self).get_action_dist_from_obs(obs=obs, partner_idx=partner_idx, action_mask=optimal_mask)\n\nclass BlocksPolicy(InteractivePolicy, OptimalPolicy):\n def __init__(self, *args, **kwargs):\n super(BlocksPolicy, self).__init__(*args, **kwargs)\n\n def setup_optimal_mask(self, env):\n self.use_optimal_mask = True\n 
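# note: assumes env exposes a square board (grid_size) and a discrete action_space; both are cached here for get_mask below\n        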
self.sz = env.grid_size ** 2\n        self.action_sz = env.action_space.n\n\n    def get_mask(self, obs):\n        optimal_mask = th.ones((obs.size(0), self.action_sz), dtype=th.bool)\n\n        # 6 turns so turn 0 is the first move for P1. Target is blue block which has id=3.\n        first_move, second_move, target_id = 0, 2, 3\n        is_first_two_moves = th.logical_or(obs[:,2*self.sz] == first_move, obs[:,2*self.sz] == second_move)\n        goal_obs = obs[:,:self.sz]\n\n        optimal_mask[is_first_two_moves] = th.zeros(self.action_sz, dtype=th.bool)\n        loc_of_target = (goal_obs[is_first_two_moves] != target_id)\n        optimal_mask[is_first_two_moves, :self.sz] = loc_of_target\n\n        # 6 turns so turn 4 is the last move for P1. Target is red block which has id=2.\n        last_move, target_id = 4, 2\n        is_last_move = (obs[:,2*self.sz] == last_move)\n        goal_obs = obs[:,:self.sz]\n\n        # this part only works nicely since the goal obs exactly matches optimal action representation\n        optimal_mask[is_last_move] = th.zeros(self.action_sz, dtype=th.bool) # if last move, all moves are suboptimal except one\n        loc_of_target = (goal_obs[is_last_move] == target_id)\n        optimal_mask[is_last_move, :self.sz] = loc_of_target\n\n        return optimal_mask\n\n    def forward(self, obs: th.Tensor,\n                partner_idx: int,\n                deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n        \"\"\"\n        Forward pass in all the networks (actor and critic)\n        :param obs: (th.Tensor) Observation\n        :param deterministic: (bool) Whether to sample or use deterministic actions\n        :return: (Tuple[th.Tensor, th.Tensor, th.Tensor]) action, value and log probability of the action\n        \"\"\"\n        if self.is_partners_turn(obs) and self.partners is not None:\n            actions, values, log_probs = self.partners[partner_idx].policy.forward(obs=obs, deterministic=deterministic)\n            return actions, th.tensor([0.0]), th.tensor([0.0]) # effectively detaching value / log_prob of partner\n\n        latent_pi, latent_vf, _ = self._get_latent(obs=obs)\n        partner_latent_pi, partner_latent_vf = self.partner_mlp_extractor[partner_idx](latent_pi)\n\n        distribution = self._get_action_dist_from_latent(latent_pi, partner_latent_pi, partner_idx=partner_idx)\n        if self.use_optimal_mask: # limit actions to optimal actions\n            optimal_mask = self.get_mask(obs)\n            distribution = self._get_action_dist_from_latent(latent_pi, partner_latent_pi, partner_idx=partner_idx, action_mask=optimal_mask)\n\n        actions = distribution.get_actions(deterministic=deterministic)\n        log_prob = distribution.log_prob(actions)\n        values = self.value_net(latent_vf) + self.partner_value_net[partner_idx](partner_latent_vf)\n\n        return actions, values, log_prob\n\n    def is_partners_turn(self, obs):\n        turn = obs[0][-1]\n        return turn % 2 == 1\n\n\nclass ArmsPolicy(InteractivePolicy, OptimalPolicy):\n    def __init__(self, *args, **kwargs):\n        super(ArmsPolicy, self).__init__(*args, **kwargs)\n\n    def setup_optimal_mask(self, env):\n        self.use_optimal_mask = True\n        from tabular import tabular_q_learning\n        self.q_values, self.optimal_action1_mask, self.optimal_action2_mask = tabular_q_learning(env)\n\n    def get_mask(self, obs):\n        obs_idx = tuple(obs.T.long())\n        optimal_mask = th.cat((self.optimal_action1_mask[obs_idx], self.optimal_action2_mask[obs_idx]), dim=1)\n        return optimal_mask\n\n    def forward(self, obs: th.Tensor,\n                partner_idx: int,\n                deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n        \"\"\"\n        Forward pass in all the networks (actor and critic)\n        :param obs: (th.Tensor) Observation\n        :param deterministic: (bool) Whether to sample or use deterministic 
actions\n :return: (Tuple[th.Tensor, th.Tensor, th.Tensor]) action, value and log probability of the action\n \"\"\"\n latent_pi, latent_vf, _ = self._get_latent(obs=obs)\n partner_latent_pi, partner_latent_vf = self.partner_mlp_extractor[partner_idx](latent_pi)\n\n distribution = self._get_action_dist_from_latent(latent_pi, partner_latent_pi, partner_idx=partner_idx)\n if self.use_optimal_mask: # limit actions to optimal actions\n optimal_mask = self.get_mask(obs)\n distribution = self._get_action_dist_from_latent(latent_pi, partner_latent_pi, partner_idx=partner_idx, action_mask=optimal_mask)\n\n actions = distribution.get_actions(deterministic=deterministic)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf) + self.partner_value_net[partner_idx](partner_latent_vf)\n\n # partner actions\n if self.partners is not None:\n partner_actions, _, _ = self.partners[partner_idx].policy.forward(obs=obs, deterministic=deterministic)\n partner_actions = partner_actions.to(actions.device)\n actions = th.stack((actions[:,0], partner_actions[:,1]), dim=1)\n\n #print(obs, actions, log_prob)\n return actions, values, log_prob\n\nclass HanabiPolicy(InteractivePolicy):\n def __init__(self, *args, **kwargs):\n super(HanabiPolicy, self).__init__(*args, **kwargs)\n self.turn = 0\n\n def forward(self, obs: th.Tensor,\n partner_idx: int,\n deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n \"\"\"\n Forward pass in all the networks (actor and critic)\n :param obs: (th.Tensor) Observation\n :param deterministic: (bool) Whether to sample or use deterministic actions\n :return: (Tuple[th.Tensor, th.Tensor, th.Tensor]) action, value and log probability of the action\n \"\"\"\n self.turn = 1 - self.turn\n if self.is_partners_turn(obs) and self.partners is not None:\n actions, values, log_probs = self.partners[partner_idx].policy.forward(obs=obs, deterministic=deterministic)\n return actions, th.tensor([0.0]), th.tensor([0.0]) # effectively detaching value / log_prob of partner\n\n latent_pi, latent_vf, _ = self._get_latent(obs=obs)\n partner_latent_pi, partner_latent_vf = self.partner_mlp_extractor[partner_idx](latent_pi)\n\n distribution = self._get_action_dist_from_latent(latent_pi, partner_latent_pi, partner_idx=partner_idx)\n\n actions = distribution.get_actions(deterministic=deterministic)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf) + self.partner_value_net[partner_idx](partner_latent_vf)\n\n return actions, values, log_prob\n\n def is_partners_turn(self, obs):\n return self.turn == 1","sub_path":"interactive_policy.py","file_name":"interactive_policy.py","file_ext":"py","file_size_in_byte":8851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"470798411","text":"\"\"\" Exploring learning curves for classification of handwritten digits \"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy\nfrom sklearn.datasets import *\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.linear_model import LogisticRegression\n\n\ndef display_digits():\n digits = load_digits()\n print(digits.DESCR)\n fig = plt.figure()\n for i in range(10):\n subplot = fig.add_subplot(5, 2, i+1)\n subplot.matshow(numpy.reshape(digits.data[i], (8, 8)), cmap='gray')\n\n plt.show()\n\n\ndef train_model():\n data = load_digits()\n num_trials = 10\n train_percentages = range(5, 95, 5)\n print(train_percentages)\n test_accuracies = []\n for i in train_percentages:\n test_accuracy = 0\n total 
= 0\n        for j in range(num_trials):\n            X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, train_size=i/100)\n            model = LogisticRegression(C=10**-15)\n            model.fit(X_train, y_train)\n            test_accuracy = model.score(X_test, y_test)\n            total = test_accuracy + total\n        mean_accuracy = total/num_trials\n        print(mean_accuracy)\n        test_accuracies.append(mean_accuracy)\n\n    fig = plt.figure()\n    plt.plot(train_percentages, test_accuracies)\n    plt.xlabel('Percentage of Data Used for Training')\n    plt.ylabel('Accuracy on Test Set')\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    # Feel free to comment/uncomment as needed\n    #display_digits()\n    train_model()\n","sub_path":"learning_curve.py","file_name":"learning_curve.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"274360911","text":"# Let's play with the *args pattern.\n# Create a function named multiply that takes any number of arguments. \n# Return the product (multiplied value) of all of the supplied arguments. \n# The type of argument shouldn't matter.\n\ndef multiply(*args):\n    product = 1\n    for arg in args:\n        product *= arg\n    return product\n\nprint(multiply())","sub_path":"tuple.py","file_name":"tuple.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"304154983","text":"import unittest\nfrom main import main\n\nclass TestExtraTask(unittest.TestCase):\n    \"\"\"\n    Tests for the extra task\n    \"\"\"\n\n    def setUp(self):\n        ''' Expected answers '''\n        self.answers = {\n            0: 4.000000,\n            1: 5.500000,\n            2: 3.538095,\n        }\n\n    def test_all_together(self):\n        ''' Test all input files '''\n        for i in range(0, 3):\n            filename = f'input_{i}.txt'\n            with self.subTest(file=filename):\n                self.assertEqual(self.answers[i], main(filename))\n    \nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"extra_task/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"154120056","text":"from django.urls import path\nfrom crud import views\nurlpatterns = [\n    path('addstudent/',views.addstudent,name='add'),\n    path('display/',views.display,name='display'),\n    path('update/',views.update,name=\"update\"),\n    path('delete/',views.delete,name='delete'),\n\n\n\n]\n","sub_path":"TOT-Django/MyProject/crud/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"39859584","text":"import requests\nfrom scrapy import signals\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.signalmanager import dispatcher\nfrom scrapy.exporters import PythonItemExporter\nfrom tripadvisor_restaurants.tripadvisor_restaurants.spiders.restaurant_spider import RestaurantSpider\n\n\ndef get_link_for_tripadvisor(latitude, longitude):\n    link = f\"https://www.tripadvisor.ru/MobileNearbyRestaurants?j=distLow&latitude={latitude}&longitude={longitude}\"\n    resp = requests.get(link)\n    return resp.url\n\ndef spider_handler(latitude, longitude, max_number, q):\n    link = get_link_for_tripadvisor(latitude, longitude)\n    output = []\n    _exporter = PythonItemExporter(binary=False)\n    def get_crawler_output(signal, sender, item, response, spider):\n        
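# item_scraped handler: export every scraped item to a plain dict and collect it; the parameter list mirrors scrapy's item_scraped signal\n        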
output.append(_exporter.export_item(item))\n\n    dispatcher.connect(get_crawler_output, signal=signals.item_scraped)\n    process = CrawlerProcess({\n        \"USER_AGENT\": \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)\"\n    })\n    process.crawl(RestaurantSpider, start_url=link, max_restaurants=max_number)\n    process.start()\n    q.put(output)\n\nif __name__ == \"__main__\":\n    from queue import Queue\n\n    q = Queue()\n    spider_handler(\"50.4863685\", \"30.4666481\", 3, q)\n    print(q.get())","sub_path":"bot_api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"163764543","text":"def return_min_max( fn, row, col, variable ):\n    ''' return min/max WRF output T2 data '''\n    print('working with {} starting'.format( fn ))\n    ds = xr.open_dataset( fn, autoclose=True )\n    da = ds[ variable ] # get the array as it is easier to process on\n\n    # try to slice it...\n    da_pt = da[:,row, col]\n\n    #fill the dataframe with min and max value with kelvin to C conversion\n    day_min = da_pt.resample(time='D').min() - 273.15\n    day_max = da_pt.resample(time='D').max() - 273.15\n\n    df = pd.DataFrame( index=day_min.time )\n\n    df['min'] = day_min\n    df['max'] = day_max\n\n    # return dataframe rounded at 2 decimals --> SNAP Temperature standard\n    return df.round( 2 )\n\ndef reproject_wgs84_to_wrf( x,y ):\n    ''' simple wrapper around pyproj.transform to project the coords between wgs84 latlong and WRF polar grid'''\n    import pyproj\n    wrf = pyproj.Proj( '+units=m +proj=stere +lat_ts=64.0 +lon_0=-152.0 +lat_0=90.0 +x_0=0 +y_0=0 +a=6370000 +b=6370000 ' )\n    wgs = pyproj.Proj( '+units=m +datum=WGS84 +proj=latlong ' )\n    return pyproj.transform( wgs, wrf, np.array(x), np.array(y) )\n\ndef affine_from_wrfds( fn ):\n    ''' make an affine transform from a template wrf file... '''\n    ds = xr.open_dataset( fn, autoclose=True )\n    res = 20000\n    x0,y0 = np.array( ds.xc.min()-(res/2.)), np.array(ds.yc.max()+(res/2.) 
)\n    ds.close()\n    ds = None\n    return rasterio.transform.from_origin( x0, y0, res, res )\n\ndef rasterize_shapes( shapes, arr, transform, fill=0, all_touched=False, default_value=1, dtype='float32' ):\n    from rasterio.features import rasterize\n    return rasterize( shapes, out_shape=arr.shape, fill=fill, out=None, \n                      transform=transform, all_touched=all_touched, \n                      default_value=default_value, dtype=dtype )\n\n\nif __name__ == '__main__':\n    import xarray as xr\n    import os, glob, rasterio\n    import numpy as np\n    import pandas as pd\n    from multiprocessing import Pool\n    from functools import partial\n    import warnings\n    from shapely.geometry import Point\n    import geopandas as gpd\n\n    # global setup arguments\n    variable = 'T2'\n\n    path = '/workspace/Shared/Tech_Projects/wrf_data/project_data/wrf_data/hourly_fix/{}'.format( variable.lower() )\n    output_path = '/workspace/Shared/Tech_Projects/DOD_Ft_Wainwright/project_data/wrf_data_app'\n    warnings.filterwarnings( 'ignore' ) # so we dont have to look at xarray warnings about pd.TimeGrouper\n\n    location = {\n        'Fairbanks' : ( -147.716, 64.8378 ),\n        'Greely' : ( -145.6076, 63.8858 ),\n        'Whitehorse' : ( -135.074, 60.727 ),\n        'Coldfoot' : ( -150.1772 , 67.2524 )\n    }\n\n    # reproject the points to the WRF polar stereographic grid\n    location = { i:Point(j) for i,j in location.items() }\n    df = pd.Series( location ).to_frame( 'geometry' )\n    wrf_crs = '+units=m +proj=stere +lat_ts=64.0 +lon_0=-152.0 +lat_0=90.0 +x_0=0 +y_0=0 +a=6370000 +b=6370000 '\n    pts_proj = gpd.GeoDataFrame(df, crs={'init':'epsg:4326'}).to_crs( wrf_crs )\n\n    # list (and filter) historicals\n    wildcard = '*GFDL*historical*.nc'\n    historicals = sorted( glob.glob( os.path.join( path, wildcard ) ) )\n    historicals = list(filter(lambda x: '_2006' not in x, historicals ))\n\n    # list futures\n    wildcard = '*GFDL*rcp85*.nc'\n    futures = sorted( glob.glob( os.path.join( path, wildcard ) ) )\n\n    # append 'em\n    files = historicals + futures\n\n    # get an affine transform to make the lookups faster\n    a = affine_from_wrfds( files[0] )\n    arr = xr.open_dataset(files[0]).t2[0].values\n    proj = xr.open_dataset(files[0]).proj_parameters\n    # loop through the locations for extraction\n    for k, pt in pts_proj.geometry.to_dict().items():\n\n        # print( k )\n        out_fn = os.path.join( output_path, '{}_WRF_extract_GFDL_1970-2100_{}v3.csv'.format( variable, k ) )\n\n        # get row/col from x/y using affine\n        col, row = ~a * (pt.x, pt.y)\n        col, row = [ int(i) for i in [col, row] ]\n        arr[row, col] = -9999\n\n    meta = {'driver':'GTiff',\n            'height':arr.shape[0],\n            'width':arr.shape[1],\n            'count':1,\n            'dtype':'float32',\n            'crs':rasterio.crs.CRS.from_string(proj),\n            'compress':'lzw',\n            'transform':a\n            }\n\n    with rasterio.open('/workspace/Shared/Tech_Projects/wrf_data/project_data/wrf_data/TEST_POINTS_LOCATIONS.tif','w', **meta) as out:\n        out.write( arr , 1)\n\n    # run in parallel -- note: row, col and out_fn carry the values from the last location in the loop above\n    pool = Pool( 32 )\n    func = partial( return_min_max, row=row, col=col, variable=variable.lower() )\n    ls_df = pool.map( func, files )\n    pool.close()\n    pool.join()\n\n    # concat and write to disk\n    df = pd.concat( ls_df )\n    df = df.sort_index()\n    df.to_csv( out_fn )","sub_path":"data/extract_v2_TEMP.py","file_name":"extract_v2_TEMP.py","file_ext":"py","file_size_in_byte":4762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"56524215","text":"#!/usr/bin/env python\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2011-2013, The BIOM Format Development Team.\n#\n# Distributed 
under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nfrom __future__ import division\nfrom pyqi.core.command import (Command, CommandIn, CommandOut, \n ParameterCollection)\nfrom pyqi.core.exception import CommandError\nfrom biom.parse import MetadataMap\nfrom biom.table import Table\n\n__author__ = \"Greg Caporaso\"\n__copyright__ = \"Copyright 2011-2013, The BIOM Format Development Team\"\n__credits__ = [\"Greg Caporaso\", \"Morgan Langille\", \"Jai Ram Rideout\",\n \"Daniel McDonald\"]\n__license__ = \"BSD\"\n__url__ = \"http://biom-format.org\"\n__maintainer__ = \"Greg Caporaso\"\n__email__ = \"gregcaporaso@gmail.com\"\n\nclass MetadataAdder(Command):\n BriefDescription = \"Add metadata to a BIOM table\"\n LongDescription = (\"Add sample and/or observation metadata to \"\n \"BIOM-formatted files. Detailed usage examples can be \"\n \"found here: http://biom-format.org/documentation/adding_metadata.html\")\n\n CommandIns = ParameterCollection([\n CommandIn(Name='table', DataType=Table,\n Description='the input BIOM table', Required=True),\n # sample_metadata and observation_metadata are currently files (or\n # file-like) because of the existing metadata map / processing function\n # support. Ideally, these two parameters should be MetadataMap\n # instances.\n CommandIn(Name='sample_metadata', DataType=file,\n Description='the sample metadata map (will add sample '\n 'metadata to the input BIOM table, if provided)'),\n CommandIn(Name='observation_metadata', DataType=file,\n Description='the observation metadata map (will add '\n 'observation metadata to the input BIOM table, if '\n 'provided)'),\n CommandIn(Name='sc_separated', DataType=list,\n Description='list of the metadata fields to split on '\n 'semicolons. This is useful for hierarchical data such as '\n 'taxonomy or functional categories'),\n CommandIn(Name='sc_pipe_separated', DataType=list,\n Description='list of the metadata fields to split on '\n 'semicolons and pipes (\"|\"). This is useful for '\n 'hierarchical data such as functional categories with '\n 'one-to-many mappings (e.g. x;y;z|x;y;w)'),\n CommandIn(Name='int_fields', DataType=list,\n Description='list of the metadata fields to cast to '\n 'integers. This is useful for integer data such as '\n '\"DaysSinceStart\"'),\n CommandIn(Name='float_fields', DataType=list,\n Description='list of the metadata fields to cast to '\n 'floating point numbers. This is useful for real number '\n 'data such as \"pH\"'),\n CommandIn(Name='sample_header', DataType=list,\n Description='list of the sample metadata field names. This '\n 'is useful if a header line is not provided with the '\n 'metadata, if you want to rename the fields, or if you want '\n 'to include only the first n fields where n is the number '\n 'of entries provided here',\n DefaultDescription='use header from sample metadata map'),\n CommandIn(Name='observation_header', DataType=list,\n Description='list of the observation metadata field names. 
'\n 'This is useful if a header line is not provided with the '\n 'metadata, if you want to rename the fields, or if you want '\n 'to include only the first n fields where n is the number '\n 'of entries provided here',\n DefaultDescription='use header from observation metadata '\n 'map')\n ])\n\n CommandOuts = ParameterCollection([\n CommandOut(Name='table', DataType=Table,\n Description='Table with added metadata')\n ])\n\n def run(self, **kwargs):\n table = kwargs['table']\n sample_metadata = kwargs['sample_metadata']\n observation_metadata = kwargs['observation_metadata']\n sc_separated = kwargs['sc_separated']\n sc_pipe_separated = kwargs['sc_pipe_separated']\n int_fields = kwargs['int_fields']\n float_fields = kwargs['float_fields']\n sample_header = kwargs['sample_header']\n observation_header = kwargs['observation_header']\n\n # define metadata processing functions, if any\n process_fns = {}\n if sc_separated is not None:\n process_fns.update(dict.fromkeys(sc_separated,\n self._split_on_semicolons))\n\n if sc_pipe_separated is not None:\n process_fns.update(dict.fromkeys(sc_pipe_separated,\n self._split_on_semicolons_and_pipes))\n\n if int_fields is not None:\n process_fns.update(dict.fromkeys(int_fields, self._int))\n\n if float_fields is not None:\n process_fns.update(dict.fromkeys(float_fields, self._float))\n\n # parse mapping files\n if sample_metadata is not None:\n sample_metadata = MetadataMap.fromFile(sample_metadata,\n process_fns=process_fns,\n header=sample_header)\n\n if observation_metadata is not None:\n observation_metadata = MetadataMap.fromFile(observation_metadata,\n process_fns=process_fns, header=observation_header)\n\n if sample_metadata is None and observation_metadata is None:\n raise CommandError('Must specify sample_metadata and/or '\n 'observation_metadata.')\n\n # NAUGHTY: this is modifying the input table IN PLACE!!! And then\n # RETURNING IT! 
MetadataAdder is angry!\n\n # add metadata as necessary\n if sample_metadata:\n table.addSampleMetadata(sample_metadata)\n\n if observation_metadata:\n table.addObservationMetadata(observation_metadata)\n\n return {'table': table}\n\n def _split_on_semicolons(self, x):\n return [e.strip() for e in x.split(';')]\n\n def _split_on_semicolons_and_pipes(self, x):\n return [[e.strip() for e in y.split(';')] for y in x.split('|')]\n\n def _int(self, x):\n try:\n return int(x)\n except ValueError:\n return x\n\n def _float(self, x):\n try:\n return float(x)\n except ValueError:\n return x\n\nCommandConstructor = MetadataAdder\n","sub_path":"biom/commands/metadata_adder.py","file_name":"metadata_adder.py","file_ext":"py","file_size_in_byte":7011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"517829098","text":"__author__ = 'kurre'\n\nimport os\nimport functools\nimport nuke\nfrom nukescripts.panels import PythonPanel\nfrom PySide import QtCore\nfrom PySide import QtGui\n\nfrom animaNukeTools import *\nimport animaBaseAsset\n\n\nclass SetPresetPanel(PythonPanel):\n def __init__( self, node ):\n PythonPanel.__init__(self, 'Presets')\n class_name = node.Class()\n preset_path = \"/\".join( [ os.environ['ANIMA_PROJECT_ROOT'], 'tools', 'presets_nuke', class_name ] )\n presets = os.listdir( preset_path )\n self.enumKnob = nuke.Enumeration_Knob('presets', 'Presets', presets )\n self.addKnob( self.enumKnob )\n\n\nclass SavePresetPanel(PythonPanel):\n def __init__( self ):\n PythonPanel.__init__(self, 'Presets')\n self.textKnob = nuke.String_Knob('preset_name', 'Preset name' )\n self.addKnob( self.textKnob )\n\n\nclass OpenWorkfileDialog( QtGui.QDialog ):\n def __init__( self, parent=None ):\n super( OpenWorkfileDialog, self ).__init__( parent=parent )\n self.assets = []\n self.base_layout = QtGui.QVBoxLayout( self )\n self.season_layout = QtGui.QHBoxLayout( self )\n self.episode_layout = QtGui.QHBoxLayout( self )\n self.task_layout = QtGui.QHBoxLayout( self )\n self.button_layout = QtGui.QHBoxLayout( self )\n\n seasonLabel = QtGui.QLabel(\"Season\")\n self.seasonCombo = QtGui.QComboBox()\n self.seasons = self.get_seasons_and_episodes()\n s_list = []\n for s in self.seasons:\n s_list.append( s )\n s_list = sorted( s_list )\n for s in s_list:\n self.seasonCombo.addItem( s )\n self.season_layout.addWidget( seasonLabel )\n self.season_layout.addWidget( self.seasonCombo )\n episodeLabel = QtGui.QLabel(\"Episode\")\n self.episodeCombo = QtGui.QComboBox()\n self.episode_layout.addWidget( episodeLabel )\n self.episode_layout.addWidget( self.episodeCombo )\n\n taskLabel = QtGui.QLabel(\"Task\")\n self.taskCombo = QtGui.QComboBox()\n self.taskCombo.addItem('compositing')\n self.taskCombo.addItem('compositing-keyshot')\n self.task_layout.addWidget( taskLabel )\n self.task_layout.addWidget( self.taskCombo )\n\n self.file_list = QtGui.QListWidget()\n self.onSeasonChanged()\n self.taskCombo.currentIndexChanged.connect( self.onUpdateFiles )\n self.seasonCombo.currentIndexChanged.connect( self.onSeasonChanged )\n self.episodeCombo.currentIndexChanged.connect( self.onUpdateFiles )\n self.onUpdateFiles()\n\n self.ok_button = QtGui.QPushButton( \"Open selected\" )\n self.ok_button.clicked.connect( self.onOpen )\n self.button_layout.addWidget( self.ok_button )\n self.cancel_button = QtGui.QPushButton( \"Cancel\" )\n self.cancel_button.clicked.connect( self.reject )\n self.button_layout.addWidget( self.cancel_button )\n\n self.base_layout.addLayout( self.season_layout 
)\n self.base_layout.addLayout( self.episode_layout )\n self.base_layout.addLayout( self.task_layout )\n self.base_layout.addWidget( self.file_list )\n self.base_layout.addLayout( self.button_layout )\n self.setLayout( self.base_layout )\n\n def onOpen(self):\n selected = self.file_list.currentItem()\n if selected:\n file = \"/\".join( [self.getBrowsePath(), selected.text(), selected.text() + '.nk' ] )\n nuke.scriptOpen( file )\n self.accept()\n\n def onUpdateFiles(self):\n files = self.getFileList()\n self.file_list.clear()\n for f in files:\n self.file_list.addItem(f)\n\n def onSeasonChanged( self ):\n self.episodeCombo.clear()\n season = self.seasonCombo.currentText()\n for ep in self.seasons[season]:\n self.episodeCombo.addItem( ep )\n\n def getBrowsePath(self):\n season = self.seasonCombo.currentText()\n episode = self.episodeCombo.currentText()\n sequence = 'sq0010'\n task = self.taskCombo.currentText()\n root_path = '/'.join( [os.environ['ANIMA_PROJECT_ROOT'], 'work', task, season, episode, sequence ] )\n return root_path\n\n def getFileList(self):\n root_path = self.getBrowsePath()\n if os.path.exists( root_path ):\n dirs = os.listdir( root_path )\n return sorted( dirs )\n return []\n\n def get_seasons_and_episodes(self):\n root_path = '/'.join( [os.environ['ANIMA_PROJECT_ROOT'], 'assets', 'shots'] )\n seasons = {}\n if os.path.exists( root_path ):\n for dir in os.listdir( root_path ):\n s_path = root_path + \"/\" + dir\n if os.path.isdir( s_path ):\n episodes = []\n for ep in os.listdir( s_path ):\n e_path = s_path + \"/\" + ep\n if os.path.isdir( e_path ):\n episodes.append( ep )\n seasons[ dir ] = sorted( episodes )\n return seasons\n\n\nclass ManageVersionsDialog( QtGui.QDialog ):\n def __init__( self, node=None, parent=None ):\n super( ManageVersionsDialog, self ).__init__( parent )\n self.assets = []\n self.widgets = []\n self.node = node\n self.widths = (50,75,80)\n self.base_layout = QtGui.QVBoxLayout()\n self.label_layout = QtGui.QHBoxLayout()\n self.button_layout = QtGui.QHBoxLayout()\n self.version_layout = QtGui.QVBoxLayout()\n\n self.all_version_layout = QtGui.QHBoxLayout()\n shot_widget = QtGui.QLabel( \"\" )\n shot_widget.setFixedWidth( self.widths[0] )\n new_version_button = QtGui.QPushButton(\"New version for all\")\n new_version_button.setFixedWidth( self.widths[1] + self.widths[2] )\n new_version_button.clicked.connect( self.onNewVersionForAll )\n self.all_version_layout.addWidget( shot_widget )\n self.all_version_layout.addWidget( new_version_button )\n\n shotLabel = QtGui.QLabel(\"Shot\")\n shotLabel.setFixedWidth( self.widths[0] )\n versionLabel = QtGui.QLabel(\"Version\")\n versionLabel.setFixedWidth( self.widths[1] )\n emptyLabel = QtGui.QLabel(\" \")\n emptyLabel.setFixedWidth( self.widths[2] )\n self.label_layout.addWidget( shotLabel )\n self.label_layout.addWidget( versionLabel )\n self.label_layout.addWidget( emptyLabel )\n self.base_layout.addWidget( QtGui.QLabel(\"Manage versions\") )\n self.base_layout.addLayout( self.all_version_layout )\n self.base_layout.addLayout( self.label_layout )\n self.base_layout.addLayout( self.version_layout )\n\n self.ok_button = QtGui.QPushButton( \"Done\" )\n self.ok_button.clicked.connect( self.onDone )\n self.button_layout.addWidget( self.ok_button )\n self.base_layout.addLayout( self.button_layout )\n\n self.setLayout( self.base_layout )\n self.buildVersionUI()\n\n def onDone( self ):\n self.accept()\n\n def clearWidgets( self ):\n child = self.version_layout.takeAt(0)\n while child:\n layout_child = 
child.takeAt(0)\n while layout_child:\n widget = layout_child.widget()\n widget.setParent(None)\n del widget\n del layout_child\n layout_child = child.takeAt(0)\n del child\n child = self.version_layout.takeAt(0)\n\n def buildVersionUI(self):\n self.clearWidgets()\n self.assets = []\n anima_seq = get_anima_sequence()\n if anima_seq:\n for shot in anima_seq.shots:\n self.assets.append( shot )\n version = shot.get_version( asset_type='comp' )\n if not version:\n version = \"- no version -\"\n else:\n version = 'v'+str(version).zfill(3)\n layout = QtGui.QHBoxLayout()\n shot_widget = QtGui.QLabel( shot.asset.assetContainerName() )\n shot_widget.setFixedWidth( self.widths[0] )\n version_widget = QtGui.QLabel( version )\n version_widget.setFixedWidth( self.widths[1] )\n new_version_button = QtGui.QPushButton(\"New version\")\n new_version_button.setFixedWidth( self.widths[2] )\n action = functools.partial( self.onNewVersion, shot )\n new_version_button.clicked.connect( action )\n layout.addWidget( shot_widget )\n layout.addWidget( version_widget )\n layout.addWidget( new_version_button )\n self.version_layout.addLayout( layout )\n\n def makeNewVersionPaths(self, shot):\n asset = animaBaseAsset.BaseAsset.fromDct( shot.asset.toDct() )\n asset.setAssetType( 'comp' )\n new_version = asset.getVersions(new=True)[0]\n asset.setAssetVersion( new_version )\n paths = [ os.path.dirname( asset.assetDataPath( assetDataType='.exr', assetDataSequential=True ) ),\n os.path.dirname( asset.assetDataPath( assetDataType='.jpg', assetDataSequential=True ) ),\n os.path.dirname( asset.assetDataPath( assetManifestation='charAlpha', assetDataType='.exr', assetDataSequential=True ) ),\n os.path.dirname( asset.assetDataPath( assetManifestation='envAlpha', assetDataType='.exr', assetDataSequential=True ) ),\n os.path.dirname( asset.assetDataPath( assetManifestation='depth', assetDataType='.exr', assetDataSequential=True ) ) ]\n make_dirs( paths )\n\n def onNewVersion(self, shot):\n self.makeNewVersionPaths( shot )\n self.buildVersionUI()\n\n def onNewVersionForAll( self ):\n for shot_asset in self.assets:\n self.makeNewVersionPaths( shot_asset )\n self.buildVersionUI()\n\n\nclass CheckFramesDialog( QtGui.QDialog ):\n def __init__( self, node=None, parent=None ):\n super( CheckFramesDialog, self ).__init__( parent )\n self.base_layout = QtGui.QVBoxLayout()\n self.button_layout = QtGui.QHBoxLayout()\n\n variant_label = QtGui.QLabel( \"Variant:\" )\n self.variant_line = QtGui.QLineEdit()\n self.variant_line.setText('characters,location')\n size_label = QtGui.QLabel( \"File size threshold:\" )\n self.size_line = QtGui.QLineEdit()\n self.size_line.setText('100')\n self.result_box = QtGui.QTextEdit()\n self.result_box.setMinimumWidth( 640 )\n self.check_button = QtGui.QPushButton( \"Check files\" )\n self.check_button.clicked.connect( self.onCheckFiles )\n self.base_layout.addWidget( variant_label )\n self.base_layout.addWidget( self.variant_line )\n self.base_layout.addWidget( size_label )\n self.base_layout.addWidget( self.size_line )\n self.base_layout.addWidget( self.check_button )\n self.base_layout.addWidget( self.result_box )\n\n self.ok_button = QtGui.QPushButton( \"Close\" )\n self.ok_button.clicked.connect( self.onDone )\n self.button_layout.addWidget( self.ok_button )\n self.base_layout.addLayout( self.button_layout )\n\n self.setLayout( self.base_layout )\n\n def onDone( self ):\n self.accept()\n\n def onCheckFiles( self ):\n self.result_box.setText('')\n variant = self.variant_line.text()\n variants = 
variant.split(',')\n        size = self.size_line.text()\n        text = \"Checking frames...<br> \\n\"\n        for variant in variants:\n            variant = variant.strip()\n            result = check_frames( variant=variant, min_size=int(size) )\n            if result:\n                for res in result:\n                    text += \"--------------------------<br> \\n\" \\\n                        + \"Shot: \" + str(res) + \"<br> \\n\" \\\n                        + \"Variant: \" + str(variant) + \"<br> \\n\" \\\n                        + \"Version: \" + str(result[res][0]) + \"<br> \\n\" \\\n                        + \"Path: \" + str(result[res][1]) + \"<br> \\n\" \\\n                        + \"Missing frames: \" + ','.join( \"'{0}'\".format(n) for n in result[res][2]) + \"<br> \\n\" \\\n                        + \"Missing frames as nuke frames: \" + ','.join( \"'{0}'\".format(n) for n in result[res][3]) + \"<br> \\n\" \\\n                        + \"Suspiciously small frames: \" + ','.join( \"'{0}'\".format(n) for n in result[res][4]) + \"<br> \\n\" \\\n                        + \"<br> \\n\"\n        self.result_box.setHtml( text )\n","sub_path":"python/animaNukeDialogs.py","file_name":"animaNukeDialogs.py","file_ext":"py","file_size_in_byte":12508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"544733583","text":"\"\"\"\nCreated on Monday, Oct. 8, 2019\n\n@author: whyang\n\"\"\"\n# -*- coding: utf-8 -*-\n\n###\n# declare needed packages\n#\nimport pandas as pd\nfrom flask import Flask\nfrom flask_restplus import Api, Resource, fields, marshal\nfrom server.restAPI import server # REST UI's server for popvar (population variation)\nfrom models.popvar_model import * # REST UI's data model(schema) for popvar (population variation)\n\n###\n# initiate REST UI's server and API modules\n#\napp, api = server.app, server.api\n\n###\n# API structure of the enquiry for indigenous peoples population and its variation w.r.t. CIP's datasets\n# URL of API: \n# cip_pop_var 1.0 ----- tribe ----- 100_102 # query of population w.r.t tribe\n#                             ----- 100_103\n#                             ----- 100_107\n#                             ----- 102_103\n#                             ----- 102_107\n#                             ----- 103_107\n# cip_pop_var 1.0 ----- area ----- 100_102 # query of population w.r.t. each city and county\n#                            ----- 100_103\n#                            ----- 100_107\n#                            ----- 102_103\n#                            ----- 102_107\n#                            ----- 103_107\n# URL of API on Swagger UI: cip_pop_var/api\n# base URL of API: cip_pop_var/1.0/\n# adopt namespace in the api of flask to practice the above cip_pop_var API structure \n#\n_ns_tribe = api.namespace('tribe', description='原住民族人口變化(100-107): 族')\n_ns_area = api.namespace('area', description='原住民族人口變化(100-107): 縣市')\n\n\n#######################################################################################\n# declare functions\n#######################################################################################\n###\n# remove leading and trailing characters of each value across all cells in dataframe\n#\ndef trim_all_cells(df):\n    # trim whitespace from ends of each value across all series in dataframe\n    trim_strings = lambda x: x.strip() if isinstance(x, str) else x\n    return df.applymap(trim_strings)\n\n###\n# make sure the queried tribe exists\n#\ndef abort_if_tribe_doesnt_exist(tribe):\n    if tribe not in TRIBES:\n        api.abort(404, \"找不到該原住民族的資料\".format(tribe))\n###\n# make sure the queried area (city or county) exists\n#\ndef abort_if_area_doesnt_exist(area):\n    if area not in AREAS:\n        api.abort(404, \"找不到該縣市的資料\".format(area))\n\n#######################################################################################\n# url routing for the /tribe and /area \n#######################################################################################\n\n#parser = api.parser()\n#parser.add_argument('tribe', type=str, required=True, help='原住民族族名(例如,阿美族)', location='form')\n\n###\n# the period of the years 100 to 107\n# \n###\n# /tribe/100_107 query all tribes' variation info. 
of the indigenous population \n# 所有原住民族在各縣市之人口數變化'\n#\n@_ns_tribe.route('/100_107/')\nclass TribeList_100_107(Resource):\n @_ns_tribe.marshal_with(var_tribe_list_model_107) # declare corresponded data model(schema) for response \n @_ns_tribe.doc(description='目前原住民族人口統計資料集內容,可以查詢到的所有原住民族有: {0}'.format(', '.join(TRIBES.keys())))\n @_ns_tribe.doc(\n responses={\n 200: 'Success',\n 400: 'Validation Error'\n }\n )\n def get(self):\n '''查詢各縣市(區域)內,每個原住民族人口的年度變動數量 (100年~107年)''' \n ###\n # read the data between the year of 100 and 107\n #\n with open('..\\\\data\\\\population-var-100-107.csv', 'r', encoding='utf-8', newline='') as csvfile:\n df_100_107 = pd.read_csv(\n csvfile,\n header = 0,\n usecols = ['區域別',\n #usecols = ['日期區間', '區域別', '總計',\n '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族',\n '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族',\n '拉阿魯哇族', '卡那卡那富族', '尚未申報'],\n index_col = '區域別', # indexing as the names of city or county in AREAS\n verbose = True,\n skip_blank_lines = True)\n df_100_107 = trim_all_cells(df_100_107) # trim whitespace from each cell in dataframe\n \n ###\n # transpose dataframe to coincide to the data model of response\n #\n df = df_100_107.T # transpose()\n \n ###\n # add on tribe names w.r.t. '族別' column\n #\n df['族別'] = ['阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', \n '賽夏族', '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', \n '賽德克族','拉阿魯哇族', '卡那卡那富族', '尚未申報']\n \n ###\n # arrange responded values w.r.t. the data model of response\n # row_1 (column_1_name: cell_value,..., column_n_name: cell_value),...,row_n (...)\n # area = city/county name, tribe_name = value, ...\n #\n df1 = df.to_dict('records') # arrange responded values w.r.t. the data model of response\n \n ###\n # send out response\n #\n return df1 \n\n###\n# /tribe/100_107/ query one specific tribe's variation info. of the indigenous population \n#\n@_ns_tribe.route('/100_107/')\n@_ns_tribe.doc(responses={404: '找不到該原住民族的資料'}, params={'tribe': '原住民族族名'})\nclass Tribe_100_107(Resource):\n @_ns_tribe.marshal_with(var_tribe_model) # declare corresponded data model(schema) for response \n @_ns_tribe.doc(description='目前原住民族人口統計資料集內容,可以提供查詢的原住民族有: {0}'.format(', '.join(TRIBES.keys())))\n @_ns_tribe.doc(\n responses={\n 200: 'Success',\n 400: 'Validation Error'\n }\n )\n def post(self, tribe): #get(self, tribe):\n '''查詢某一個特定原住民族(如阿美族),在各縣市(區域)人口的年度變動數量 (100年~107年)'''\n ###\n # inspect the correctness of the queried tribe's name\n #\n abort_if_tribe_doesnt_exist(tribe)\n \n ###\n # read the data between the year of 100 and 107\n # \n with open('..\\\\data\\\\population-var-100-107.csv', 'r', encoding='utf-8', newline='') as csvfile:\n df_100_107 = pd.read_csv(\n csvfile,\n header = 0,\n usecols = ['區域別', tribe], # only one column for the queried tribe\n #usecols = ['日期區間', '區域別', '總計',\n #'阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族',\n #'雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族',\n #'拉阿魯哇族', '卡那卡那富族', '尚未申報'],\n verbose = True,\n skip_blank_lines = True)\n df_100_107 = trim_all_cells(df_100_107) # trim whitespace from each cell in dataframe\n \n ###\n # map the dataframe's columns of '區域別' and tribe_name into the names w.r.t. the used data model(schema)\n #\n df_100_107.rename({'區域別': 'area', tribe: 'amount'}, axis='columns', inplace=True) # axis=1\n\n ###\n # arrange responded values w.r.t. 
the data model of response\n        # row_1 (area: cell_value('區域別'), amount: cell_value(tribe)),...,row_n()\n        # area = city/county name, amount = pop_var of the tribe in the city/county\n        # \n        df = df_100_107.to_dict('records')\n        \n        ###\n        # send out response\n        #\n        return df\n\n###\n# /area/100_107 query the tribes' variation info. of the indigenous population in the area (city/county) \n# 各縣市(區域)內之原住民族人口數變化'\n#\n@_ns_area.route('/100_107/')\nclass AreaList_100_107(Resource):\n    @_ns_area.marshal_with(var_area_list_model) # declare corresponded data model(schema) for response \n    @_ns_area.doc(description='目前原住民族人口統計資料集內容,可以查詢的縣市有: {0}'.format(', '.join(AREAS.keys())))\n    @_ns_area.doc(\n        responses={\n            200: 'Success',\n            400: 'Validation Error'\n        }\n    )\n    def get(self):\n        '''查詢原住民族人口統計數據,在各縣市的年度變動數量 (100年~107年)''' \n        ###\n        # read the data between the year of 100 and 107\n        #\n        with open('..\\\\data\\\\population-var-100-107.csv', 'r', encoding='utf-8', newline='') as csvfile:\n            df_100_107 = pd.read_csv(\n                csvfile,\n                header = 0,\n                usecols = ['區域別','總計',\n                #usecols = ['日期區間', '區域別', '總計',\n                           '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族',\n                           '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族',\n                           '拉阿魯哇族', '卡那卡那富族', '尚未申報'],\n                #index_col = '區域別', # skip to index with any column\n                verbose = True,\n                skip_blank_lines = True)\n            df_100_107 = trim_all_cells(df_100_107) # trim whitespace from each cell in dataframe\n\n        ###\n        # arrange responded values w.r.t. the data model of response\n        # row_1 (column_1_name: cell_value,..., column_n_name: cell_value),...,row_n (...)\n        # 區域別 = city/county name, 總計 = amount, tribe_name = value, ...\n        #\n        df1 = df_100_107.to_dict('records')\n        \n        ###\n        # send out response\n        #\n        return df1\n\n###\n# /area/100_107/<area> query the tribes' variation info. 
of the indigenous population in the specific area (city/county) \n#\n@_ns_area.route('/100_107/')\n@_ns_area.doc(responses={404: '找不到該縣市的資料'}, params={'area': '縣市名稱'})\nclass Area_100_107(Resource):\n @_ns_area.marshal_with(var_area_model) # declare corresponded data model(schema) for response\n @_ns_area.doc(description='目前原住民族人口統計資料集內容,可以提供查詢的區域(縣市)有: {0}'.format(', '.join(AREAS.keys())))\n @_ns_area.doc(\n responses={\n 200: 'Success',\n 400: 'Validation Error'\n }\n )\n def post(self, area): #get(self, area):\n '''查詢某一個縣市(如新北市)內,各原住民族人口的年度變動數量 (100年~107年)'''\n ###\n # inspect the correctness of the queried area's name\n #\n abort_if_area_doesnt_exist(area)\n \n ###\n # read the data between the year of 100 and 107\n # \n with open('..\\\\data\\\\population-var-100-107.csv', 'r', encoding='utf-8', newline='') as csvfile:\n df_100_107 = pd.read_csv(\n csvfile,\n header = 0,\n usecols = ['區域別',\n #usecols = ['日期區間', '區域別', '總計',\n '阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族',\n '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族',\n '拉阿魯哇族', '卡那卡那富族', '尚未申報'],\n #index_col = '區域別', # skip to index with any column\n verbose = True,\n skip_blank_lines = True)\n df_100_107 = trim_all_cells(df_100_107) # trim whitespace from each cell in dataframe\n\n ###\n # to deal with two counties' names (桃園縣 and 桃園市)\n # in the year of 100~102 and 102~103, 桃園縣 is collected in the dataset (while 桃園市 is not included because it is 省轄市)\n # however, 桃園市(直轄市) contains the number of the both of 桃園縣 and 桃園市(省轄市) in the year of 103~107\n #\n if (area == '桃園縣'):\n area = '桃園市'\n # unify to use '臺' instead of '台' \n if (area == '台北市'): \n area = '臺北市'\n elif (area == '台中市'): \n area = '臺中市' \n elif (area == '台南市'): \n area = '臺南市' \n elif (area == '台東縣'): \n area = '臺東縣' \n\n ###\n # to screen out the number w.r.t. each tribe of the specific city/country\n # \n filter = df_100_107['區域別'] == area # filter context in terms of the name of the city/county\n # construct the columns of the output response structure\n y_100_107_col= ['阿美族', '泰雅族', '排灣族', '布農族', '魯凱族', '卑南族', '鄒族', '賽夏族',\n '雅美族', '邵族', '噶瑪蘭族', '太魯閣族', '撒奇萊雅族', '賽德克族',\n '拉阿魯哇族', '卡那卡那富族', '尚未申報']\n # construct the row of the output response structure\n y_100_107 = [df_100_107.loc[filter, '阿美族'].values[0],\n df_100_107.loc[filter, '泰雅族'].values[0],\n df_100_107.loc[filter, '排灣族'].values[0],\n df_100_107.loc[filter, '布農族'].values[0],\n df_100_107.loc[filter, '魯凱族'].values[0],\n df_100_107.loc[filter, '卑南族'].values[0],\n df_100_107.loc[filter, '鄒族'].values[0],\n df_100_107.loc[filter, '賽夏族'].values[0],\n df_100_107.loc[filter, '雅美族'].values[0],\n df_100_107.loc[filter, '邵族'].values[0],\n df_100_107.loc[filter, '噶瑪蘭族'].values[0],\n df_100_107.loc[filter, '太魯閣族'].values[0],\n df_100_107.loc[filter, '撒奇萊雅族'].values[0],\n df_100_107.loc[filter, '賽德克族'].values[0],\n df_100_107.loc[filter, '拉阿魯哇族'].values[0],\n df_100_107.loc[filter, '卡那卡那富族'].values[0],\n df_100_107.loc[filter, '尚未申報'].values[0]]\n ###\n # map the dataframe's columns of 'tribe' and 'amount' into the above response structure of column and row w.r.t. the data model(schema)\n # \n dict = {\"tribe\": y_100_107_col, \"amount\": y_100_107}\n df = pd.DataFrame(dict)\n\n ###\n # arrange responded values w.r.t. 
the data model of response\n # row_1 (tribe: cell_value, amount: cell_value(tribe)),...,row_n()\n # tribe = tribe's name, amount = pop_var of the tribe in the city/county\n # \n df1 =df.to_dict('records')\n \n ###\n # send out response\n #\n return df1 \n","sub_path":"restui/resources/popvar_resource_100_107.py","file_name":"popvar_resource_100_107.py","file_ext":"py","file_size_in_byte":15948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"319390454","text":"# -*- coding: utf-8 -*-\n#\n# Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.\n# Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.\n# Licensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://opensource.org/licenses/MIT\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n# an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n#\nimport mock\nimport pytest\n\nfrom backend.apps.templatesets.release.generator import generator\nfrom backend.apps.templatesets.release.generator.res_context import ResContext\nfrom backend.tests.bcs_mocks.misc import FakePaaSCCMod\nfrom backend.utils.basic import getitems\n\npytestmark = pytest.mark.django_db\n\n\n@pytest.fixture(autouse=True)\ndef use_dummy_settings_config(settings):\n settings.DEVOPS_ARTIFACTORY_HOST = \"http://harbor-api.service.consul\"\n\n\nclass TestReleaseDataGenerator:\n def test_form_generator(self, bk_user, cluster_id, form_template, form_version_entity, form_show_version):\n instance_entity = {res_name: ids.split(',') for res_name, ids in form_version_entity.resource_entity.items()}\n\n namespace = 'test'\n context = ResContext(\n access_token=bk_user.token.access_token,\n username=bk_user.username,\n cluster_id=cluster_id,\n project_id=form_template.project_id,\n namespace=namespace,\n template=form_template,\n show_version=form_show_version,\n instance_entity=instance_entity,\n extras={'namespace_id': 1},\n )\n\n with mock.patch(\n 'backend.apps.templatesets.release.generator.form_mode.get_ns_variable', return_value=(False, '1.12.3', {})\n ), mock.patch('backend.apps.instance.generator.paas_cc', new=FakePaaSCCMod()):\n\n data_generator = generator.ReleaseDataGenerator(name=\"nginx\", res_ctx=context)\n release_data = data_generator.generate()\n\n for res in release_data.resource_list:\n assert res.name == getitems(res.manifest, 'metadata.name')\n assert res.kind == getitems(res.manifest, 'kind')\n assert res.namespace == getitems(res.manifest, 'metadata.namespace')\n assert res.version == form_show_version.name\n assert getitems(res.manifest, 'webCache') is None\n assert 'io.tencent.bcs.cluster' in getitems(res.manifest, 'metadata.annotations')\n\n if res.kind == 'Service':\n assert getitems(res.manifest, 'spec.type') == 'ClusterIP'\n\n def test_yaml_generator(self, bk_user, cluster_id, yaml_template, yaml_version_entity, yaml_show_version):\n instance_entity = {res_name: ids.split(',') for res_name, ids in yaml_version_entity.resource_entity.items()}\n namespace = 'test'\n context = ResContext(\n access_token=bk_user.token.access_token,\n username=bk_user.username,\n cluster_id=cluster_id,\n 
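# same wiring as the form-mode test above, just pointed at the YAML template fixtures\n            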
project_id=yaml_template.project_id,\n namespace=namespace,\n template=yaml_template,\n show_version=yaml_show_version,\n instance_entity=instance_entity,\n extras={'namespace_id': 1},\n )\n\n with mock.patch('backend.bcs_k8s.app.bcs_info_provider.paas_cc', new=FakePaaSCCMod()), mock.patch(\n 'backend.bcs_k8s.helm.bcs_variable.paas_cc', new=FakePaaSCCMod()\n ):\n data_generator = generator.ReleaseDataGenerator(name=\"gw\", res_ctx=context)\n release_data = data_generator.generate()\n\n assert len(release_data.resource_list) == 4\n\n for res in release_data.resource_list:\n assert res.name == getitems(res.manifest, 'metadata.name')\n assert res.kind == getitems(res.manifest, 'kind')\n assert res.namespace == getitems(res.manifest, 'metadata.namespace')\n assert res.version == yaml_show_version.name\n\n if res.kind == 'Endpoints':\n assert getitems(res.manifest, 'subsets')[0]['addresses'][0]['ip'] == '0.0.0.1'\n continue\n\n if res.kind == 'Pod':\n assert getitems(res.manifest, 'spec.containers')[0]['image'] == 'redis:5.0.4'\n","sub_path":"bcs-app/backend/tests/apps/templatesets/release/test_generator.py","file_name":"test_generator.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"467670691","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import savgol_filter\nimport sys, time\n\nsys.path.insert(0, \"..\")\nfrom extra.progressbar import progress_bar\nfrom data_reader_NMEA.NMEA_data_reader import ReadNMEAData\nfrom extra.error_calculation_NMA_standard import accuracy_NMEA, filtering_outliers\n\n\ndef recording_data_2018(receiver):\n\n obj = ReadNMEAData()\n if receiver == \"HFS\":\n obj.read_textfile(adress, verbose=False)\n # print(obj.day_year, receiver)\n datapoints_per_day[i], dataline_per_day[i] = obj.datapoints\n N, E, Z = obj.coordinates\n z_n = obj.datapoints[0]\n yeardataHFS_z[i, : len(Z)] = Z\n\n if receiver == \"STE\":\n obj.read_textfile(adress, verbose=False)\n datapoints_per_day[i], dataline_per_day[i] = obj.datapoints\n N, E, Z = obj.coordinates\n z_n = obj.datapoints[0]\n yeardataSTE_z[i, : len(Z)] = Z\n if receiver == \"TRM\":\n obj.read_textfile(adress, verbose=False)\n # print(obj.day_year, receiver)\n datapoints_per_day[i], dataline_per_day[i] = obj.datapoints\n N, E, Z = obj.coordinates\n z_n = obj.datapoints\n yeardataTRM_z[i, : len(Z)] = Z\n return N, E, Z, z_n\n\n\ndef plot_datapoints():\n plt.plot(datapoints_per_day)\n plt.plot(datapoints_per_day, \"*\")\n plt.plot(dataline_per_day)\n plt.legend([\"gps fix\", \"other point\"])\n plt.ylabel(\"datapoints /lines \")\n plt.xlabel(\"time [days]\")\n plt.title(\"datapoints over a full year at \" + receiver)\n plt.show()\n\n\ndef plot_coordinates():\n plt.plot(np.median(yeardataHFS_z, axis=1))\n plt.plot(np.median(yeardataSTE_z, axis=1))\n plt.plot(np.median(yeardataTRM_z, axis=1))\n plt.title(\"z-coordinate read at \" + receiver + \" over 2018\")\n plt.ylabel(\"offset from average [m]\")\n\n plt.show()\n\n\ndef plotting_noise():\n plt.plot(noise_Z_6, label=\"0-6\")\n plt.plot(noise_Z_12, label=\"6-12\")\n plt.plot(noise_Z_18, label=\"12-18\")\n plt.plot(noise_Z_24, label=\"18-24\")\n plt.title(\"z-coordinate noise at \" + receiver + \" over 2018\")\n plt.ylabel(\"sample noise [m]\")\n plt.xlabel(\"days\")\n plt.legend()\n plt.show()\n\n\ndays_in_a_month = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\n\nreceiver_stations = [\"HFS\", \"STE\", \"TRM\"]\nnr_days = 365\nyear = 
\"2018\"\ndatapoints_per_day = np.zeros(nr_days)\ndataline_per_day = np.zeros(nr_days)\n\ndate = []\n\nyeardataHFS_z = np.zeros((nr_days, 84600))\nyeardataSTE_z = np.zeros((nr_days, 84600))\nyeardataTRM_z = np.zeros((nr_days, 84600))\n\nnoise = np.zeros((nr_days))\nnoise_Z_6 = np.zeros(nr_days)\nnoise_Z_12 = np.zeros(nr_days)\nnoise_Z_18 = np.zeros(nr_days)\nnoise_Z_24 = np.zeros(nr_days)\n\nfor i in range(1, 366):\n if len(str(i)) == 1:\n date.append(\"00\" + str(i))\n elif len(str(i)) == 2:\n date.append(\"0\" + str(i))\n else:\n date.append(str(i))\n\nfor receiver in receiver_stations:\n for i in range(len(date)):\n progress_bar(i, len(date))\n # recording_data_2018()\n # adress = \"/run/media/michaelsb/HDD Linux/data/NMEA/2018/\"+date[i]+\"/\"+\\\n # NMEA_M\"+receiver +\"_\"+date[i]+\"0.log\"\n adress = (\n \"/scratch/michaesb/data/NMEA/\"\n + year\n + \"/\"\n + date[i]\n + \"/NMEA_M\"\n + receiver\n + \"_\"\n + date[i]\n + \"0.log\"\n )\n try:\n N, E, Z, z_n = recording_data_2018(receiver)\n # Z,Z_filtered = filtering_outliers(Z)\n # sigma_Z = accuracy_NMEA(Z_filtered-np.median(Z_filtered))\n # sigma_Z_smooth= savgol_filter(sigma_Z,window_length=(5),polyorder=3)\n # noise_Z_6[i] = np.median(sigma_Z_smooth[:int(len(sigma_Z_smooth)/4)])\n # noise_Z_12[i] =np.median(sigma_Z_smooth[int(len(sigma_Z_smooth)/4):int(len(sigma_Z_smooth)/2)])\n # noise_Z_18[i] = np.median(sigma_Z_smooth[int(len(sigma_Z_smooth)/2):int(3*len(sigma_Z_smooth)/4)])\n # noise_Z_24[i] =np.median(sigma_Z_smooth[int(3*len(sigma_Z_smooth)/4):])\n except FileNotFoundError:\n print(\"no\" + receiver + \"file here at day: \" + str(i) + \" year: \" + year)\n print(adress)\n continue\n plot_datapoints()\n plot_coordinates()\n","sub_path":"seasonal_data/monthly_average.py","file_name":"monthly_average.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"341450429","text":"\n\ndef destroy_lifecycle_rule(client, module):\n name = module.params.get('name')\n prefix = module.params.get('prefix')\n rule_id = module.params.get('rule_id')\n changed = False\n if (prefix is None):\n prefix = ''\n try:\n current_lifecycle_rules = client.get_bucket_lifecycle_configuration(Bucket=name)['Rules']\n except ClientError as e:\n if (e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration'):\n current_lifecycle_rules = []\n else:\n module.fail_json_aws(e)\n except BotoCoreError as e:\n module.fail_json_aws(e)\n lifecycle_obj = dict(Rules=[])\n if (rule_id is not None):\n for existing_rule in current_lifecycle_rules:\n if (rule_id == existing_rule['ID']):\n changed = True\n else:\n lifecycle_obj['Rules'].append(existing_rule)\n else:\n for existing_rule in current_lifecycle_rules:\n if (prefix == existing_rule['Filter']['Prefix']):\n changed = True\n else:\n lifecycle_obj['Rules'].append(existing_rule)\n try:\n if lifecycle_obj['Rules']:\n client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_obj)\n elif current_lifecycle_rules:\n changed = True\n client.delete_bucket_lifecycle(Bucket=name)\n except (ClientError, BotoCoreError) as e:\n module.fail_json_aws(e)\n module.exit_json(changed=changed)\n","sub_path":"Data Set/bug-fixing-1/67cf2da2a1a0007915cc99c77c72616e6c29d478--fix.py","file_name":"67cf2da2a1a0007915cc99c77c72616e6c29d478--fix.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"271958014","text":"import 
json\n\nfrom kafka import KafkaConsumer\n\n\nconsumer = KafkaConsumer('pred_size_alert', \n\t\t\t\t\t\t bootstrap_servers='localhost:9092', \n\t\t\t\t\t\t client_id='pfe2019',\n\t\t\t\t\t\t enable_auto_commit=True,\n\t\t\t\t\t\t auto_commit_interval_ms=1000, # commit every second\n\t\t\t\t\t\t group_id='cascade_size_alert_receiver',\n\t\t\t\t\t\t value_deserializer=lambda x: json.loads(x.decode('utf-8')))\n\nfor alert in consumer:\n\tprint(f\"Received an alert for {alert.value['cascade_idx']}\")\n\tprint(f\"Estimated cascade size: {alert.value['estimated_size']} \\n\")","sub_path":"cascade_size_alert_receiver.py","file_name":"cascade_size_alert_receiver.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"266166832","text":"#coding: utf-8\r\nimport Tkinter as tk\r\nimport csv\r\nimport random\r\nimport time\r\nimport base64\r\nimport os\r\nimport codecs\r\nimport itertools\r\nfrom tkMessageBox import showerror\r\nfrom icon import img\r\n\r\n#Build\r\nbbuild=\" (Build Alpha 0.3.1)\"\r\n\r\nclass Word:\r\n def __init__(self,iid=0,wword=\"\",mean=\"\",sent=\"\",arti=0,book=0,used=1,over=0):\r\n self.id=iid\r\n self.word=wword\r\n self.mean=mean\r\n self.sent=sent\r\n self.arti=arti\r\n self.book=book\r\n self.used=used\r\n self.over=over\r\n def __lt__(lhs,rhs):\r\n return lhs.word0:\r\n id_used.append(tuple(arti_id[i+1]))\r\n for j in range(arti_id[i+1][0],arti_id[i+1][1]):\r\n data.append(rdata[j])\r\n data[ldata].used=1\r\n idmax+=1\r\n ldata+=1\r\n #print(larti[i+1])\r\n #print(\"%d,%d\"%(arti_id[i+1][0],arti_id[i+1][1]))\r\n if (ldata==0):\r\n vartis[0].set(1)\r\n renew()\r\n #print(data[0].word)\r\n #print(len(id_used))\r\n random.shuffle(data)\r\n\r\ndef idmap(iid):\r\n global id_used,data,pprob,idmax,score,rdata,ldata\r\n tid=0\r\n #print(iid)\r\n #print(len(id_used))\r\n for j in range(ldata):\r\n if data[j].used:\r\n tid+=1\r\n else:\r\n continue\r\n if tid==iid:\r\n data[j].used=0\r\n idmax-=1\r\n pprob=data[j]\r\n score+=1\r\n #print(\"(%d,%d)\"%(idmax,ldata))\r\n return\r\n #print(\"idmap:%d\"%(pprob.id))\r\n pprob=rdata[0]\r\n #print(\"!!!\")\r\ndef get_prob():\r\n global idmax,pprob,data,rdata\r\n if (idmax==0):\r\n pprob=rdata[0]\r\n return\r\n iid=idmax\r\n #print(\"get_prob:%d\"%iid)\r\n idmap(iid)\r\n\r\ndef read_setting():\r\n global setting\r\n with open('setting.ini','r') as f:\r\n lines=f.readlines()\r\n if lines[0][:3]==codecs.BOM_UTF8:\r\n lines[0]=lines[0][3:]\r\n for line in lines:\r\n if line.startswith('#'):\r\n continue\r\n if '=' in line:\r\n lt=line.split('=')\r\n setting[lt[0].strip(' ').strip('\\r').strip('\\n')]=lt[1].strip(' ').strip('\\r').strip('\\n')\r\n #print(\"Read %s=%s\"%(lt[0].strip(' ').strip('\\r').strip('\\n'),lt[1].strip(' ').strip('\\r').strip('\\n')))\r\n if ('guisetting' in setting) and (setting['guisetting']!=''):\r\n try:\r\n with open(setting['guisetting'],'r') as f:\r\n lines=f.readlines()\r\n if lines[0][:3]==codecs.BOM_UTF8:\r\n lines[0]=lines[0][3:]\r\n for line in lines:\r\n if line.startswith('#'):\r\n continue\r\n if '=' in line:\r\n lt=line.split('=')\r\n setting[lt[0].strip(' ').strip('\\r').strip('\\n')]=lt[1].strip(' ').strip('\\r').strip('\\n')\r\n except:\r\n pass\r\n \r\ndef read_data():\r\n global setting,rdata,idmax,arti_id,mbook,marti,larti\r\n with open(setting['csv'],'rb') as myFile:\r\n lines=list(csv.reader(myFile))\r\n #print(lines[0])\r\n if lines[0][0][:3]==codecs.BOM_UTF8:\r\n #print(\"BOM\")\r\n 
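# ---- Illustrative sketch (added aside, not part of the original record) ----
# read_setting()/read_data() above strip a UTF-8 BOM by hand before parsing
# "key = value" lines. A minimal self-contained version of that idea; the
# file name 'setting.ini' and the helper name parse_ini are assumptions.
import codecs

def parse_ini(path='setting.ini'):
    settings = {}
    with open(path, 'rb') as f:
        raw = f.read()
    if raw.startswith(codecs.BOM_UTF8):
        raw = raw[len(codecs.BOM_UTF8):]  # drop the BOM once, up front
    for line in raw.decode('utf-8').splitlines():
        line = line.strip()
        if not line or line.startswith('#') or '=' not in line:
            continue  # skip blanks, comments and lines without '='
        key, _, value = line.partition('=')
        settings[key.strip()] = value.strip()
    return settings
# ---- end sketch ----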
lines[0][0]=lines[0][0][3:]\r\n for line in lines:\r\n iid=int(line[0])\r\n line[0]=iid\r\n line[4]=marti[line[4]][0]\r\n line[5]=mbook[line[5]]\r\n rdata[iid]=Word(*line)\r\n artiid=rdata[iid].arti\r\n arti_id[artiid][0]=min(arti_id[artiid][0],iid)\r\n arti_id[artiid][1]=max(arti_id[artiid][1],iid+1)\r\n idmax+=1\r\n #print(\"%d ids.\"%idmax)\r\n for i in range(artis+1):\r\n if (arti_id[i][1]-arti_id[i][0]>0):\r\n id_used.append(tuple(arti_id[i]))\r\n rdata[0].over=1\r\n rdata[0].arti=0\r\n larti[0]=setting['overarti']\r\n rdata[0].book=0\r\n rdata[0].id=0\r\n rdata[0].mean=setting['overmean']\r\n rdata[0].sent=setting['oversent']\r\n rdata[0].word=setting['overword']\r\n\r\ndef set_state(st):\r\n global cka,ckb\r\n if not st:\r\n for i in cka:\r\n i.config(state=tk.DISABLED)\r\n for j in ckb:\r\n j.config(state=tk.DISABLED)\r\n else:\r\n for i in cka:\r\n i.config(state=tk.NORMAL)\r\n for j in ckb:\r\n j.config(state=tk.NORMAL)\r\n\r\n #print(\"state set!\")\r\n\r\ndef restart():\r\n global score,gover,tword,tsent,tans,tfrom,setting,aans,tbnext,tbans,brest,tmult,ltip2\r\n score=0\r\n gover=0\r\n set_state(1)\r\n brest.place_forget()\r\n tword.set(setting['wr'])\r\n tsent.set(setting['sr'])\r\n tfrom.set(setting['fr'])\r\n aans=-1\r\n tbans.set(setting['tbans0'])\r\n tbnext.set(setting['tbnext0'])\r\n ttogo.set('')\r\n tans.set('')\r\n tmult.place(x=600,y=400,anchor='nw')\r\n ltip2.place(x=520,y=395,anchor='nw')\r\n renew()\r\n\r\ndef hit_bans():\r\n global tword,tsent,tans,aans,data,tmult,ldata,idmax,setting\r\n if aans!=-1:\r\n tans.set(aans)\r\n else:\r\n data.sort()\r\n tl=[list(g) for k,g in itertools.groupby(data)]\r\n words=dict([(key.word,list(group)) for key,group in itertools.groupby(data)])\r\n random.shuffle(tl)\r\n data=[]\r\n for i in tl:\r\n data.extend(i)\r\n choi=tmult.get().strip(' ').encode('utf-8')\r\n tmult.delete(0,len(tmult.get()))\r\n if (choi!=''):\r\n if (choi not in words):\r\n showerror(title=setting['errtitle'], message=setting['errtext']%choi)\r\n return\r\n data=words[choi]\r\n ldata=len(data)\r\n #print(data)\r\n idmax=ldata\r\n hit_bnext()\r\ndef hit_bnext():\r\n global tword,tsent,tans,aans,pprob,tfrom,idmax\r\n global tbnext,tbans,setting,score,gover,brest,tmult,ltip2\r\n if gover:\r\n restart()\r\n return\r\n set_state(0)\r\n get_prob()\r\n #print(\"bnext:%d\"%pprob.id)\r\n tword.set(pprob.word)\r\n tsent.set(pprob.sent)\r\n brest.place_forget()\r\n tmult.place_forget()\r\n ltip2.place_forget()\r\n if not pprob.over:\r\n tfrom.set('《'+larti[pprob.arti]+'》')\r\n aans=pprob.mean\r\n gover=0\r\n tbans.set(setting['ansbut'])\r\n tbnext.set(setting['nextbut'])\r\n tans.set('')\r\n brest.place(x=20,y=20,anchor='nw')\r\n ttogo.set(str(idmax))\r\n else:\r\n tfrom.set(larti[pprob.arti])\r\n aans=pprob.mean%score\r\n gover=1\r\n tbans.set(setting['overans'])\r\n tbnext.set(setting['overbut'])\r\n tans.set(setting['overa'])\r\n ttogo.set('')\r\n \r\n\r\ndef draw_main():\r\n global tword,tsent,tans,tfrom,setting,vooks,vartis,aans,labout1,labout2,labout3,labout4,labout5,labout6,labout7,window\r\n global tbnext,tbans,cka,ckb,brest,ttogo,tmult,ltip2\r\n \r\n labout1.pack_forget()\r\n labout2.pack_forget()\r\n labout3.pack_forget()\r\n labout4.pack_forget()\r\n labout5.pack_forget()\r\n labout6.pack_forget()\r\n labout7.pack_forget()\r\n \r\n ckb=[]\r\n cka=[]\r\n lword=tk.Label(window,textvariable=tword,font=(setting['font'],int(setting['bsize'])))\r\n lword.pack()\r\n tword.set(setting['wr'])\r\n #print(\"p1\")\r\n 
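# ---- Illustrative sketch (added aside, not part of the original record) ----
# hit_bans() above sorts the word list and then groups it with
# itertools.groupby; groupby only merges *adjacent* equal items, which is
# why the sort must come first. The same pattern on plain strings:
import itertools

words = ['casa', 'sol', 'casa', 'luz', 'sol']
grouped = dict((key, list(group))
               for key, group in itertools.groupby(sorted(words)))
# grouped == {'casa': ['casa', 'casa'], 'luz': ['luz'], 'sol': ['sol', 'sol']}
# ---- end sketch ----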
lsent=tk.Label(window,textvariable=tsent,font=(setting['font'],int(setting['bsize'])))\r\n lsent.pack()\r\n #print(\"p2\")\r\n tsent.set(setting['sr'])\r\n lfrom=tk.Label(window,textvariable=tfrom,font=(setting['font'],int(setting['bsize'])))\r\n lfrom.pack()\r\n #print(\"p3\")\r\n tfrom.set(setting['fr'])\r\n aans=-1\r\n tbans.set(setting['tbans0'])\r\n tbnext.set(setting['tbnext0'])\r\n bans=tk.Button(window,textvariable=tbans,command=hit_bans,font=(setting['font'],int(setting['ssize'])))\r\n bans.pack()\r\n lans=tk.Label(window,textvariable=tans,font=(setting['font'],int(setting['bsize'])))\r\n lans.pack()\r\n bnext=tk.Button(window,textvariable=tbnext,command=hit_bnext,font=(setting['font'],int(setting['ssize'])))\r\n bnext.pack()\r\n ltip=tk.Label(window,text=setting['ttip'],font=(setting['font'],int(setting['sssize'])))\r\n ltip.place(x=20,y=500,anchor='nw')\r\n brest=tk.Button(window,text=setting['trest'],command=restart,font=(setting['font'],int(setting['sssize'])))\r\n brest.place_forget()\r\n ttogo=tk.StringVar()\r\n ttogo.set('')\r\n ltogo=tk.Label(window,textvariable=ttogo,font=(setting['font'],int(setting['mssize'])))\r\n ltogo.place(x=850,y=20,anchor='nw')\r\n\r\n \r\n for i in range(6):\r\n vbooks.append(tk.IntVar())\r\n vbooks[i].set(1)\r\n ckb.append(tk.Checkbutton(window, text=lbook[i+1], variable=vbooks[i], onvalue=1, offvalue=0,\r\n command=renew_book))\r\n ckb[i].place(x=20+i/3*50,y=550+i%3*20,anchor='nw')\r\n #print(\"place %d:(%d,%d)\"%(i,50+i/3*50,550+i%3*20))\r\n for i in range(artis):\r\n vartis.append(tk.IntVar())\r\n vartis[i].set(1)\r\n cka.append(tk.Checkbutton(window, text=larti[i+1], variable=vartis[i], onvalue=1, offvalue=0,\r\n command=renew))\r\n cka[i].place(x=120+i/3*135,y=550+i%3*20,anchor='nw')\r\n renew()\r\n ltip2=tk.Label(window,text=setting['tchoose'],font=(setting['font'],int(setting['sssize'])))\r\n ltip2.place(x=520,y=395,anchor='nw')\r\n tmult=tk.Entry(window)\r\n tmult.place(x=600,y=400,anchor='nw')\r\ndef main():\r\n global tword,tsent,tans,tfrom,setting,vooks,vartis,aans,labout1,labout2,labout3,labout4,labout5,labout6,labout7,window\r\n global tbans,tbnext,bbuild\r\n read_setting()\r\n #print(setting)\r\n window=tk.Tk()\r\n #icon\r\n tmp=open(\"tmp.ico\",\"wb+\")\r\n tmp.write(base64.b64decode(img))\r\n tmp.close()\r\n window.iconbitmap(\"tmp.ico\")\r\n os.remove(\"tmp.ico\")\r\n \r\n window.title(setting['title']+bbuild)\r\n window.geometry(setting['size'])\r\n tword=tk.StringVar()\r\n tsent=tk.StringVar()\r\n tans=tk.StringVar()\r\n tfrom=tk.StringVar()\r\n tbans=tk.StringVar()\r\n tbnext=tk.StringVar()\r\n #about\r\n labout1=tk.Label(window,text=setting[\"about1\"],font=(setting['font'],int(setting['bsize'])))\r\n labout1.pack()\r\n labout2=tk.Label(window,text=setting[\"about2\"],font=(setting['font'],int(setting['bsize'])))\r\n labout2.pack()\r\n labout3=tk.Label(window,text=setting[\"about3\"],font=(setting['font'],int(setting['bsize'])))\r\n labout3.pack()\r\n labout4=tk.Label(window,text=setting[\"about4\"],font=(setting['font'],int(setting['bsize'])))\r\n labout4.pack()\r\n labout5=tk.Label(window,text=setting[\"about5\"],font=(setting['font'],int(setting['bsize'])))\r\n labout5.pack()\r\n labout6=tk.Label(window,text=setting[\"about6\"],font=(setting['font'],int(setting['bsize'])))\r\n labout6.pack()\r\n labout7=tk.Label(window,text=setting[\"about7\"],font=(setting['font'],int(setting['bsize'])))\r\n labout7.pack()\r\n \r\n #/about\r\n \r\n read_data()\r\n #for i in data:\r\n #print(\"%d\"%i.id)\r\n 
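# ---- Illustrative sketch (added aside, not part of the original record) ----
# main() above materialises the base64-embedded window icon as a temporary
# .ico file, hands it to Tk, then deletes it. The round-trip in isolation;
# ICON_B64 is a placeholder here, not the real icon payload:
import base64
import os

ICON_B64 = base64.b64encode(b'fake-icon-bytes')
with open('tmp.ico', 'wb') as tmp:
    tmp.write(base64.b64decode(ICON_B64))  # recreate the binary icon on disk
# window.iconbitmap('tmp.ico') would be called here, then:
os.remove('tmp.ico')  # the file is only needed while Tk loads it
# ---- end sketch ----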
window.after(int(setting['startms']),draw_main)\r\n window.mainloop()\r\n\r\nif __name__==\"__main__\":\r\n main()\r\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":12592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"240230276","text":"#!/user/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n\r\n'''\r\nDescription\r\nGiven an array of integers, remove the duplicate numbers in it.\r\n\r\nYou should:\r\n1. Do it in place in the array.\r\n2. Move the unique numbers to the front of the array.\r\n3. Return the total number of the unique numbers.\r\n\r\nNotice\r\nYou don't need to keep the original order of the integers.\r\n\r\nExample\r\nGiven nums = [1,3,1,4,4,2], you should:\r\n\r\nMove duplicate integers to the tail of nums => nums = [1,3,4,2,?,?].\r\nReturn the number of unique integers in nums => 4.\r\nActually we don't care about what you place in ?, we only care about the part which has no duplicate integers.\r\n\r\nChallenge\r\nDo it in O(n) time complexity.\r\nDo it in O(nlogn) time without extra space.\r\n'''\r\ndef deduplication(nums):\r\n i = 0\r\n j = len(nums) - 1\r\n count = 0\r\n while i < j:\r\n if nums.count(nums[i]) > 1: #如果有多于1个num[i]\r\n nums[i], nums[j] = nums[j], nums[i] #和尾部数据交换\r\n j -= 1 #右指针左移\r\n count += 1 #计数+1\r\n else:\r\n i += 1 #否则,左指针右移\r\n\r\n return len(nums) - count\r\n\r\nif __name__ == '__main__':\r\n nums = [1, 2, 1, 4, 4, 2]\r\n print(deduplication(nums))\r\n","sub_path":"LintCode/3_521.py","file_name":"3_521.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"499436336","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2013-2015 Bluestar Solutions Sàrl ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nfrom math import floor\nfrom re import match\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\n\n\nCONVERSIONS = {\n 'w': lambda x: float(x) * 5.0, # week(s) in days\n 'd': lambda x: float(x), # day(s) in days\n 'h': lambda x: float(x) / 24.0 # hour(s) in days\n}\n\nPATTERNS = None\n\n\nclass duration(fields.float):\n _type = 'duration'\n _symbol_c = '%s'\n\n def _symbol_f(x): # @NoSelf\n return duration.parse_value(x)\n\n _symbol_set = (_symbol_c, _symbol_f)\n\n @staticmethod\n def init_patterns():\n PATTERNS = \"\"\n pattern = r'^\\d{1,}%s$'\n\n conv_keys = CONVERSIONS.viewkeys()\n\n while(len(conv_keys) > 0):\n PATTERNS += (pattern % conv_keys.pop())\n if len(conv_keys) > 0:\n PATTERNS += \"|\"\n\n @staticmethod\n def validate_str(str_to_validate):\n if PATTERNS is None:\n duration.init_patterns()\n\n for element in str_to_validate.split(' '):\n if match(PATTERNS, element) is None:\n return False\n\n return True\n\n @staticmethod\n def parse_value(vals):\n if not duration.validate_str(vals):\n raise osv.except_osv(\n _(\"Error!\"),\n _(\"String duration is not valid.\")\n )\n\n total = float(0.0)\n\n for element in vals.split(' '):\n total += CONVERSIONS[element[-1]](element[:-1])\n\n return total\n\n def __init__(self, string=\"unknown\", **args):\n fields.float.__init__(self, string=string, **args)\n\n def _symbol_get(self, value_to_display):\n days = int(floor(value_to_display))\n hours = int((value_to_display - days) * 24)\n\n return \"%sd %sh\" % (days, hours)\n\nfields.duration = duration\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"bss_duration_field/duration_field.py","file_name":"duration_field.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"172456737","text":"import numpy as np\nfrom scipy.stats import norm\nfrom math import sqrt\n\ndef N(d):\n \"\"\"\n Probabilidad de que que la accion se posicione por debajo de \"d\" conforme a una distribución normal\n \"\"\"\n return norm.cdf(d,0,1)\n\ndef calculo_blackScholes(spot,strike,tiempo_al_vencimiento,type = \"C\",sigma = 0.3,interes=0.4):\n \"\"\"\n Cálculo teórico de valuación de opciones financieras.\n Parte del DataFrame.\n \"\"\"\n #Maturity = tiempo al venc/365\n\n #print(\"SPOT:\",spot,\"\\nSTRIKE: \",strike,\"\\OPEX: \",tiempo_al_vencimiento,\"\\nTYPE: \",type)\n\n d1 = (np.log(spot/strike) + (interes + sigma**2/2)*tiempo_al_vencimiento)/(sigma*np.sqrt(tiempo_al_vencimiento))\n d2 = d1 - sigma*np.sqrt(tiempo_al_vencimiento)\n\n delta_c = N(d1)\n vega = spot * delta_c * np.sqrt(tiempo_al_vencimiento)\n\n\n try:\n if type == \"C\":\n price = spot * delta_c - strike*np.exp(-interes*tiempo_al_vencimiento) * N(d2)\n\n elif type == \"V\":\n price = strike * np.exp(-interes*tiempo_al_vencimiento) * N(-d2) - spot*N(-d1)\n\n\n return round(price,2)#,d1,d2,vega\n except:\n return \"Error\"\n\ndef vega(spot,strike,tiempo_al_vencimiento,interes,sigma):\n spot = float(spot)\n d1 = (np.log(spot/strike) + (interes + 0.5 * sigma**2)*tiempo_al_vencimiento)/(sigma*np.sqrt(tiempo_al_vencimiento))\n return spot * N(d1) * sqrt(tiempo_al_vencimiento)\n\ndef vi(spot,strike,tiempo_al_vencimiento,prima,type = \"C\",interes=0.35):\n \"\"\"\n ARREGLAR\n \"\"\"\n sigma_est = 0.5\n for i in range(100):\n sigma_est -= 
(calculo_blackScholes(spot,strike,tiempo_al_vencimiento,type,sigma_est,interes)-prima) / vega(spot,strike,tiempo_al_vencimiento,interes,sigma_est)\n #print(strike,round(sigma_est * 100,2))\n\n\n return round(sigma_est * 100,2)\n\n#spot = 119\n#print(vi(spot,124.81,56/365,9.5,\"C\"))\n#print(vi(spot,127.81,56/365,8.3,\"C\"))\n#print(vi(spot,130.81,56/365,7.25,\"C\"))\n#print(vi(spot,133.81,56/365,6.45,\"C\"))\n\n\ndef griegas(cotizacion,type):\n pass\n\n\ndef y_graph(side,base,prima,cant,x,lote=100):\n \"\"\"\n Determina la curva de una opción, sea call/put comprado/lanzado\n \"\"\"\n\n if side == \"C\":\n \"\"\"\n compra True = comprar call (view alcista)\n compra False = lanzar call (view bajista)\n \"\"\"\n if cant > 0:\n return [-prima * lote *cant if x <= base else round((x - (base+prima)) * lote * cant,2) for x in x ]\n return [prima * lote * -cant if x <= base else round(((base+prima) - x) * lote * -cant ,2) for x in x ]\n\n else:\n \"\"\"\n compra True = comprar put (view bajista)\n compra False = lanzar put (view alcista)\n \"\"\"\n if cant > 0:\n return [-prima * lote * cant if x >= base else round(((base-prima) - x) * lote * cant,2) for x in x]\n return [prima * lote * -cant if x >= base else round((x - (base - prima)) * lote * -cant,2) for x in x]\n\ndef graph(details,var_x,opex=0):\n \"\"\"\n [RESULTADO AL VENCIMIENTO]\n Calcula los valores de Y para la suma de todos los activos en cartera.\n Valores que muestra el gráfico en pantalla\n \"\"\"\n #print(\"SUMA: \",suma)\n #print(\"NEW: \",new)\n\n al_vto, teorico = [0 for x in var_x],[0 for x in var_x]\n\n for i in details:\n #print(\"DETAILS: \",i[0], i[1], i[2], i[3], i[4]) #3- tenencia\n\n #print(i)\n if len(i) == 3:\n #print(\"ACCION\")\n curva_vto = [(x-i[1]) * i[2] for x in var_x]\n curva_teorico = [(x - i[1]) * i[2] for x in var_x]\n else:\n #print(\"OPCION\")\n #print(var_x[1],var_x[2],var_x[3],i[1],opex,i[0])\n\n curva_vto = y_graph(i[0],float(i[1]),i[2],i[3],var_x)\n #print(115,var_x[9],opex/365,i[0])\n\n #print(-i[2],i[2])\n #print([calculo_blackScholes(var_x[z],i[1],opex/365,i[0]) for z in range(len(al_vto))])\n\n #print(i[2])\n #print(i[3])\n #print()\n #print([i[2] - calculo_blackScholes(var_x[z], i[1], opex / 365, i[0]) for z in range(len(al_vto))])\n #print([-i[2] + calculo_blackScholes(var_x[z], i[1], opex / 365, i[0]) for z in range(len(al_vto))])\n\n curva_teorico = [(-i[2] + calculo_blackScholes(var_x[z], i[1], opex / 365, i[0])) * i[3] * 100 if i[3] >= 0 else\n (i[2] - calculo_blackScholes(var_x[z], i[1], opex / 365, i[0])) * abs(i[3]) * 100 for z in range(len(al_vto))]\n\n #print(\"finish,curva\", curva_vto)\n #print(\"teorico,curva\", curva_teorico)\n #print(\"suma_finish\", al_vto)\n #print(\"suma_teorico\", teorico)\n\n\n for j in range(len(curva_vto)):\n al_vto[j] += curva_vto[j]\n teorico[j] += curva_teorico[j]\n\n\n #print(\"AL VENCIMIENTO: \",al_vto)\n #print(\"TEORICO: \",teorico)\n\n\n\n return al_vto, teorico\n\ndef graph2():\n pass\n\ndef tna_a_tea(tna,capitalize):\n \"\"\"\n Transforma la tasa Nominal anual a su correspondiente Efectiva anual\n :param tna: Tasa TNA a transformar\n :param capitalize: Periodo de capitalización\n \"\"\"\n times =(360/capitalize)\n tna /= times\n return (1 + tna) ** times\n\n#print(\"Tasa Futuro enero DO: \",tna_a_tea(0.5111,105))\n#print(\"Tasa Futuro octubre DO: \",tna_a_tea(0.3586,15))\n#print(\"Tasa Futuro noviembre DO: \",tna_a_tea(0.4381,45))\n\n\n#prima,d1,d2,vega = 
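# ---- Illustrative sketch (added aside, not part of the original record) ----
# vi() above runs a fixed 100 Newton-Raphson steps; when vega is close to
# zero the update can diverge. Bisection is slower but always converges,
# because the call price is monotonically increasing in sigma. This is a
# self-contained variant with its own pricer; all names are illustrative.
import numpy as np
from scipy.stats import norm

def _call_price(spot, strike, t, sigma, r):
    d1 = (np.log(spot / strike) + (r + 0.5 * sigma ** 2) * t) / (sigma * np.sqrt(t))
    return spot * norm.cdf(d1) - strike * np.exp(-r * t) * norm.cdf(d1 - sigma * np.sqrt(t))

def implied_vol_bisect(spot, strike, t, prima, r, lo=1e-4, hi=5.0, tol=1e-8):
    for _ in range(200):  # the bracket [lo, hi] halves every iteration
        mid = 0.5 * (lo + hi)
        if _call_price(spot, strike, t, mid, r) > prima:
            hi = mid
        else:
            lo = mid
        if hi - lo < tol:
            break
    return 0.5 * (lo + hi)
# ---- end sketch ----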
calculo_blackScholes(100,90,0.3287671233,\"C\",0.3,0.05)\n\n#print(prima)\n#print(d1,d2)\n#print(N(d1),N(d2))\n#print(N(-d1),N(-d2))\n#print(vega)\n\n#print(volatilidad_implicita(34,30,1,0.0001,2.724,0.5))\n\n#print([calculo_blackScholes(115,x,0.14,\"C\") for x in range(50,100,2)])","sub_path":"ENTORNO DE OPCIONES/finance.py","file_name":"finance.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"398997562","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# PEP8:OK, LINT:OK, PY3:OK\n\n\n#############################################################################\n## This file may be used under the terms of the GNU General Public\n## License version 2.0 or 3.0 as published by the Free Software Foundation\n## and appearing in the file LICENSE.GPL included in the packaging of\n## this file. Please review the following information to ensure GNU\n## General Public Licensing requirements will be met:\n## http:#www.fsf.org/licensing/licenses/info/GPLv2.html and\n## http:#www.gnu.org/copyleft/gpl.html.\n##\n## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE\n## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.\n#############################################################################\n\n\n# metadata\n' Cinta Testigo para Radios '\n__version__ = ' 1.0 '\n__license__ = ' GPL '\n__author__ = ' juancarlospaco '\n__email__ = ' juancarlospaco@ubuntu.com '\n__url__ = ' https://github.com/juancarlospaco '\n__date__ = ' 06/06/2013 '\n__prj__ = ' cintatestigo '\n__docformat__ = 'html'\n__source__ = ''\n__full_licence__ = 'http://opensource.org/licenses/gpl-3.0.html'\n\n\n# imports\nimport sys\nfrom os import (path, linesep, geteuid, environ, statvfs, mkdir, getcwd)\nfrom datetime import datetime\nfrom subprocess import call\nfrom random import randint\nfrom webbrowser import open_new_tab\nfrom shutil import make_archive\nfrom subprocess import check_output as getoutput\nfrom getpass import getuser\nfrom sip import setapi\n\ntry:\n from PyQt4.QtGui import (QIcon, QLabel, QFileDialog, QWidget, QVBoxLayout,\n QHBoxLayout, QComboBox, QCursor, QLineEdit, QCheckBox, QPushButton,\n QGroupBox, QMessageBox, QCompleter, QDirModel, QLCDNumber, QAction,\n QFont, QTabWidget, QDockWidget, QToolBar, QSizePolicy, QColorDialog,\n QPalette, QPen, QPainter, QColor, QPixmap, QMenu, QDialog, QSlider,\n QDesktopWidget, QProgressBar, QMainWindow, QApplication, QTreeWidget,\n QTreeWidgetItem, QColumnView, QDial, QTabBar, QGraphicsDropShadowEffect,\n QSystemTrayIcon)\n\n from PyQt4.QtCore import (Qt, QDir, QSize, QUrl, QEvent,\n QTimer, QFileInfo, QProcess)\n\n from PyQt4.QtNetwork import (QNetworkProxy, )\n from PyQt4.phonon import Phonon\nexcept ImportError:\n print(''' ERROR: No Qt4 avaliable !\n ( sudo apt-get install python-qt4 python-qt4-phonon ) ''')\n exit()\n\ntry:\n from PyKDE4.kdeui import (KAboutApplicationDialog, KColorDialog, KHelpMenu,\n KFontDialog)\n from PyKDE4.kdeui import KTextEdit as QPlainTextEdit\n from PyKDE4.kdeui import KDatePicker as QCalendarWidget\n from PyKDE4.solid import Solid\n from PyKDE4.nepomuk import Nepomuk\n from PyKDE4.kdecore import (KAboutData, ki18n, KUrl)\n aboutData = KAboutData(__doc__, \"\", ki18n(__doc__), __version__,\n ki18n(__doc__), KAboutData.License_GPL, ki18n(__author__),\n ki18n(\" This Smart App uses KDE if present, else Qt only if present \"),\n __url__, __email__)\n KDE = True\nexcept ImportError:\n from PyQt4.QtGui 
import (QPlainTextEdit, QCalendarWidget, # lint:ok\n QFontDialog, ) # lint:ok\n print(\" WARNING: No PyKDE ! \\n ( sudo apt-get install python-kde4 ) \")\n KDE = False\n\n\n# API 2\n(setapi(a, 2) for a in (\"QDate\", \"QDateTime\", \"QString\", \"QTime\", \"QUrl\",\n \"QTextStream\", \"QVariant\"))\n\n\n# constants\nHOME = path.abspath(path.expanduser(\"~\"))\nprint((' INFO: My Home Dir is {}'.format(HOME)))\n\n\n# root check\nif geteuid() == 0:\n exit(\" ERROR: Do NOT Run as root!, NO ejecutar como root!\\n bye noob...\\n\")\nelse:\n pass\n\n\nprint(('#' * 80))\nprint((''.join((__doc__, ',v.', __version__, __license__, ' by ', __author__))))\n\n\n###############################################################################\n\n\nclass TabBar(QTabBar):\n ' custom tab bar '\n def __init__(self, parent):\n ' init class custom tab bar '\n QTabBar.__init__(self, parent)\n self._editor = QLineEdit(self)\n self._editor.setToolTip(' Type a Tab Name ')\n self._editor.setWindowFlags(Qt.Popup)\n self._editor.setFocusProxy(self)\n self._editor.editingFinished.connect(self.handleEditingFinished)\n self._editor.installEventFilter(self)\n\n def eventFilter(self, widget, event):\n ' filter mouse, esc key, events '\n if ((event.type() == QEvent.MouseButtonPress and\n not self._editor.geometry().contains(event.globalPos())) or\n (event.type() == QEvent.KeyPress and\n event.key() == Qt.Key_Escape)):\n self._editor.hide()\n return True\n return QTabBar.eventFilter(self, widget, event)\n\n def mouseDoubleClickEvent(self, event):\n ' handle double click '\n index = self.tabAt(event.pos())\n if index >= 0:\n self.editTab(index)\n\n def editTab(self, index):\n ' handle the editor '\n rect = self.tabRect(index)\n self.setTabTextColor(index, QColorDialog.getColor())\n self._editor.setFixedSize(rect.size())\n self._editor.move(self.parent().mapToGlobal(rect.topLeft()))\n self._editor.setText(self.tabText(index))\n if not self._editor.isVisible():\n self._editor.show()\n\n def handleEditingFinished(self):\n ' set text when editing has finished '\n index = self.currentIndex()\n if index >= 0:\n self._editor.hide()\n self.setTabText(index, self._editor.text())\n\n\n###############################################################################\n\n\nclass MyMainWindow(QMainWindow):\n ' Main Window '\n def __init__(self, AUTO):\n ' Initialize QWidget inside MyMainWindow '\n super(MyMainWindow, self).__init__()\n QWidget.__init__(self)\n self.auto = AUTO\n self.statusBar().showMessage(' {}'.format(__doc__))\n self.setStyleSheet('QStatusBar{color:grey;}')\n self.setWindowTitle(__doc__)\n self.setWindowIcon(QIcon.fromTheme(\"face-monkey\"))\n self.setFont(QFont('Ubuntu Light', 10))\n self.setMaximumSize(QDesktopWidget().screenGeometry().width(),\n QDesktopWidget().screenGeometry().height())\n\n self.base = path.abspath(path.join(getcwd(), str(datetime.now().year)))\n\n # directory auto completer\n self.completer = QCompleter(self)\n self.dirs = QDirModel(self)\n self.dirs.setFilter(QDir.AllEntries | QDir.NoDotAndDotDot)\n self.completer.setModel(self.dirs)\n self.completer.setCaseSensitivity(Qt.CaseInsensitive)\n self.completer.setCompletionMode(QCompleter.PopupCompletion)\n\n # process\n self.process1 = None\n self.process2 = None\n self.cmd1 = 'nice -n {n} arecord{v} -f {f} -c {c} -r {b} -t raw'\n self.cmd2 = 'oggenc - -r -C {c} -R {b} -q {q} {d}{t}{a} -o {o}'\n self.process3 = QProcess(self)\n #self.process3.finished.connect(self.on_process3_finished)\n #self.process3.error.connect(self.on_process3_error)\n\n self.cmd3 
= ('nice -n 20 ' +\n 'sox \"{o}\" -n spectrogram -x {x} -y {y} -z 99 -t \"{o}\" -o \"{o}.png\"')\n self.actual_file = ''\n\n # re starting timers, one stops, one starts\n self.timerFirst = QTimer(self)\n self.timerFirst.timeout.connect(self.end)\n self.timerSecond = QTimer(self)\n self.timerSecond.timeout.connect(self.run)\n\n # Proxy support, by reading http_proxy os env variable\n proxy_url = QUrl(environ.get('http_proxy', ''))\n QNetworkProxy.setApplicationProxy(QNetworkProxy(QNetworkProxy.HttpProxy\n if str(proxy_url.scheme()).startswith('http')\n else QNetworkProxy.Socks5Proxy, proxy_url.host(), proxy_url.port(),\n proxy_url.userName(), proxy_url.password())) \\\n if 'http_proxy' in environ else None\n print((' INFO: Proxy Auto-Config as ' + str(proxy_url)))\n\n # basic widgets layouts and set up\n self.mainwidget = QTabWidget()\n self.mainwidget.setToolTip(__doc__)\n self.mainwidget.setMovable(True)\n self.mainwidget.setTabShape(QTabWidget.Triangular)\n self.mainwidget.setContextMenuPolicy(Qt.CustomContextMenu)\n self.mainwidget.setStyleSheet('QTabBar{color:white;font-weight:bold;}')\n self.mainwidget.setTabBar(TabBar(self))\n self.mainwidget.setTabsClosable(False)\n self.setCentralWidget(self.mainwidget)\n self.dock1 = QDockWidget()\n self.dock2 = QDockWidget()\n self.dock3 = QDockWidget()\n self.dock4 = QDockWidget()\n self.dock5 = QDockWidget()\n for a in (self.dock1, self.dock2, self.dock3, self.dock4, self.dock5):\n a.setWindowModality(Qt.NonModal)\n # a.setWindowOpacity(0.9)\n a.setWindowTitle(__doc__\n if a.windowTitle() == '' else a.windowTitle())\n a.setStyleSheet('QDockWidget::title{text-align:center;}')\n self.mainwidget.addTab(a, QIcon.fromTheme(\"face-smile\"),\n 'Double Click Me')\n\n # Paleta de colores para pintar transparente\n self.palette().setBrush(QPalette.Base, Qt.transparent)\n self.setPalette(self.palette())\n self.setAttribute(Qt.WA_OpaquePaintEvent, False)\n\n # toolbar and basic actions\n self.toolbar = QToolBar(self)\n self.toolbar.setIconSize(QSize(24, 24))\n # spacer widget for left\n self.left_spacer = QWidget(self)\n self.left_spacer.setSizePolicy(QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n # spacer widget for right\n self.right_spacer = QWidget(self)\n self.right_spacer.setSizePolicy(QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n qaqq = QAction(QIcon.fromTheme(\"application-exit\"), 'Quit', self)\n qaqq.setShortcut('Ctrl+Q')\n qaqq.triggered.connect(exit)\n qamin = QAction(QIcon.fromTheme(\"go-down\"), 'Minimize', self)\n qamin.triggered.connect(lambda: self.showMinimized())\n qamax = QAction(QIcon.fromTheme(\"go-up\"), 'Maximize', self)\n qanor = QAction(QIcon.fromTheme(\"view-fullscreen\"),\n 'AutoCenter AutoResize', self)\n qanor.triggered.connect(self.center)\n qatim = QAction(QIcon.fromTheme(\"mail-signed-verified\"),\n 'View Date and Time', self)\n qatim.triggered.connect(self.timedate)\n qabug = QAction(QIcon.fromTheme(\"help-about\"), 'Report a Problem', self)\n qabug.triggered.connect(lambda: qabug.setDisabled(True) if not call(\n 'xdg-open mailto:' + 'whnapneybfcnpb@hohagh.pbz'.decode('rot13'),\n shell=True) else ' ERROR ')\n qamax.triggered.connect(lambda: self.showMaximized())\n qaqt = QAction(QIcon.fromTheme(\"help-about\"), 'About Qt', self)\n qaqt.triggered.connect(lambda: QMessageBox.aboutQt(self))\n qakde = QAction(QIcon.fromTheme(\"help-about\"), 'About KDE', self)\n if KDE:\n qakde.triggered.connect(KHelpMenu(self, \"\", False).aboutKDE)\n qaslf = QAction(QIcon.fromTheme(\"help-about\"), 'About Self', self)\n if KDE:\n 
qaslf.triggered.connect(\n KAboutApplicationDialog(aboutData, self).exec_)\n else:\n qaslf.triggered.connect(lambda: QMessageBox.about(self.mainwidget,\n __doc__, ''.join((__doc__, linesep, 'version ', __version__, ', (',\n __license__, '), by ', __author__, ', ( ', __email__, ' )', linesep\n ))))\n qafnt = QAction(QIcon.fromTheme(\"tools-check-spelling\"),\n 'Set GUI Font', self)\n if KDE:\n font = QFont()\n qafnt.triggered.connect(lambda:\n self.setStyleSheet(''.join((\n '*{font-family:', str(font.toString()), '}'))\n if KFontDialog.getFont(font)[0] == QDialog.Accepted else ''))\n else:\n qafnt.triggered.connect(lambda:\n self.setStyleSheet(''.join(('*{font-family:',\n str(QFontDialog.getFont()[0].toString()), '}'))))\n qasrc = QAction(QIcon.fromTheme(\"applications-development\"),\n 'View Source Code', self)\n qasrc.triggered.connect(lambda:\n call('xdg-open {}'.format(__file__), shell=True))\n qakb = QAction(QIcon.fromTheme(\"input-keyboard\"),\n 'Keyboard Shortcuts', self)\n qakb.triggered.connect(lambda: QMessageBox.information(self.mainwidget,\n 'Keyboard Shortcuts', ' Ctrl+Q = Quit '))\n qapic = QAction(QIcon.fromTheme(\"camera-photo\"),\n 'Take a Screenshot', self)\n qapic.triggered.connect(lambda: QPixmap.grabWindow(\n QApplication.desktop().winId()).save(QFileDialog.getSaveFileName(\n self.mainwidget, \" Save Screenshot As ...\", path.expanduser(\"~\"),\n ';;(*.png) PNG', 'png')))\n qatb = QAction(QIcon.fromTheme(\"go-top\"), 'Toggle ToolBar', self)\n qatb.triggered.connect(lambda: self.toolbar.hide()\n if self.toolbar.isVisible() is True else self.toolbar.show())\n qati = QAction(QIcon.fromTheme(\"zoom-in\"),\n 'Switch ToolBar Icon Size', self)\n qati.triggered.connect(lambda:\n self.toolbar.setIconSize(self.toolbar.iconSize() * 4)\n if self.toolbar.iconSize().width() * 4 == 24\n else self.toolbar.setIconSize(self.toolbar.iconSize() / 4))\n qasb = QAction(QIcon.fromTheme(\"preferences-other\"),\n 'Toggle Tabs Bar', self)\n qasb.triggered.connect(lambda: self.mainwidget.tabBar().hide()\n if self.mainwidget.tabBar().isVisible() is True\n else self.mainwidget.tabBar().show())\n qadoc = QAction(QIcon.fromTheme(\"help-browser\"), 'On-line Docs', self)\n qadoc.triggered.connect(lambda: open_new_tab(str(__url__).strip()))\n qapy = QAction(QIcon.fromTheme(\"help-about\"), 'About Python', self)\n qapy.triggered.connect(lambda: open_new_tab('http://python.org/about'))\n qali = QAction(QIcon.fromTheme(\"help-browser\"), 'Read Licence', self)\n qali.triggered.connect(lambda: open_new_tab(__full_licence__))\n qacol = QAction(QIcon.fromTheme(\"preferences-system\"), 'Set GUI Colors',\n self)\n if KDE:\n color = QColor()\n qacol.triggered.connect(lambda:\n self.setStyleSheet(''.join(('* { background-color: ',\n str(color.name()), '}')))\n if KColorDialog.getColor(color, self) else '')\n else:\n qacol.triggered.connect(lambda: self.setStyleSheet(''.join((\n ' * { background-color: ', str(QColorDialog.getColor().name()),\n ' } '))))\n qatit = QAction(QIcon.fromTheme(\"preferences-system\"),\n 'Set the App Window Title', self)\n qatit.triggered.connect(self.seTitle)\n self.toolbar.addWidget(self.left_spacer)\n self.toolbar.addSeparator()\n self.toolbar.addActions((qaqq, qamin, qanor, qamax, qasrc, qakb, qacol,\n qatim, qatb, qafnt, qati, qasb, qatit, qapic, qadoc, qali, qaslf,\n qaqt, qakde, qapy, qabug))\n self.addToolBar(Qt.TopToolBarArea, self.toolbar)\n self.toolbar.addSeparator()\n self.toolbar.addWidget(self.right_spacer)\n # define the menu\n menu = self.menuBar()\n # File menu items\n 
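# ---- Illustrative sketch (added aside, not part of the original record) ----
# The block above creates every command once as a QAction and reuses the
# same object in the toolbar, the menus and the tray menu. The pattern
# reduced to a single action (PyQt4):
import sys
from PyQt4.QtGui import QAction, QApplication, QIcon, QMainWindow

app = QApplication(sys.argv)
win = QMainWindow()
quit_action = QAction(QIcon.fromTheme('application-exit'), 'Quit', win)
quit_action.setShortcut('Ctrl+Q')
quit_action.triggered.connect(app.quit)  # one slot, shared by every entry point
win.menuBar().addMenu('&File').addAction(quit_action)
win.addToolBar('Main').addAction(quit_action)
# win.show(); sys.exit(app.exec_())  # uncomment to run interactively
# ---- end sketch ----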
menu.addMenu('&File').addActions((qaqq, ))\n menu.addMenu('&Window').addActions((qamax, qanor, qamin))\n # Settings menu\n menu.addMenu('&Settings').addActions((qasrc, qacol, qafnt, qatim,\n qatb, qati, qasb, qapic))\n # Help menu items\n menu.addMenu('&Help').addActions((qadoc, qakb, qabug, qali,\n qaqt, qakde, qapy, qaslf))\n # Tray Icon\n tray = QSystemTrayIcon(QIcon.fromTheme(\"face-devilish\"), self)\n tray.setToolTip(__doc__)\n traymenu = QMenu()\n traymenu.addActions((qamax, qanor, qamin, qaqq))\n tray.setContextMenu(traymenu)\n tray.show()\n\n def contextMenuRequested(point):\n ' quick and dirty custom context menu '\n menu = QMenu()\n menu.addActions((qaqq, qamin, qanor, qamax, qasrc, qakb, qacol,\n qafnt, qati, qasb, qatb, qatim, qatit, qapic, qadoc, qali,\n qaslf, qaqt, qakde, qapy, qabug))\n menu.exec_(self.mapToGlobal(point))\n self.mainwidget.customContextMenuRequested.connect(contextMenuRequested)\n\n def must_be_checked(widget_list):\n ' widget tuple passed as argument should be checked as ON '\n for each_widget in widget_list:\n try:\n each_widget.setChecked(True)\n except:\n pass\n\n def must_have_tooltip(widget_list):\n ' widget tuple passed as argument should have tooltips '\n for each_widget in widget_list:\n try:\n each_widget.setToolTip(each_widget.text())\n except:\n each_widget.setToolTip(each_widget.currentText())\n finally:\n each_widget.setCursor(QCursor(Qt.PointingHandCursor))\n\n def must_autofillbackground(widget_list):\n ' widget tuple passed as argument should have filled background '\n for each_widget in widget_list:\n try:\n each_widget.setAutoFillBackground(True)\n except:\n pass\n\n def must_glow(widget_list):\n ' apply an glow effect to the widget '\n for glow, each_widget in enumerate(widget_list):\n try:\n if each_widget.graphicsEffect() is None:\n glow = QGraphicsDropShadowEffect(self)\n glow.setOffset(0)\n glow.setBlurRadius(99)\n glow.setColor(QColor(99, 255, 255))\n each_widget.setGraphicsEffect(glow)\n # glow.setEnabled(False)\n try:\n each_widget.clicked.connect(lambda:\n each_widget.graphicsEffect().setEnabled(True)\n if each_widget.graphicsEffect().isEnabled() is False\n else each_widget.graphicsEffect().setEnabled(False))\n except:\n each_widget.sliderPressed.connect(lambda:\n each_widget.graphicsEffect().setEnabled(True)\n if each_widget.graphicsEffect().isEnabled() is False\n else each_widget.graphicsEffect().setEnabled(False))\n except:\n pass\n\n #######################################################################\n\n # dock 1\n QLabel('
Record !
', self.dock1).resize(\n self.dock3.size().width() / 4, 25)\n self.group1 = QGroupBox()\n self.group1.setTitle(__doc__)\n\n self.spec = QPushButton(self)\n self.spec.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.spec.setMinimumSize(self.spec.size().width(), 250)\n self.spec.setFlat(True)\n self.spec.clicked.connect(self.spectro)\n\n self.clock = QLCDNumber()\n self.clock.setSegmentStyle(QLCDNumber.Flat)\n self.clock.setMinimumSize(self.clock.size().width(), 50)\n self.clock.setNumDigits(25)\n self.timer1 = QTimer(self)\n self.timer1.timeout.connect(lambda: self.clock.display(\n datetime.now().strftime(\"%d-%m-%Y %H:%M:%S %p\")))\n self.timer1.start(1000)\n self.clock.setToolTip(datetime.now().strftime(\"%c %x\"))\n self.clock.setCursor(QCursor(Qt.CrossCursor))\n\n self.diskBar = QProgressBar()\n self.diskBar.setMinimum(0)\n self.diskBar.setMaximum(statvfs(HOME).f_blocks *\n statvfs(HOME).f_frsize / 1024 / 1024 / 1024)\n self.diskBar.setValue(statvfs(HOME).f_bfree *\n statvfs(HOME).f_frsize / 1024 / 1024 / 1024)\n self.diskBar.setToolTip(str(statvfs(HOME).f_bfree *\n statvfs(HOME).f_frsize / 1024 / 1024 / 1024) + ' Gigabytes free')\n\n self.feedback = QPlainTextEdit(''.join(('
', __doc__,\n ', version', __version__, __license__, ' by ', __author__,\n ' (Dev), Radio Comunitaria FM Reconquista (Q.A.)
',\n 'FMReconquista.org.ar & GitHub.com/JuanCarlosPaco/Cinta-Testigo')))\n\n self.rec = QPushButton(QIcon.fromTheme(\"media-record\"), 'Record')\n self.rec.setMinimumSize(self.rec.size().width(), 50)\n self.rec.clicked.connect(self.go) # self.run\n\n self.stop = QPushButton(QIcon.fromTheme(\"media-playback-stop\"), 'Stop')\n self.stop.clicked.connect(self.end)\n\n self.kill = QPushButton(QIcon.fromTheme(\"process-stop\"), 'Kill')\n self.kill.clicked.connect(self.killer)\n\n vboxg1 = QVBoxLayout(self.group1)\n for each_widget in (\n QLabel(' Spectro'), self.spec,\n QLabel(' Time '), self.clock,\n QLabel(' Disk '), self.diskBar,\n QLabel(' STDOUT + STDIN '), self.feedback,\n QLabel(' Record '), self.rec, self.stop,\n self.kill):\n vboxg1.addWidget(each_widget)\n\n self.group2 = QGroupBox()\n self.group2.setTitle(__doc__)\n\n self.slider = QSlider(self)\n self.slid_l = QLabel(self.slider)\n self.slider.setCursor(QCursor(Qt.OpenHandCursor))\n self.slider.sliderPressed.connect(lambda:\n self.slider.setCursor(QCursor(Qt.ClosedHandCursor)))\n self.slider.sliderReleased.connect(lambda:\n self.slider.setCursor(QCursor(Qt.OpenHandCursor)))\n self.slider.valueChanged.connect(lambda:\n self.slider.setToolTip(str(self.slider.value())))\n self.slider.valueChanged.connect(lambda: self.slid_l.setText(\n '
{}'.format(self.slider.value())))\n self.slider.setMinimum(10)\n self.slider.setMaximum(99)\n self.slider.setValue(30)\n self.slider.setOrientation(Qt.Vertical)\n self.slider.setTickPosition(QSlider.TicksBothSides)\n self.slider.setTickInterval(2)\n self.slider.setSingleStep(10)\n self.slider.setPageStep(10)\n\n vboxg2 = QVBoxLayout(self.group2)\n for each_widget in (\n QLabel('MINUTES of recording'), self.slider,\n QLabel(' Default: 30 Min')):\n vboxg2.addWidget(each_widget)\n\n group3 = QGroupBox()\n group3.setTitle(__doc__)\n try:\n self.label2 = QLabel(getoutput('sox --version', shell=True))\n self.label4 = QLabel(getoutput('arecord --version', shell=1)[:25])\n self.label6 = QLabel(str(getoutput('oggenc --version', shell=True)))\n except:\n print(''' ERROR: No SOX, OGGenc avaliable !\n ( sudo apt-get install vorbis-tools sox alsa-utils ) ''')\n exit()\n\n self.button5 = QPushButton(QIcon.fromTheme(\"audio-x-generic\"),\n 'OGG --> ZIP')\n self.button5.clicked.connect(lambda: make_archive(\n str(QFileDialog.getSaveFileName(self, \"Save OGG to ZIP file As...\",\n getcwd(), ';;(*.zip)', 'zip')).replace('.zip', ''), \"zip\",\n path.abspath(path.join(getcwd(), str(datetime.now().year)))))\n\n self.button1 = QPushButton(QIcon.fromTheme(\"folder-open\"), 'Files')\n self.button1.clicked.connect(lambda:\n call('xdg-open ' + getcwd(), shell=True))\n\n self.button0 = QPushButton(\n QIcon.fromTheme(\"preferences-desktop-screensaver\"), 'LCD OFF')\n self.button0.clicked.connect(lambda:\n call('sleep 3 ; xset dpms force off', shell=True))\n\n vboxg3 = QVBoxLayout(group3)\n for each_widget in (\n QLabel(' OGG Output Codec '), self.label6,\n QLabel(' Raw Record Backend '), self.label4,\n QLabel(' Helper Libs '), self.label2,\n QLabel(' OGG ZIP '), self.button5,\n QLabel(' Files '), self.button1,\n QLabel(' LCD '), self.button0):\n vboxg3.addWidget(each_widget)\n container = QWidget()\n hbox = QHBoxLayout(container)\n for each_widget in (self.group2, self.group1, group3):\n hbox.addWidget(each_widget)\n self.dock1.setWidget(container)\n\n # dock 2\n QLabel('
Hardware !
', self.dock2).resize(\n self.dock2.size().width() / 4, 25)\n try:\n audioDriverStr = {Solid.AudioInterface.Alsa: \"ALSA\",\n Solid.AudioInterface.OpenSoundSystem: \"Open Sound\",\n Solid.AudioInterface.UnknownAudioDriver: \"Unknown?\"}\n audioInterfaceTypeStr = {\n Solid.AudioInterface.AudioControl: \"Control\",\n Solid.AudioInterface.UnknownAudioInterfaceType: \"Unknown?\",\n Solid.AudioInterface.AudioInput: \"In\",\n Solid.AudioInterface.AudioOutput: \"Out\"}\n soundcardTypeStr = {\n Solid.AudioInterface.InternalSoundcard: \"Internal\",\n Solid.AudioInterface.UsbSoundcard: \"USB3\",\n Solid.AudioInterface.FirewireSoundcard: \"FireWire\",\n Solid.AudioInterface.Headset: \"Headsets\",\n Solid.AudioInterface.Modem: \"Modem\"}\n display = QTreeWidget()\n display.setAlternatingRowColors(True)\n display.setHeaderLabels([\"Items\", \"ID\", \"Drivers\", \"I / O\", \"Type\"])\n display.setColumnWidth(0, 350)\n display.setColumnWidth(1, 350)\n display.setColumnWidth(3, 75)\n # retrieve a list of Solid.Device for this machine\n deviceList = Solid.Device.allDevices()\n # filter the list of all devices and display matching results\n # note that we never create a Solid.AudioInterface object, but\n # receive one from the 'asDeviceInterface' call\n for device in deviceList:\n if device.isDeviceInterface(\n Solid.DeviceInterface.AudioInterface):\n audio = device.asDeviceInterface(\n Solid.DeviceInterface.AudioInterface)\n devtype = audio.deviceType()\n devstr = []\n for key in audioInterfaceTypeStr:\n flag = key & devtype\n if flag:\n devstr.append(audioInterfaceTypeStr[key])\n QTreeWidgetItem(display, [device.product(), audio.name(),\n audioDriverStr[audio.driver()], \"/\".join(devstr),\n soundcardTypeStr[audio.soundcardType()]])\n self.dock2.setWidget(display)\n except:\n self.dock2.setWidget(QLabel(\"\"\"
:( ERROR: Please, install PyKDE !\n (Sorry, can not use non-Qt Libs). Thanks
\"\"\"))\n\n ## dock 3\n QLabel('
Previews !
', self.dock3).resize(\n self.dock3.size().width() / 4, 25)\n self.fileView = QColumnView()\n self.fileView.updatePreviewWidget.connect(self.play)\n self.fileView.setToolTip(' Browse and Preview Files ')\n self.media = None\n self.model = QDirModel()\n self.fileView.setModel(self.model)\n self.dock3.setWidget(self.fileView)\n\n # dock4\n QLabel('
Setup !
', self.dock4).resize(\n self.dock4.size().width() / 4, 25)\n self.group4 = QGroupBox()\n self.group4.setTitle(__doc__)\n\n self.combo0 = QComboBox()\n self.combo0.addItems(['S16_LE', 'S32_LE', 'S16_BE', 'U16_LE', 'U16_BE',\n 'S24_LE', 'S24_BE', 'U24_LE', 'U24_BE', 'S32_BE', 'U32_LE', 'U32_BE'])\n\n self.combo1 = QComboBox()\n self.combo1.addItems(['1', '-1', '0', '2', '3', '4',\n '5', '6', '7', '8', '9', '10'])\n\n self.combo2 = QComboBox()\n self.combo2.addItems(['128', '256', '512', '1024', '64', '32', '16'])\n\n self.combo3 = QComboBox(self)\n self.combo3.addItems(['MONO', 'STEREO', 'Surround'])\n\n self.combo4 = QComboBox()\n self.combo4.addItems(['44100', '96000', '48000', '32000',\n '22050', '16000', '11025', '8000'])\n\n self.combo5 = QComboBox(self)\n self.combo5.addItems(['20', '19', '18', '17', '16', '15', '14', '13',\n '12', '10', '9', '8', '7', '6', '5', '4', '3', '2', '1', '0'])\n\n self.nepochoose = QCheckBox('Auto-Tag Files using Nepomuk Semantic')\n\n self.chckbx0 = QCheckBox('Disable Software based Volume Control')\n\n self.chckbx1 = QCheckBox('Output Sound Stereo-to-Mono Downmix')\n\n self.chckbx2 = QCheckBox('Add Date and Time MetaData to Sound files')\n\n self.chckbx3 = QCheckBox('Add Yourself as the Author Artist of Sound')\n\n vboxg4 = QVBoxLayout(self.group4)\n for each_widget in (\n QLabel(' Sound OGG Quality'), self.combo1,\n QLabel(' Sound Record Format'), self.combo0,\n QLabel(' Sound KBps '), self.combo2,\n QLabel(' Sound Channels '), self.combo3,\n QLabel(' Sound Sample Rate '), self.combo4,\n QLabel(' Sound Volume'), self.chckbx0,\n QLabel(' Sound Mix'), self.chckbx1,\n QLabel(' Sound Meta'), self.chckbx2,\n QLabel(' Sound Authorship'), self.chckbx3,\n QLabel(' CPUs Priority'), self.combo5,\n QLabel('Nepomuk Semantic User Experience'),\n self.nepochoose):\n vboxg4.addWidget(each_widget)\n self.dock4.setWidget(self.group4)\n\n # dock 5\n QLabel('
Voice Changer !
', self.dock5\n ).resize(self.dock5.size().width() / 3, 25)\n self.group5 = QGroupBox()\n self.group5.setTitle(__doc__)\n\n self.dial = QDial()\n self.dial.setCursor(QCursor(Qt.OpenHandCursor))\n self.di_l = QLabel(self.dial)\n self.di_l.resize(self.dial.size() / 8)\n self.dial.sliderPressed.connect(lambda:\n self.dial.setCursor(QCursor(Qt.ClosedHandCursor)))\n self.dial.sliderReleased.connect(lambda:\n self.dial.setCursor(QCursor(Qt.OpenHandCursor)))\n self.dial.valueChanged.connect(lambda:\n self.dial.setToolTip(str(self.dial.value())))\n self.dial.valueChanged.connect(lambda: self.di_l.setText(\n '
{}'.format(self.dial.value())))\n self.dial.setValue(0)\n self.dial.setMinimum(-999)\n self.dial.setMaximum(999)\n self.dial.setSingleStep(100)\n self.dial.setPageStep(100)\n self.dial.setWrapping(False)\n self.dial.setNotchesVisible(True)\n\n self.defo = QPushButton(QIcon.fromTheme(\"media-playback-start\"), 'Run')\n self.defo.setMinimumSize(self.defo.size().width(), 50)\n self.defo.clicked.connect(lambda: self.process3.start(\n 'play -q -V0 \"|rec -q -V0 -n -d -R riaa pitch {} \"'\n .format(self.dial.value()) if int(self.dial.value()) != 0 else\n 'play -q -V0 \"|rec -q -V0 --multi-threaded -n -d -R bend {} \"'\n .format(' 3,2500,3 3,-2500,3 ' * 999)))\n\n self.qq = QPushButton(QIcon.fromTheme(\"media-playback-stop\"), 'Stop')\n self.qq.clicked.connect(self.process3.kill)\n\n self.die = QPushButton(QIcon.fromTheme(\"process-stop\"), 'Kill')\n self.die.clicked.connect(lambda: call('killall rec', shell=True))\n\n vboxg5 = QVBoxLayout(self.group5)\n for each_widget in (self.dial, self.defo, self.qq, self.die):\n vboxg5.addWidget(each_widget)\n self.dock5.setWidget(self.group5)\n\n # configure some widget settings\n must_be_checked((self.nepochoose, self.chckbx1,\n self.chckbx2, self.chckbx3))\n must_have_tooltip((self.label2, self.label4, self.label6, self.combo0,\n self.nepochoose, self.combo1, self.combo2, self.combo3, self.combo4,\n self.combo5, self.chckbx0, self.chckbx1, self.chckbx2, self.chckbx3,\n self.rec, self.stop, self.defo, self.qq, self.die, self.kill,\n self.button0, self.button1, self.button5))\n must_autofillbackground((self.clock, self.label2, self.label4,\n self.label6, self.nepochoose, self.chckbx0, self.chckbx1,\n self.chckbx2, self.chckbx3))\n must_glow((self.rec, self.dial, self.combo1))\n self.nepomuk_get('testigo')\n if self.auto is True:\n self.go()\n\n def play(self, index):\n ' play with delay '\n if not self.media:\n self.media = Phonon.MediaObject(self)\n audioOutput = Phonon.AudioOutput(Phonon.MusicCategory, self)\n Phonon.createPath(self.media, audioOutput)\n self.media.setCurrentSource(Phonon.MediaSource(\n self.model.filePath(index)))\n self.media.play()\n\n def end(self):\n ' kill it with fire '\n print((' INFO: Stoping Processes at {}'.format(str(datetime.now()))))\n self.process1.terminate()\n self.process2.terminate()\n self.feedback.setText('''\n
Errors for RECORDER QProcess 1: {}\n Errors for ENCODER QProcess 2: {}\n Output for RECORDER QProcess 1: {}\n Output for ENCODER QProcess 2: {}
\n '''.format(self.process1.readAllStandardError(),\n self.process2.readAllStandardError(),\n self.process1.readAllStandardOutput(),\n self.process2.readAllStandardOutput(),\n ))\n\n def killer(self):\n ' kill -9 '\n QMessageBox.information(self.mainwidget, __doc__,\n ' KILL -9 was sent to the multi-process backend ! ')\n self.process1.kill()\n self.process2.kill()\n\n def go(self):\n ' run timeout re-starting timers '\n self.timerFirst.start(int(self.slider.value()) * 60 * 1000 + 2000)\n self.timerSecond.start(int(self.slider.value()) * 60 * 1000 + 2010)\n self.run()\n\n def run(self):\n ' run forest run '\n print((' INFO: Working at {}'.format(str(datetime.now()))))\n\n chnl = 1 if self.combo3.currentText() == 'MONO' else 2\n print((' INFO: Using {} Channels . . . '.format(chnl)))\n\n btrt = int(self.combo4.currentText())\n print((' INFO: Using {} Hz per Second . . . '.format(btrt)))\n\n threshold = int(self.dial.value())\n print((' INFO: Using Thresold of {} . . . '.format(threshold)))\n\n print((' INFO: Using Recording time of {}'.format(self.slider.value())))\n\n frmt = str(self.combo0.currentText()).strip()\n print((' INFO: Using Recording quality of {} ...'.format(frmt)))\n\n qlt = str(self.combo1.currentText()).strip()\n print((' INFO: Using Recording quality of {} ...'.format(qlt)))\n\n prio = str(self.combo5.currentText()).strip()\n print((' INFO: Using CPU Priority of {} ...'.format(prio)))\n\n downmix = '--downmix ' if self.chckbx1.isChecked() is True else ''\n print((' INFO: Using Downmix is {} ...'.format(downmix)))\n\n aut = '-a ' + getuser() if self.chckbx3.isChecked() is True else ''\n print((' INFO: The Author Artist of this sound is: {}'.format(aut)))\n\n T = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n tim = '--date {} '.format(T) if self.chckbx2.isChecked() is True else ''\n print((' INFO: The Date and Time of this sound is: {}'.format(tim)))\n\n vol = ' --disable-softvol' if self.chckbx0.isChecked() is True else ''\n print((' INFO: Software based Volume Control is: {}'.format(vol)))\n\n # make base directory\n try:\n mkdir(self.base)\n print((' INFO: Base Directory path created {}'.format(self.base)))\n except OSError:\n print((' INFO: Base Directory already exist {}'.format(self.base)))\n except:\n print((' ERROR: Can not create Directory ?, {}'.format(self.base)))\n\n # make directory tree\n try:\n for dr in range(1, 13):\n mkdir(path.abspath(path.join(self.base, str(dr))))\n print((' INFO:Directory created {}/{}'.format(self.base, dr)))\n except OSError:\n print((' INFO: Directory already exist {}/1,12'.format(self.base)))\n except:\n print((' ERROR: Cant create Directory?, {}/1,12'.format(self.base)))\n\n # make new filename\n flnm = path.abspath(path.join(self.base, str(datetime.now().month),\n datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S.ogg\")))\n self.actual_file = flnm\n print((' INFO: Recording on the file {}'.format(flnm)))\n\n # make custom commands\n cmd1 = self.cmd1.format(n=prio, f=frmt, c=chnl, b=btrt, v=vol)\n cmd2 = self.cmd2.format(c=chnl, b=btrt, q=qlt,\n d=downmix, o=flnm, a=aut, t=tim)\n print((cmd1, cmd2))\n # multiprocess recording loop pipe\n self.process1 = QProcess(self)\n self.process2 = QProcess(self)\n self.process1.setStandardOutputProcess(self.process2)\n self.process1.start(cmd1)\n if not self.process1.waitForStarted():\n print((\" ERROR: RECORDER QProcess 1 Failed: \\n {} \".format(cmd1)))\n self.process2.start(cmd2)\n if not self.process2.waitForStarted():\n print((\" ERROR: ENCODER QProcess 2 Failed: \\n {} \".format(cmd2)))\n 
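# ---- Illustrative sketch (added aside, not part of the original record) ----
# run() above chains the recorder into the encoder by letting QProcess 1
# feed QProcess 2 via setStandardOutputProcess(), i.e. "arecord | oggenc"
# without a shell. The same pipeline with the standard library (flags
# trimmed; the output name 'out.ogg' is an assumption):
import subprocess

recorder = subprocess.Popen(['arecord', '-f', 'S16_LE', '-t', 'raw'],
                            stdout=subprocess.PIPE)
encoder = subprocess.Popen(['oggenc', '-', '-r', '-o', 'out.ogg'],
                           stdin=recorder.stdout)
recorder.stdout.close()  # let arecord receive SIGPIPE if oggenc exits first
# encoder.wait() would block until the whole pipeline finishes
# ---- end sketch ----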
self.nepomuk_set(flnm, 'testigo', 'testigo', 'AutoTag by Cinta-Testigo')\n\n def spectro(self):\n ' spectrometer '\n wid = self.spec.size().width()\n hei = self.spec.size().height()\n command = self.cmd3.format(o=self.actual_file, x=wid, y=hei)\n print(' INFO: Spectrometer is deleting OLD .ogg.png Files on target ')\n call('rm --verbose --force {}/*/*.ogg.png'.format(self.base), shell=1)\n print(' INFO: Spectrometer finished Deleting Files, Starting Render ')\n call(command, shell=True)\n print((''' INFO: Spectrometer finished Rendering Sound using:\n {}{} OutPut: {}'''.format(command, linesep, self.actual_file)))\n self.spec.setIcon(QIcon('{o}.png'.format(o=self.actual_file)))\n self.spec.setIconSize(QSize(wid, hei))\n self.spec.resize(wid, hei)\n\n ###########################################################################\n\n def paintEvent(self, event):\n 'Paint semi-transparent background, animated pattern, background text'\n QWidget.paintEvent(self, event)\n # make a painter\n p = QPainter(self)\n p.setRenderHint(QPainter.TextAntialiasing)\n p.setRenderHint(QPainter.HighQualityAntialiasing)\n # fill a rectangle with transparent painting\n p.fillRect(event.rect(), Qt.transparent)\n # animated random dots background pattern\n for i in range(4096):\n x = randint(9, self.size().width() - 9)\n y = randint(9, self.size().height() - 9)\n p.setPen(QPen(QColor(randint(200, 255), randint(200, 255), 255), 1))\n p.drawPoint(x, y)\n # set pen to use white color\n p.setPen(QPen(QColor(randint(9, 255), randint(9, 255), 255), 1))\n # Rotate painter 45 Degree\n p.rotate(35)\n # Set painter Font for text\n p.setFont(QFont('Ubuntu', 300))\n # draw the background text, with antialiasing\n p.drawText(99, 199, \"Radio\")\n # Rotate -45 the QPen back !\n p.rotate(-35)\n # set the pen to no pen\n p.setPen(Qt.NoPen)\n # Background Color\n p.setBrush(QColor(0, 0, 0))\n # Background Opacity\n p.setOpacity(0.75)\n # Background Rounded Borders\n p.drawRoundedRect(self.rect(), 50, 50)\n # finalize the painter\n p.end()\n\n def seTitle(self):\n ' set the title of the main window '\n dialog = QDialog(self)\n textEditInput = QLineEdit(' Type Title Here ')\n ok = QPushButton(' O K ')\n ok.clicked.connect(lambda: self.setWindowTitle(textEditInput.text()))\n ly = QVBoxLayout()\n [ly.addWidget(wdgt) for wdgt in (QLabel('Title:'), textEditInput, ok)]\n dialog.setLayout(ly)\n dialog.exec_()\n\n def timedate(self):\n ' get the time and date '\n dialog = QDialog(self)\n clock = QLCDNumber()\n clock.setNumDigits(24)\n timer = QTimer()\n timer.timeout.connect(lambda: clock.display(\n datetime.now().strftime(\"%d-%m-%Y %H:%M:%S %p\")))\n timer.start(1000)\n clock.setToolTip(datetime.now().strftime(\"%c %x\"))\n ok = QPushButton(' O K ')\n ok.clicked.connect(dialog.close)\n ly = QVBoxLayout()\n [ly.addWidget(wdgt) for wdgt in (QCalendarWidget(), clock, ok)]\n dialog.setLayout(ly)\n dialog.exec_()\n\n def closeEvent(self, event):\n ' Ask to Quit '\n if QMessageBox.question(self, ' Close ', ' Quit ? 
',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No) == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n def center(self):\n ' Center and resize the window '\n self.showNormal()\n self.resize(QDesktopWidget().screenGeometry().width() // 1.25,\n QDesktopWidget().screenGeometry().height() // 1.25)\n qr = self.frameGeometry()\n qr.moveCenter(QDesktopWidget().availableGeometry().center())\n self.move(qr.topLeft())\n\n def nepomuk_set(self, file_tag=None, __tag='', _label='', _description=''):\n ' Quick and Easy Nepomuk Taggify for Files '\n print((''' INFO: Semantic Desktop Experience is Tagging Files :\n {}, {}, {}, {})'''.format(file_tag, __tag, _label, _description)))\n if Nepomuk.ResourceManager.instance().init() is 0:\n fle = Nepomuk.Resource(KUrl(QFileInfo(file_tag).absoluteFilePath()))\n _tag = Nepomuk.Tag(__tag)\n _tag.setLabel(_label)\n fle.addTag(_tag)\n fle.setDescription(_description)\n print(([str(a.label()) for a in fle.tags()], fle.description()))\n return ([str(a.label()) for a in fle.tags()], fle.description())\n else:\n print(\" ERROR: FAIL: Nepomuk is not running ! \")\n\n def nepomuk_get(self, query_to_search):\n ' Quick and Easy Nepomuk Query for Files '\n print((''' INFO: Semantic Desktop Experience is Quering Files :\n {} '''.format(query_to_search)))\n results = []\n nepo = Nepomuk.Query.QueryServiceClient()\n nepo.desktopQuery(\"hasTag:{}\".format(query_to_search))\n\n def _query(data):\n ''' ('filename.ext', 'file description', ['list', 'of', 'tags']) '''\n results.append(([str(a.resource().genericLabel()) for a in data][0],\n [str(a.resource().description()) for a in data][0],\n [str(a.label()) for a in iter([a.resource().tags() for a in data][0]\n )]))\n nepo.newEntries.connect(_query)\n\n def _end():\n '''\n [ ('filename.ext', 'file description', ['list', 'of', 'tags']),\n ('filename.ext', 'file description', ['list', 'of', 'tags']),\n ('filename.ext', 'file description', ['list', 'of', 'tags']) ]\n '''\n nepo.newEntries.disconnect\n print(results)\n return results\n nepo.finishedListing.connect(_end)\n\n\n###############################################################################\n\n\ndef main():\n ' Main Loop '\n from getopt import getopt\n OPAQUE = True\n BORDER = True\n AUTO = False\n try:\n opts, args = getopt(sys.argv[1:], 'hvoba',\n ['version', 'help', 'opaque', 'borderless', 'auto'])\n pass\n except:\n pass\n for o, v in opts:\n if o in ('-h', '--help'):\n print('''\n Usage:\n -h, --help Show help informations and exit.\n -a, --auto Auto-Start Recording at start up.\n -v, --version Show version information and exit.\n -o, --opaque Use Opaque GUI.\n -b, --borderless No WM Borders.\n Run without parameters and arguments to use the GUI.\n ''')\n return sys.exit(1)\n elif o in ('-v', '--version'):\n print(__version__)\n return sys.exit(1)\n elif o in ('-o', '--opaque'):\n OPAQUE = False\n elif o in ('-b', '--borderless'):\n BORDER = False\n elif o in ('-a', '--auto'):\n AUTO = True\n # define our App\n app = QApplication(sys.argv)\n app.setApplicationName(__doc__)\n app.setOrganizationName(__author__)\n app.setOrganizationDomain(__author__)\n app.setStyle('Plastique')\n app.setStyle('Oxygen')\n # w is gonna be the mymainwindow class\n w = MyMainWindow(AUTO)\n # set the class with the attribute of translucent background as true\n if OPAQUE is True:\n w.setAttribute(Qt.WA_TranslucentBackground, True)\n # WM Borders\n if BORDER is False:\n w.setWindowFlags(w.windowFlags() | Qt.FramelessWindowHint)\n # run the class\n w.show()\n # if exiting the 
def main():\n ' Main Loop '\n from getopt import getopt, GetoptError\n OPAQUE = True\n BORDER = True\n AUTO = False\n opts, args = [], []\n try:\n opts, args = getopt(sys.argv[1:], 'hvoba',\n ['version', 'help', 'opaque', 'borderless', 'auto'])\n except GetoptError:\n pass\n for o, v in opts:\n if o in ('-h', '--help'):\n print('''\n Usage:\n -h, --help Show help information and exit.\n -a, --auto Auto-Start Recording at start up.\n -v, --version Show version information and exit.\n -o, --opaque Use Opaque GUI.\n -b, --borderless No WM Borders.\n Run without parameters and arguments to use the GUI.\n ''')\n return sys.exit(1)\n elif o in ('-v', '--version'):\n print(__version__)\n return sys.exit(1)\n elif o in ('-o', '--opaque'):\n OPAQUE = False\n elif o in ('-b', '--borderless'):\n BORDER = False\n elif o in ('-a', '--auto'):\n AUTO = True\n # define our App\n app = QApplication(sys.argv)\n app.setApplicationName(__doc__)\n app.setOrganizationName(__author__)\n app.setOrganizationDomain(__author__)\n app.setStyle('Plastique')\n app.setStyle('Oxygen')\n # w is gonna be the MyMainWindow class\n w = MyMainWindow(AUTO)\n # set the class with the attribute of translucent background as true\n if OPAQUE is True:\n w.setAttribute(Qt.WA_TranslucentBackground, True)\n # WM Borders\n if BORDER is False:\n w.setWindowFlags(w.windowFlags() | Qt.FramelessWindowHint)\n # run the class\n w.show()\n # if exiting the loop take down the app\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n ' Do NOT add anything here! Use main() instead. '\n main()\n","sub_path":"cinta-testigo-radio.py","file_name":"cinta-testigo-radio.py","file_ext":"py","file_size_in_byte":46909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"361135727","text":"import urllib\nimport time\n\nfrom bs4 import BeautifulSoup\nfrom operator import itemgetter\n\nLIFEDATA = [78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 32, 31, 30, 29, 28, 27, 26, 26, 25, 24, 23, 22, 21, 21, 20, 19, 18, 18, 17, 16, 15, 15, 14, 13, 13, 12, 11, 11, 10, 10, 9, 8, 8, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]\n\n
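# Illustrative read of the actuarial table above (numbers not taken from the\n# source site): LIFEDATA[65] == 18, so an actor who is 65 in 2015 gets a\n# predicted death year of 2015 + 18 = 2033 in getResults below.\n\n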
# returns up to num search-result links for title\ndef findMovieLinks(title, num):\n\n formatted = title.replace(' ', '+')\n\n results = []\n url = \"http://www.imdb.com/find?q=\" + formatted + \"&s=all\"\n page = BeautifulSoup(urllib.urlopen(url))\n\n for x in page.find(\"div\", {\"class\" : \"findSection\"}).find_all(\"tr\"):\n\n info = {}\n\n link = x.find(\"a\")[\"href\"].split('?')[0]\n link = \"http://www.imdb.com/\" + link + \"fullcredits?ref_=tt_cl_sm#cast\"\n\n info['link'] = link\n info['title'] = x.getText()\n info['pic'] = x.find(\"img\")['src']\n results.append(info)\n\n if len(results) >= num:\n break\n return results\n\ndef findActorLinks(movieLink, maxActors):\n results = []\n page = BeautifulSoup(urllib.urlopen(movieLink))\n count = 0\n for x in page.find_all(\"tr\", {\"class\" : [\"odd\",\"even\"]}):\n\n if x.parent['class'][0] == 'cast_list':\n results.append([x.find(\"span\").get_text(), \"http://www.imdb.com/\" + x.find(\"a\")[\"href\"]])\n count = count + 1\n if count == maxActors:\n break\n return results\n\ndef getResults(actorLinks):\n\n results = {}\n actors = []\n\n deadCount = 0\n aliveCount = 0\n unknownCount = 0\n totalCount = 0\n deathlink = \"http://www.ssa.gov/OACT/STATS/table4c6.html\"\n deathpage = BeautifulSoup(urllib.urlopen(deathlink))\n\n allDead = 0\n currentYear = int(time.strftime(\"%Y\"))\n\n for x in actorLinks:\n info = {}\n info['name'] = x[0]\n\n page = BeautifulSoup(urllib.urlopen(x[1]))\n\n try:\n birth = int(page.find(\"time\", {\"itemprop\" : \"birthDate\"}).find_all(\"a\")[1].get_text())\n info['birth'] = birth\n\n try:\n death = int(page.find(\"time\", {\"itemprop\" : \"deathDate\"}).find_all(\"a\")[1].get_text())\n\n info['death'] = death\n info['status'] = \"dead\"\n info['deathPrediction'] = death\n deadCount = deadCount + 1\n totalCount = totalCount + 1\n if death > allDead:\n allDead = death\n\n except:\n info['death'] = \"alive\"\n info['status'] = \"alive\"\n age = int(currentYear) - birth\n yearsLeft = LIFEDATA[age]\n deathYear = currentYear + yearsLeft\n info['deathPrediction'] = deathYear\n aliveCount = aliveCount + 1\n totalCount = totalCount + 1\n if deathYear > allDead:\n allDead = deathYear\n\n except:\n info['birth'] = \"unknown\"\n info['death'] = \"unknown\"\n info['status'] = \"unknown\"\n info['deathPrediction'] = 0\n unknownCount = unknownCount + 1\n totalCount = totalCount + 1\n\n actors.append(info)\n\n results['actors'] = actors\n\n results['statusCounts'] = {'dead': deadCount, 'alive': aliveCount, 'unknown': unknownCount, 'total': totalCount}\n if totalCount == 0: return results\n results['statusPercents'] = {'dead': deadCount/float(totalCount) * 100, 'alive': aliveCount/float(totalCount) * 100, 'unknown': unknownCount/float(totalCount) * 100}\n\n # Predicted year when the whole cast is dead\n results['predictedAllDeadYear'] = allDead\n\n return results\n\n# maxActors is the maximum number of actors returned\n# 0 means unlimited actors\n# get link from findMovieLinks\ndef movieInfo(movieLink, maxActors):\n\n actorLinks = findActorLinks(movieLink, maxActors)\n\n data = getResults(actorLinks)\n\n return data\n\nif __name__ == \"__main__\":\n print(movieInfo(findMovieLinks(\"the good the bad and the ugly\", 10)[1]['link'], 10))\n","sub_path":"movie_info.py","file_name":"movie_info.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"268024794","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nfrom scipy.optimize import curve_fit\nfrom scipy import interpolate\nfrom scipy.stats import norm\n\n\ndef years_from_1990(year, month):\n # print(month // 12)\n return year - 1992 + month // 12\n\n\n# indemnity stuff\n\ndf = pd.read_csv(\"Data/cleanedLoss.txt\", usecols=['Year', 'Month', 'Indemnity Amount'])\n\ndf['Time'] = years_from_1990(df['Year'], df['Month'])\ndf = df.loc[df['Time'] >= 0] # drop all rows before time we have loss data for\n\ndf = df.drop(columns=['Year', 'Month'])\n\ndf = df.groupby(['Time']).sum()\n\n# fill in missing rows, hard coded time steps\ndf = df.reindex(pd.RangeIndex(29)).ffill()\n\ntime_points = df.index.astype('int').to_numpy()\nloss = df['Indemnity Amount'].astype('float32').to_numpy()\n\n# precipitation stuff\n\ndf2 = pd.read_csv('Data/MinnesotaMonthlyPDSI.csv')\n\n# add 12 months to model delay as the impact of drought/excess water seems to take a year to take effect\ndf2['Time'] = years_from_1990(df2['Date'] // 100, (df2['Date'] % 100) + 12)\ndf2 = df2.groupby(['Time'], as_index=False).mean()\n\ndf2 = df2.loc[df2['Time'] >= 0] # drop all rows before time we have loss data for\ndf2 = df2.set_index('Time')\nprecipitation = df2['Value'].astype('float32').to_numpy()\n\nprecipitation = precipitation[:-1] # remove the extra row added by the 12-month shift\n\nprint('pMean:', precipitation.mean())\n\n# temperature stuff\n\ndf3 = pd.read_csv('Data/MinnesotaMonthlyTemperature.csv')\ndf3['Time'] = years_from_1990(df3['Date'] // 100, (df3['Date'] % 100) + 12)\ndf3 = df3.groupby(['Time'], as_index=False).mean()\n\ndf3 = df3.loc[df3['Time'] >= 0] # drop all rows before time we have loss data for\ndf3 = df3.set_index('Time')\ntemperature = df3['Value'].astype('float32').to_numpy()\n\ntemperature = temperature[:-1]\n\nassert (len(time_points) == len(loss) == len(precipitation) == len(temperature))\n\n\n# we have:\n# time, loss, precipitation, temperature\n\n\n# vector norm\ndef normalize(v):\n n = np.linalg.norm(v)\n if n == 0:\n return v\n return v / n\n\n\ndef precipitation_extremity(x, lower_bound):\n # y = np.where(x < lower_bound, lower_bound - x, 0)\n # z = np.where(x > upper_bound, x - upper_bound, 0)\n # return y ** 2 + z ** 2\n return (x - lower_bound) ** 2\n\n\ndef temp_extremity(x, ideal):\n # y = np.where(x < ideal, ideal - x, 0)\n # return y ** 2\n return (x - ideal) ** 2\n\n\ndef f_predicted_loss(time, ideal_precipitation, ideal_temperature,\n precipitation_weight,\n temperature_weight):\n return precipitation_weight * precipitation_extremity(precipitation(time), ideal_precipitation) \\\n + temperature_weight * temp_extremity(temperature(time), ideal_temperature)\n\n\n
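# Quick sanity check of the quadratic penalty shape above (illustrative\n# numbers, not from the data): precipitation_extremity(3.0, 1.0) == 4.0 and\n# temp_extremity(10.0, 8.0) == 4.0, i.e. deviations from the ideal value are\n# penalized symmetrically and quadratically on both sides.\n\n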
# scale variables to aid learning\n\nloss_scale = np.linalg.norm(loss)\nprecipitation_scale = np.linalg.norm(precipitation)\ntemperature_scale = np.linalg.norm(temperature)\n\nloss = loss / loss_scale\nprecipitation = precipitation / precipitation_scale\ntemperature = temperature / temperature_scale\n\nprint(loss_scale)\nprint(precipitation_scale)\nprint(temperature_scale)\n\n# save mean temperature for use with initial values\n\nmeanTemp = temperature.mean()\n\n# interpolation to make continuous to allow scipy optimize\n\nprecipitation = interpolate.interp1d(time_points, precipitation)\ntemperature = interpolate.interp1d(time_points, temperature)\n\nplt.plot(time_points, precipitation(time_points), label='precip')\nplt.plot(time_points, temperature(time_points), label='temp')\nplt.plot(time_points, loss, label='loss')\nplt.legend()\n# plt.show()\n# exit()\n\n# plt.plot(time, temperature(time))\n# plt.show()\n\n# print(meanTemp)\n# params = [0, meanTemp, 1, 1]\nparams, cov = curve_fit(f_predicted_loss, time_points, loss, p0=[0.19873167, -0.05463327, 2.23379107, 0.67416837],\n maxfev=10000)\n\n# print(f_predicted_loss(time_points[0], params[0], params[1], params[2], params[3]))\n\nprint('params:', params)\nprint(cov)\n\nparam_deviation = np.sqrt(np.diagonal(cov))\n\nparam_confidence = param_deviation * norm.ppf(.9)\n\nprint(param_deviation)\nprint(param_confidence)\n\nprint(param_confidence[0] * precipitation_scale)\nprint(param_confidence[1] * temperature_scale)\n\nprint()\n\n# plt.plot(time_points, params[2] * precipitation_extremity(precipitation(time_points), params[0])\n# label='precip')\n# plt.plot(time_points, params[3] * temp_extremity(temperature(time_points), params[1]), label='temp')\n\nplt.figure(2)\n\nplt.plot(time_points, loss * loss_scale, label='loss')\n\nplt.plot(time_points, f_predicted_loss(time_points, *params) * loss_scale,\n label='predict')\nplt.title('Predicted Loss vs. Actual Loss')\n
plt.xlabel('Years from 1990')\nplt.ylabel('Predicted and Actual Loss in Dollars')\n\nbottom, top = plt.ylim()\n# plt.ylim(0, top * loss_scale)\nplt.legend()\n# plt.show()\n\n\nplt.show()\n","sub_path":"Code/Other Code/lossAnalysisAnnualShifted.py","file_name":"lossAnalysisAnnualShifted.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"145526100","text":"from django.utils.timezone import utc\nfrom django.conf import settings\nimport contextlib\nimport os.path\nimport tempfile\nimport os\nimport fcntl\nimport select\nimport datetime\n\nutcnow_mock_value = None\n\ndef utcnow():\n if utcnow_mock_value:\n return utcnow_mock_value\n elif settings.MMIXER_MOCK_CURTIME:\n return settings.MMIXER_MOCK_CURTIME\n else:\n return datetime.datetime.utcnow().replace(tzinfo=utc)\n\n@contextlib.contextmanager\ndef test_override_now(dt):\n global utcnow_mock_value\n if dt.tzinfo is None: dt = dt.replace(tzinfo=utc)\n old = utcnow_mock_value\n utcnow_mock_value = dt\n try:\n yield\n finally:\n utcnow_mock_value = old\n\nclass atomic_writer(object):\n \"\"\"\n Atomically write to a file\n \"\"\"\n def __init__(self, fname, mode, osmode=0o644, sync=True, **kw):\n self.fname = fname\n self.osmode = osmode\n self.sync = sync\n dirname = os.path.dirname(self.fname)\n self.fd, self.abspath = tempfile.mkstemp(dir=dirname, text=\"b\" not in mode)\n self.outfd = open(self.fd, mode, closefd=True, **kw)\n\n def __enter__(self):\n return self.outfd\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is None:\n self.outfd.flush()\n if self.sync: os.fdatasync(self.fd)\n os.fchmod(self.fd, self.osmode)\n os.rename(self.abspath, self.fname)\n else:\n os.unlink(self.abspath)\n self.outfd.close()\n return False\n\n
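# Minimal usage sketch for atomic_writer (hypothetical path): the file only\n# appears under its final name if the with-block exits cleanly; on an\n# exception the temporary file is removed instead.\n#\n#   with atomic_writer(\"/tmp/report.txt\", \"w\") as fd:\n#       fd.write(\"all or nothing\")\n\n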
def stream_output(proc):\n \"\"\"\n Take a subprocess.Popen object and generate its output, line by line,\n annotated with \"stdout\" or \"stderr\". At process termination it generates\n one last element: (\"result\", return_code) with the return code of the\n process.\n \"\"\"\n fds = [proc.stdout, proc.stderr]\n bufs = [b\"\", b\"\"]\n types = [\"stdout\", \"stderr\"]\n # Set both pipes as non-blocking\n for fd in fds:\n fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)\n # Multiplex stdout and stderr with different prefixes\n while len(fds) > 0:\n s = select.select(fds, (), ())\n for fd in s[0]:\n idx = fds.index(fd)\n buf = fd.read()\n if len(buf) == 0:\n fds.pop(idx)\n if len(bufs[idx]) != 0:\n yield types[idx], bufs.pop(idx)\n types.pop(idx)\n else:\n bufs[idx] += buf\n lines = bufs[idx].split(b\"\\n\")\n bufs[idx] = lines.pop()\n for l in lines:\n yield types[idx], l\n res = proc.wait()\n yield \"result\", res\n\ndef serialize_ts(ts):\n if ts.tzinfo != utc:\n ts = ts.astimezone(utc)\n return ts.strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef deserialize_ts(ts):\n dt = datetime.datetime.strptime(ts, \"%Y-%m-%d %H:%M:%S\")\n return dt.replace(tzinfo=utc)\n\n
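# Round-trip sketch: serialize_ts and deserialize_ts are inverses for UTC\n# datetimes up to second precision, e.g. a tz-aware 2016-01-02 03:04:05 UTC\n# serializes to \"2016-01-02 03:04:05\" and parses back to the same instant.\n\n\n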
def _maybe_bytes_to_str(s):\n \"\"\"\n If s is a bytes instance, return str decoding it from utf8, else return s\n \"\"\"\n if isinstance(s, bytes):\n return s.decode(\"utf-8\")\n return s\n\nclass CSVProduct(object):\n def __init__(self, fname):\n self.fname = fname\n self.keys = []\n\n def read(self):\n import csv\n with open(self.fname) as fd:\n reader = csv.reader(fd)\n for idx, row in enumerate(reader):\n if idx == 0:\n self.keys = row\n continue\n yield dict(zip(self.keys, row))\n\n def read_as_dict(self):\n return { (float(rec[\"Latitude\"]), float(rec[\"Longitude\"])): rec[\"Value\"] for rec in self.read() }\n\n def copy(self, outfd, row_hook=None):\n import csv\n writer = csv.writer(outfd, lineterminator=\"\\n\")\n for idx, r in enumerate(self.read()):\n if idx == 0:\n writer.writerow(self.keys)\n if row_hook is not None:\n row_hook(r)\n writer.writerow([r[k] for k in self.keys])\n","sub_path":"worksession/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"435444567","text":"from __future__ import absolute_import\n\nimport csv\nimport datetime\nimport hashlib\nimport logging\nfrom multiprocessing import Process\nimport shlex\nimport string\nimport struct\n\nfrom django.db import models, transaction\nfrom django.http import Http404\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.timezone import now\nfrom django.template.defaultfilters import slugify\n\nfrom suds.client import Client\nimport jsonfield\nimport xlrd\nfrom model_utils.managers import InheritanceManager\n\nfrom .literals import DEFAULT_FIRST_ROW_NAMES, DEFAULT_LIMIT, DEFAULT_SHEET\n\nHASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest()\nlogger = logging.getLogger(__name__)\n\n\nclass Source(models.Model):\n name = models.CharField(max_length=128, verbose_name=_('name'), help_text=('Human readable name for this source.'))\n slug = models.SlugField(blank=True, max_length=48, verbose_name=_('slug'), help_text=('URL friendly description of this source. If none is specified the name will be used.'))\n\n objects = InheritanceManager()\n\n def get_type(self):\n return self.__class__.source_type\n\n def __unicode__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name)\n super(Source, self).save(*args, **kwargs)\n\n @models.permalink\n def get_absolute_url(self):\n return ('resource_get_all', [self.slug])\n\n class Meta:\n verbose_name = _('source')\n verbose_name_plural = _('sources')\n\n\nclass SourceWS(Source):\n source_type = _('SOAP web service')\n wsdl_url = models.URLField(verbose_name=_('WSDL URL'))\n\n def get_parameters(self, parameters=None):\n result = parameters.copy()\n\n for argument in self.wsargument_set.all():\n if argument.name not in result:\n if argument.default:\n result[argument.name] = argument.default\n\n return result\n\n def get_one(self, id, timestamp=None, parameters=None):\n # IDs are 1-based\n if id == 0:\n raise Http404\n\n return self.get_all(timestamp, parameters)[id-1]\n\n def get_all(self, timestamp=None, parameters=None):\n if not parameters:\n parameters = {}\n\n client = Client(self.wsdl_url)\n\n result = []\n try:\n for i in client.service.getEstablishments(**self.get_parameters(parameters))[0]:\n entry = {}\n for field in self.wsresultfield_set.all():\n entry[field.name] = getattr(i, field.name, field.default)\n\n result.append(entry)\n except IndexError:\n result = []\n\n return result\n\n class Meta:\n verbose_name = _('web service source')\n verbose_name_plural = _('web service sources')\n\n\nclass WSArgument(models.Model):\n source_ws = models.ForeignKey(SourceWS, verbose_name=_('web service source'))\n name = models.CharField(max_length=32, verbose_name=_('name'))\n default = models.CharField(max_length=32, blank=True, verbose_name=_('default'))\n\n class Meta:\n verbose_name = _('web service argument')\n verbose_name_plural = _('web service arguments')\n\n\nclass WSResultField(models.Model):\n source_ws = models.ForeignKey(SourceWS, verbose_name=_('web service source'))\n name = models.CharField(max_length=32, verbose_name=_('name'))\n default = models.CharField(max_length=32, blank=True, verbose_name=_('default'))\n\n class Meta:\n verbose_name = _('web service result field')\n verbose_name_plural = _('web service result fields')\n\n\nclass SourceFileBased(Source):\n limit = models.PositiveIntegerField(default=DEFAULT_LIMIT, verbose_name=_('limit'), help_text=('Maximum number of items to show when all items are requested.'))\n path = models.TextField(blank=True, null=True, verbose_name=_('path to file'), help_text=('Location to a file in the filesystem.'))\n file = models.FileField(blank=True, null=True, upload_to='spreadsheets', verbose_name=_('uploaded file'))\n column_names = models.TextField(blank=True, verbose_name=_('column names'), help_text=('Specify the column names to use. 
Enclose names with quotes and separate with commas.'))\n\n def get_column_names(self):\n \"\"\"\n Split column names by comma but obeying quoted names\n \"\"\"\n if self.column_names:\n result = []\n for number, token in enumerate(shlex.split(self.column_names)):\n if number < len(shlex.split(self.column_names)) - 1:\n result.append(token[:-1])\n else:\n result.append(token)\n return result\n else:\n return string.ascii_uppercase\n\n def check_file(self):\n if self.path:\n try:\n with open(self.path) as handle:\n new_hash = HASH_FUNCTION(handle.read())\n except IOError as exception:\n logger.error('Unable to open file for source id: %s ;%s' % (self.id, exception))\n raise\n else:\n new_hash = HASH_FUNCTION(self.file.read())\n self.file.seek(0)\n try:\n source_data_version = self.sourcedataversion_set.get(checksum=new_hash)\n except SourceDataVersion.DoesNotExist:\n source_data_version = SourceDataVersion.objects.create(source=self, checksum=new_hash)\n p = Process(target=self.import_data, args=(source_data_version,))\n p.start()\n logger.debug('launching subprocess: %s' % p)\n else:\n source_data_version.active = True\n source_data_version.save()\n\n def get_one(self, id, timestamp=None, parameters=None):\n # TODO: return a proper response when no sourcedataversion is found\n if timestamp:\n source_data_version = self.sourcedataversion_set.get(timestamp=timestamp)\n else:\n source_data_version = self.sourcedataversion_set.get(active=True)\n\n return SourceData.objects.get(source_data_version=source_data_version, row_id=id).row\n\n def get_all(self, timestamp=None, parameters=None):\n try:\n if timestamp:\n source_data_version = self.sourcedataversion_set.get(timestamp=timestamp)\n else:\n source_data_version = self.sourcedataversion_set.get(active=True)\n except SourceDataVersion.DoesNotExist:\n return []\n\n return [item.row for item in SourceData.objects.filter(source_data_version=source_data_version)[0:self.limit]]\n\n class Meta:\n abstract = True\n\n\nclass SourceCSV(SourceFileBased):\n source_type = _('CSV file')\n\n first_row_names = models.BooleanField(default=DEFAULT_FIRST_ROW_NAMES, verbose_name=_('first row names'), help_text=('Use the values of the first row as the column names.'))\n delimiter = models.CharField(blank=True, max_length=1, default=',', verbose_name=_('delimiter'))\n quote_character = models.CharField(blank=True, max_length=1, verbose_name=_('quote character'))\n\n def _get_items(self):\n column_names = self.get_column_names()\n\n kwargs = {}\n if self.delimiter:\n kwargs['delimiter'] = str(self.delimiter)\n if self.quote_character:\n kwargs['quotechar'] = str(self.quote_character)\n\n reader = csv.reader(self._file_handle, **kwargs)\n\n if self.first_row_names:\n column_names = reader.next()\n\n for row in reader:\n yield dict(zip(column_names, row))\n\n @transaction.commit_on_success\n def import_data(self, source_data_version):\n # Reload data in case this is executed in another thread\n source_data_version = SourceDataVersion.objects.get(pk=source_data_version.pk)\n\n if self.path:\n self._file_handle = open(self.path)\n else:\n self._file_handle = self.file\n\n row_id = 1\n for row in self._get_items():\n SourceData.objects.create(source_data_version=source_data_version, row_id=row_id, row=row)\n row_id = row_id + 1\n\n self._file_handle.close()\n\n source_data_version.ready = True\n source_data_version.active = True\n source_data_version.save()\n\n class Meta:\n verbose_name = _('CSV source')\n verbose_name_plural = _('CSV sources')\n\n\nclass 
SourceFixedWidth(SourceFileBased):\n source_type = _('Fixed width column file')\n\n first_row_names = models.BooleanField(default=DEFAULT_FIRST_ROW_NAMES, verbose_name=_('first row names'), help_text=('Use the values of the first row as the column names.'))\n column_widths = models.TextField(blank=True, null=True, verbose_name=_('column widths'), help_text=_('The column widths separated by a comma.'))\n\n def _get_items(self):\n column_names = self.get_column_names()\n\n fmtstring = ''.join('%ds' % f for f in map(int, self.column_widths.split(',')))\n parse = struct.Struct(fmtstring).unpack_from\n\n if self.first_row_names:\n column_names = map(string.strip, parse(self._file_handle.readline()))\n\n for row in self._file_handle.readlines():\n yield dict(zip(column_names, map(string.strip, parse(row))))\n\n @transaction.commit_on_success\n def import_data(self, source_data_version):\n # Reload data in case this is executed in another thread\n source_data_version = SourceDataVersion.objects.get(pk=source_data_version.pk)\n\n if self.path:\n self._file_handle = open(self.path)\n else:\n self._file_handle = self.file\n\n row_id = 1\n for row in self._get_items():\n SourceData.objects.create(source_data_version=source_data_version, row_id=row_id, row=row)\n row_id = row_id + 1\n\n self._file_handle.close()\n\n source_data_version.ready = True\n source_data_version.active = True\n source_data_version.save()\n\n class Meta:\n verbose_name = _('Fixed width source')\n verbose_name_plural = _('Fixed width sources')\n\n\nclass SourceSpreadsheet(SourceFileBased):\n source_type = _('Spreadsheet file')\n\n sheet = models.CharField(max_length=32, default=DEFAULT_SHEET, verbose_name=_('sheet'), help_text=('Worksheet of the spreadsheet file to use.'))\n first_row_names = models.BooleanField(default=DEFAULT_FIRST_ROW_NAMES, verbose_name=_('first row names'), help_text=('Use the values of the first row as the column names.'))\n\n def _convert_value(self, item):\n \"\"\"\n Handle different value types for XLS. 
Item is a cell object.\n \"\"\"\n # Thanks to Augusto C Men for pointing out a fast solution for XLS/XLSX dates\n if item.ctype == 3: #XL_CELL_DATE:\n try:\n return datetime.datetime(*xlrd.xldate_as_tuple(item.value, self._book.datemode))\n except ValueError:\n # TODO: make togglable\n # Invalid date\n return item.value\n\n if item.ctype == 2: #XL_CELL_NUMBER:\n if item.value % 1 == 0: # integers\n return int(item.value)\n else:\n return item.value\n\n return item.value\n\n def _get_items(self):\n column_names = self.get_column_names()\n\n if self.first_row_names:\n column_names = [cell.value for cell in self._sheet.row(0)]\n\n for i in range(1 if self.first_row_names else 0, self._sheet.nrows):\n #values = [self.convert_value(cell) for cell in self._sheet.row(i)]\n #if not any(values):\n # continue # empty lines are ignored\n #yield values\n\n result = {}\n column_count = 0\n\n for cell in self._sheet.row(i):\n result[column_names[column_count]] = self._convert_value(cell)\n column_count += 1\n\n yield result\n\n @transaction.commit_on_success\n def import_data(self, source_data_version):\n # Reload data in case this is executed in another thread\n source_data_version = SourceDataVersion.objects.get(pk=source_data_version.pk)\n\n logger.debug('opening workbook')\n if self.path:\n self._book = xlrd.open_workbook(self.path)\n file_handle = None\n else:\n file_handle = self.file\n self._book = xlrd.open_workbook(file_contents=file_handle.read())\n\n logger.debug('opening sheet: %s' % self.sheet)\n try:\n self._sheet = self._book.sheet_by_name(self.sheet)\n except xlrd.XLRDError:\n self._sheet = self._book.sheet_by_index(int(self.sheet))\n\n logger.debug('importing rows')\n row_id = 1\n for row in self._get_items():\n SourceData.objects.create(source_data_version=source_data_version, row_id=row_id, row=row)\n row_id = row_id + 1\n\n if file_handle:\n file_handle.close()\n\n logger.debug('finished importing rows')\n\n source_data_version.ready = True\n source_data_version.active = True\n source_data_version.save()\n logger.debug('exiting')\n\n class Meta:\n verbose_name = _('spreadsheet source')\n verbose_name_plural = _('spreadsheet sources')\n\n\nclass SourceDataVersion(models.Model):\n source = models.ForeignKey(Source, verbose_name=_('source'))\n datetime = models.DateTimeField(default=lambda: now())\n timestamp = models.CharField(blank=True, max_length=20, verbose_name=_('timestamp'))\n checksum = models.TextField(verbose_name=_('checksum'))\n ready = models.BooleanField(default=False, verbose_name=_('ready'))\n active = models.BooleanField(default=False, verbose_name=_('active'))\n\n def save(self, *args, **kwargs):\n self.timestamp = datetime.datetime.strftime(self.datetime, '%Y%m%d%H%M%S%f')\n if self.active:\n SourceDataVersion.objects.filter(source=self.source).update(active=False)\n super(SourceDataVersion, self).save(*args, **kwargs)\n\n class Meta:\n verbose_name = _('source data version')\n verbose_name_plural = _('sources data versions')\n unique_together = (('source', 'datetime'), ('source', 'timestamp'), ('source', 'checksum'))\n\n\n
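# Timestamp format sketch for SourceDataVersion.save above: a version dated\n# 2013-07-01 12:00:00.000005 UTC yields the 20-character sortable string\n# '20130701120000000005' ('%Y%m%d%H%M%S%f'), fitting max_length=20.\n\n\n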
data')\n\n","sub_path":"libre/apps/data_drivers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"618764451","text":"import logging\nimport time\nfrom decimal import Decimal\nfrom math import exp\nfrom math import sin\n\nfrom step_one import dictionary\n\nlogging.basicConfig(level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nstart = time.time()\n\n# формулы\n# 1. Низшая теплота сгорания газа принятого состава Qн:\n\nqn = Decimal((Decimal('0.01') * (Decimal(dictionary.v1) * Decimal(dictionary.c6CH4) + Decimal(dictionary.v2) * Decimal(\n dictionary.c6CO2))) / Decimal('1000000')) # Мдж/м3\n\nqn = round(qn, 2)\nlogger.info('Низшая теплота сгорания газа принятого состава, %s Мдж/м3', qn)\n\n# 2. Теоретически необходимое для горения количество воздуха, м3/м3:\n\nV0 = Decimal((Decimal('0.01') * (Decimal(dictionary.v1) * Decimal(dictionary.c7CH4) + Decimal(dictionary.v2) * Decimal(\n dictionary.c7CO2)))) # м3/м3\n\nV0 = round(V0, 2)\nlogger.info('Теоретически необходимое для горения количество воздуха, %s м3/м3', V0)\n\n# 3. Абсолютная плотность газа принятого состава, кг/м3:\n\nrg = Decimal((Decimal('0.01') * (Decimal(dictionary.v1) * Decimal(dictionary.c4CH4) + Decimal(dictionary.v2) * Decimal(\n dictionary.c4CO2)))) # кг/м3\n\nrg = round(rg, 2)\nlogger.info('Абсолютная плотность газа принятого состава, %s кг/м3', rg)\n\n# 4. Относительная плотность газа принятого состава:\n\nS = Decimal((Decimal('0.01') * (Decimal(dictionary.v1) * Decimal(dictionary.c5CH4) + Decimal(dictionary.v2) * Decimal(\n dictionary.c5CO2))))\n\nS = round(S, 2)\nlogger.info('Относительная плотность газа принятого состава %s', S)\n\n# 5. Газовая постоянная газа принятого состава, Дж/(кг*К):\n\nRg = Decimal('831451') / (Decimal(dictionary.v1) * Decimal(dictionary.c1CH4) + Decimal(dictionary.v2) *\n Decimal(dictionary.c1CO2)) # Дж/(кг*К)\n\nRg = round(Rg, 2)\nlogger.info('Газовая постоянная газа принятого состава, %s Дж/(кг*К)', Rg)\n\n# 6. Показатель адиабаты газа принятого состава:\n\nkg = Decimal((Decimal('0.01') * (Decimal(dictionary.v1) * Decimal(dictionary.c3CH4) + Decimal(dictionary.v2) * Decimal(\n dictionary.c3CO2))))\n\nkg = round(kg, 2)\nlogger.info('Показатель адиабаты газа принятого состава %s', kg)\n\n# 7. Изобраная теплоёмкость газа кДж/(кг * К):\n\nCp = (Decimal('1.695') + Decimal('0.001838') * (Decimal('273') + Decimal(dictionary.v6)) + Decimal('1960000') *\n ((Decimal(dictionary.v3) - Decimal('0.1')) / (Decimal('273') + Decimal(dictionary.v6)))) ** 3\n\nCp = round(Cp, 2)\nlogger.info('Изобраная теплоёмкость газа %s кДж/(кг * К)', Cp)\n\n# 8. Производительность газовой горелки при заданной мощности м3 / с:\n\nQ1 = Decimal('3.6') * Decimal(Decimal(dictionary.v4) / Decimal('1000')) / Decimal(qn)\n\nQ1 = round(Q1, 2)\nlogger.info('Производительность газовой горелки при заданной мощности %s м3 / с', Q1)\n\n# 9. 
# 9. Required total area of the outlet ports of the burner head, cm2:\n\n# A) For round ports:\n\nFod = Decimal(Decimal('0.01') * Decimal(dictionary.v14) * Decimal('3.14159') * Decimal(Decimal(dictionary.v8)) *\n Decimal(Decimal(dictionary.v8))) / Decimal('4')\n\nFod = round(Fod, 7)\nlogger.info('Required total outlet port area of the burner head %s cm2 (round ports)', Fod)\n\n# B) For rectangular ports:\n\nForc = Decimal('0.01') * Decimal(dictionary.v14) * Decimal(dictionary.v10) * Decimal(dictionary.v12)\n\nForc = round(Forc, 7)\nlogger.info('Required total outlet port area of the burner head %s cm2 (rectangular ports)', Forc)\n\n# 10. Cross-sectional diameter of the burner head, mm:\n\n# 1) For round ports:\n\nDgol1 = (Decimal(Decimal('4') - Decimal(dictionary.v8))) * Decimal(dictionary.v8) / Decimal(sin(0.1))\n\nDgol1 = round(Dgol1, 3)\nlogger.info('Cross-sectional diameter of the burner head, %s mm (round ports)', Dgol1)\n# The head diameter must be between 3.5 and 100\n\n# 2) For rectangular ports:\n\nDgol2 = (Decimal(Decimal('4') - Decimal(dictionary.v10))) * Decimal(dictionary.v8) / Decimal(sin(0.1))\n\nDgol2 = round(Dgol2, 3)\nlogger.info('Cross-sectional diameter of the burner head, %s mm (rectangular ports)', Dgol2)\n# The head diameter must be between 3.5 and 100\n\n# 11. Exit velocity of the gas-air mixture from the flame ports of the burner head, m/s:\n\nU_0 = (Decimal(Q1) * (Decimal('1') + Decimal(dictionary.v5) * Decimal(V0))) / Decimal(Fod)\n\nU_0 = round(U_0, 2)\nlogger.info('Exit velocity of the gas-air mixture from the flame ports of the burner head %s m/s', U_0)\n\n# 12. Gas discharge velocity from the nozzle, m/s:\n\nU_s = Decimal(dictionary.v16) * (\n ((Decimal('2') * Decimal(kg) * Decimal(Rg) * (Decimal('273') + Decimal(dictionary.v6))) /\n (Decimal(kg) - Decimal('1'))) * (\n Decimal('1') - (Decimal(dictionary.v7) / (Decimal(dictionary.v7) + Decimal(dictionary.v3))) **\n ((Decimal(kg) - Decimal('1')) / Decimal(kg)))) ** Decimal('0.5')\n\nU_s = round(U_s, 2)\nlogger.info('Gas discharge velocity from the nozzle, %s m/s', U_s)\n\n# 13. Nozzle cross-sectional area, cm2:\n\nFc = (Decimal(Q1) / (Decimal('0.36') * Decimal(U_s)))\n\nFc = round(Fc, 2)\nlogger.info('Nozzle cross-sectional area %s cm2', Fc)\n\n# 14. Nozzle bore diameter, mm:\n\nd_c = Decimal(1000) * ((Decimal(4) * Decimal(Q1)) / (Decimal(3600) * Decimal(3.14159) * Decimal(U_s)))\n\nd_c = round(d_c, 4)\nlogger.info('Nozzle bore diameter, %s mm', d_c)\n\n\n
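# Step 15 below maximizes the burner efficiency (kpd) subject to a CO < 360\n# constraint with a coarse-to-fine grid search over spreader diameter and\n# height: a first pass with step 1, then re-scans of a +/-1 and a +/-0.1\n# window around the best point with steps 0.1 and 0.01.\n\n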
# 15. Optimal dimensions of the heat spreader\ndef calc():\n delta = 1\n diamter = 6\n height = 6\n kpdtemp1 = 0\n diamtert1 = 0\n heightt1 = 0\n while diamter < 12:\n while height < 24:\n kpd = 36.3091752883329 + (2.66579338833343 * diamter) + (0.613323721222232 * height) - (\n 0.118982154166671 * diamter ** 2) - \\\n (0.0281706407222229 * diamter * height) - (0.0102175696759261 * height ** 2)\n\n co = 707.783333333346 - (62.9041666666692 * diamter) - (20.7408333333336 * height) + (\n 2.48958333333345 * diamter ** 2) + (\n 1.39388888888891 * diamter * height) + (0.302083333333337 * height ** 2)\n # logger.info(\"kpd is \" + str(kpd) + \" co is \" + str(co))\n if kpdtemp1 < kpd and co < 360:\n kpdtemp1 = kpd\n diamtert1 = diamter\n heightt1 = height\n height += delta\n height = 6\n diamter += delta\n # return kpdtemp1, diamtert1, heightt1\n\n delta = 0.1\n diamter = diamtert1 - 1\n height = heightt1 - 1\n kpdtemp2 = 0\n diamtert2 = 0\n heightt2 = 0\n while diamter < diamtert1 + 1:\n while height < heightt1 + 1:\n kpd = 36.3091752883329 + (2.66579338833343 * diamter) + (0.613323721222232 * height) - (\n 0.118982154166671 * diamter ** 2) - \\\n (0.0281706407222229 * diamter * height) - (0.0102175696759261 * height ** 2)\n\n co = 707.783333333346 - (62.9041666666692 * diamter) - (20.7408333333336 * height) + (\n 2.48958333333345 * diamter ** 2) + (\n 1.39388888888891 * diamter * height) + (0.302083333333337 * height ** 2)\n if kpdtemp2 < kpd and co < 360:\n kpdtemp2 = kpd\n diamtert2 = diamter\n heightt2 = height\n height += delta\n height = heightt1 - 1\n diamter += delta\n\n delta = 0.01\n diamter = diamtert2 - 0.1\n height = heightt2 - 0.1\n kpdtemp3 = 0\n diamtert3 = 0\n heightt3 = 0\n while diamter < diamtert2 + 0.1:\n while height < heightt2 + 0.1:\n kpd = 36.3091752883329 + (2.66579338833343 * diamter) + (0.613323721222232 * height) - (\n 0.118982154166671 * diamter ** 2) - \\\n (0.0281706407222229 * diamter * height) - (0.0102175696759261 * height ** 2)\n\n co = 707.783333333346 - (62.9041666666692 * diamter) - (20.7408333333336 * height) + (\n 2.48958333333345 * diamter ** 2) + (\n 1.39388888888891 * diamter * height) + (0.302083333333337 * height ** 2)\n if kpdtemp3 < kpd and co < 360:\n kpdtemp3 = kpd\n diamtert3 = diamter\n heightt3 = height\n height += delta\n height = heightt2 - 0.1\n diamter += delta\n return kpdtemp3, diamtert3, heightt3\n\n\ncalc = calc()\n\nd_rass = calc[1] # spreader base diameter\nh_rass = calc[2] # spreader height\nlogger.info('Spreader base diameter, %s mm', d_rass)\nlogger.info('Spreader height, %s mm', h_rass)\n\n# 16. Inner surface area of the cap, m2:\n\nFtepl = Decimal(0.000001) * Decimal(3.14159 / 4) * \\\n Decimal(Decimal(Dgol1) ** Decimal(2) - Decimal(d_rass) ** Decimal(2) + Decimal(2) * Decimal(d_rass) *\n Decimal((Decimal(h_rass) ** Decimal(2)) + (Decimal(d_rass) ** Decimal(2)) / Decimal(4)) ** Decimal(0.5))\n\nFtepl = round(Ftepl, 10)\nlogger.info('Inner surface area of the cap, %s m2', Ftepl)\n\n# 17. Mass flow rate of the gas-air mixture, kg/s:\n\nGpot = Decimal(U_s) * Decimal(Fc) * Decimal(Decimal(rg) + Decimal(dictionary.v5) * Decimal(V0) *\n Decimal(dictionary.c1air))\n\nGpot = round(Gpot, 2)\nlogger.info('Mass flow rate of the gas-air mixture, %s kg/s', Gpot)\n\n
# 18. Final temperature of the gas-air mixture at the exit of the burner flame ports, C:\n\nt_k = Decimal(dictionary.v18) - Decimal(Decimal(dictionary.v18) - Decimal(dictionary.v6)) * \\\n Decimal(exp(- ((Decimal(Ftepl) * Decimal(dictionary.v19)) / (Decimal(Gpot) * Decimal(Cp)))))\n\nt_k = round(t_k, 2)\nlogger.info('Final temperature of the gas-air mixture at the exit of the burner flame ports, C: %s', t_k)\n\n# 19. Drag coefficient of the burner head ports:\n\ndjeta = Decimal(Decimal(1) - Decimal(dictionary.v15) ** Decimal(2)) / (Decimal(dictionary.v15) ** Decimal(2))\n\ndjeta = round(djeta, 2)\nlogger.info('Drag coefficient of the burner head ports %s', djeta)\n\n# 20. Energy loss coefficient in the burner head:\n\nK1 = Decimal(djeta) + Decimal(2) * (Decimal(t_k) + Decimal(273)) / Decimal(273) - Decimal(1)\n\nK1 = round(K1, 2)\nlogger.info('Energy loss coefficient in the burner head %s', K1)\n\n# 21. Optimal value of the burner parameter:\n\nFopt = (Decimal(dictionary.v17) / Decimal(K1)) ** Decimal(0.5)\n\nFopt = round(Fopt, 2)\nlogger.info('Optimal value of the burner parameter %s', Fopt)\n\n# 22. Volumetric ejection coefficient:\n\nus = Decimal(dictionary.v5) * Decimal(V0)\n\nus = round(us, 2)\nlogger.info('Volumetric ejection coefficient %s', us)\n\n# 23. Mass ejection coefficient:\n\nu = Decimal(dictionary.v5) * Decimal(V0) / Decimal(S)\n\nu = round(u, 2)\nlogger.info('Mass ejection coefficient %s', u)\n\n# If A < 1, the burner operates in a non-optimal mode:\n# 24. Parameter A:\n\nA1 = (Decimal(K1) * (Decimal(1) + Decimal(u)) * (Decimal(1) + Decimal(us)) * Decimal(Fc) * Decimal(Fopt)) / Decimal(Fod)\n\nA1 = round(A1, 2)\nlogger.info('Parameter A %s', A1)\n\n\n# 25. Smallest root of the quadratic equation A1 * x ** 2 - 2 * x + A1 = 0:\n\n\ndef sqr():\n import math\n b = -2\n a = float(A1)\n c = a\n # logger.info('a * x ** 2 + b * x + c = 0')\n discr = b ** 2 - 4 * a * c\n # logger.info(discr)\n if discr > 0:\n x1 = (-b + math.sqrt(discr)) / (2 * a)\n x2 = (-b - math.sqrt(discr)) / (2 * a)\n return x1, x2\n elif discr == 0:\n x3 = -b / (2 * a)\n return x3, x3\n else:\n return logger.info('No real roots')\n\n\nsqr = sqr()\n\nx = min(sqr)\nx = round(x, 4)\nlogger.info('x = %s', x)\n\n# 26. Mixer parameter in the non-optimal mode:\n\nF_1 = Decimal(x) * Decimal(Fopt)\n\nF_1 = round(F_1, 2)\nlogger.info('Mixer parameter in the non-optimal mode %s', F_1)\n\n# 27. Mixer throat area, cm2:\n\nF_gor = Decimal(F_1) * Decimal(Fod)\n\nF_gor = round(F_gor, 2)\nlogger.info('Mixer throat area, %s cm2', F_gor)\n\n# 28. Mixer throat diameter, mm:\n\ndg = Decimal(10) * ((Decimal(4) * Decimal(F_gor)) / Decimal(3.14159)) ** Decimal(0.5)\n\ndg = round(dg, 2)\nlogger.info('Mixer throat diameter, %s mm', dg)\n\n# If A = 1, the burner operates in the optimal mode:\n# 29. Mixer throat area, cm2, if A = 1:\n\nF_gor_opt = Decimal(Fopt) * Decimal(Fod)\n\nF_gor_opt = round(F_gor_opt, 2)\nlogger.info('Optimal mixer throat area %s cm2', F_gor_opt)\n\n
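# Worked example for the throat diameter formula below, d = 10 * sqrt(4 * F / pi)\n# with F in cm2 and d in mm (illustrative numbers): F_gor_opt = 7.07 cm2 gives\n# dg_opt = 10 * sqrt(4 * 7.07 / 3.14159) ~= 30 mm.\n\n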
# 30. Mixer throat diameter, mm, if A = 1:\n\ndg_opt = Decimal('10') * ((Decimal('4') * Decimal(F_gor_opt)) / Decimal('3.14159')) ** Decimal('0.5')\n\ndg_opt = round(dg_opt, 2)\nlogger.info('Optimal mixer throat diameter, %s mm', dg_opt)\n\nend = time.time()\nlogger.info(end - start)\n","sub_path":"step_one/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":15205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"295438459","text":"'''\r\nCreated on Apr 22, 2016\r\n\r\n@author: SashaTheSledDog\r\n'''\r\n\r\nclass Sensor(object):\r\n '''\r\n Container for a ship sensor component: the sensor type, the number of\r\n units fitted, and the firing arcs it faces.\r\n '''\r\n\r\n\r\n def __init__(\r\n self, sensor, num_units, facing_arcs):\r\n '''\r\n Constructor\r\n '''\r\n self.sensor = sensor\r\n self.num_units = num_units\r\n self.facing_arcs = facing_arcs","sub_path":"warship/component/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"366764688","text":"from tkinter import *\nimport math\nimport random\n\nroot = Tk()\n\nw = 1100\ncanvas = Canvas(root, width = w, height = w)\ncanvas.pack()\n\ndef rand_color():\n hexCols = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'A', 'B', 'C', 'D', 'E', 'F']\n col = '#'\n for i in range(6):\n col += str(random.choice(hexCols))\n return col\n\ndef draw_hexagon(x, y, a):\n canvas.create_polygon([x, y, x + a, y, x + 3*a/2, y + math.sqrt(3)*a/2, x + a, y + math.sqrt(3)*a, x, y + math.sqrt(3)*a, x - a/2, y + math.sqrt(3)*a/2], fill='white', outline=\"black\")\n\ndef draw_fractal(x, y, size):\n draw_hexagon(x, y, size)\n less = size/3\n if size >= 5:\n draw_fractal(x, y, less)\n draw_fractal(x+size*2/3, y, less)\n draw_fractal(x-less, y+ math.sqrt(3)*size/3, less)\n draw_fractal(x, y+ math.sqrt(3)*size*2/3, less)\n draw_fractal(x + size, y+ math.sqrt(3)*size/3, less)\n draw_fractal(x+size*2/3, y+ math.sqrt(3)*size*2/3, less)\n# draw_fractal(x + size/3, y+ math.sqrt(3)*size/3, less)\n else:\n pass\n\n\ndraw_fractal(250, 10, 500)\n\nroot.mainloop()","sub_path":"week-04/day-5/hexagon2.py","file_name":"hexagon2.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"604487519","text":"import os\nimport importlib\nimport warnings\nimport random\nfrom collections import defaultdict, namedtuple\nfrom abc import ABCMeta, abstractmethod\nfrom typing import List, Dict, Tuple, Optional, NamedTuple\nimport pickle\n\nimport matplotlib\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore')\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sb\nimport numpy as np\nfrom functional import seq\n\nfrom qanta.reporting.report_generator import ReportGenerator\nfrom qanta.datasets.abstract import TrainingData, QuestionText, Answer\nfrom qanta.datasets.quiz_bowl import QuizBowlDataset, QuestionDatabase\nfrom qanta.config import conf\nfrom qanta.util import constants as c\nfrom qanta.util.io import safe_path\nfrom qanta import qlogging\n\n\nlog = qlogging.get(__name__)\n\n\ndef get_class(instance_module: str, instance_class: str):\n py_instance_module = importlib.import_module(instance_module)\n py_instance_class = getattr(py_instance_module, instance_class)\n return py_instance_class\n\n\nGuesserSpec = NamedTuple('GuesserSpec', [\n ('dependency_module', Optional[str]),\n ('dependency_class', Optional[str]),\n ('guesser_module', 
str),\n ('guesser_class', str)\n])\n\nGuess = namedtuple('Guess', 'fold guess guesser qnum score sentence token')\n\n\nclass AbstractGuesser(metaclass=ABCMeta):\n def __init__(self):\n \"\"\"\n Abstract class representing a guesser. All abstract methods must be implemented. Class\n construction should be light and not load data since this is reserved for the\n AbstractGuesser.load method.\n \"\"\"\n pass\n\n def qb_dataset(self) -> QuizBowlDataset:\n return QuizBowlDataset(guesser_train=True)\n\n @abstractmethod\n def train(self, training_data: TrainingData) -> None:\n \"\"\"\n Given training data, train this guesser so that it can produce guesses.\n\n training_data is a tuple of three elements:\n (train_x, train_y, properties).\n In this case train_x is a list of question runs. For example, if the answer for a question\n is \"Albert Einstein\" the runs might be [\"This\", \"This German\", \"This German physicist\", ...]\n train_y is a list of true labels. The questions are strings and the true labels are strings.\n Labels are in canonical form. Questions are not preprocessed in any way. To implement common\n pre-processing refer to the qanta/guesser/preprocessing module.\n\n properties is either None or a list of dictionaries that contain extra information about\n each training example\n\n :param training_data: training data in the format described above\n :return: This function does not return anything\n \"\"\"\n pass\n\n @abstractmethod\n def guess(self, questions: List[QuestionText], max_n_guesses: Optional[int]) -> List[List[Tuple[Answer, float]]]:\n \"\"\"\n Given a list of questions as text, return n_guesses number of guesses per question. Guesses\n must be returned in canonical form, are returned with a score in which higher is better, and\n must also be returned in sorted order with the best guess (highest score) at the front of\n the list and worst guesses (lowest score) at the bottom.\n\n It is guaranteed that before AbstractGuesser.guess is called that either\n AbstractGuesser.train is called or AbstractGuesser.load is called.\n\n :param questions: Questions to guess on\n :param max_n_guesses: Number of guesses to produce per question, if None then return all\n of them if possible\n :return: List of top guesses per question\n \"\"\"\n pass\n\n @classmethod\n @abstractmethod\n def targets(cls) -> List[str]:\n \"\"\"\n List of files located in directory that are produced by the train method, written by the\n save method, and loaded by the load method.\n :return: list of written files\n \"\"\"\n pass\n\n @classmethod\n def raw_targets(cls) -> List[str]:\n \"\"\"\n Similar to targets but it does not join a unique directory prefix. The provided paths are\n raw paths to the targets.\n :return: list of written files\n \"\"\"\n return []\n\n @classmethod\n def files(cls, directory: str) -> List[str]:\n return [os.path.join(directory, file) for file in cls.targets()] + cls.raw_targets()\n\n @classmethod\n @abstractmethod\n def load(cls, directory: str):\n \"\"\"\n Given the directory used for saving this guesser, create a new instance of the guesser, and\n load it for guessing or scoring.\n\n :param directory: directory the guesser was saved to\n :return: Instance of AbstractGuesser ready for calling guess/score\n \"\"\"\n pass\n\n @abstractmethod\n def save(self, directory: str) -> None:\n pass\n\n 
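# Illustrative shape of the guess() contract defined above: for two input\n # questions and max_n_guesses=2 a guesser might return\n # [[('Albert Einstein', 12.1), ('Niels Bohr', 10.4)],\n # [('France', 3.2), ('Spain', 2.9)]] (answers and scores are made up).\n\n 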
def display_name(self) -> str:\n \"\"\"\n Return the display name of this guesser which is used in reporting scripts to identify this\n particular guesser. By default this is the class name, but it can be overridden\n :return: display name of this guesser\n \"\"\"\n return self.__class__.__name__\n\n def parameters(self) -> Dict:\n \"\"\"\n Return the parameters of the model. This is displayed as part of the report to make\n identifying particular runs of particular hyper parameters easier. str(self.parameters())\n will be called at some point to display it as well as making a pickle of parameters.\n :return: model parameters\n \"\"\"\n return {}\n\n def generate_guesses(self, max_n_guesses: int, folds: List[str], word_skip=-1) -> pd.DataFrame:\n \"\"\"\n Generates guesses for this guesser for all questions in specified folds and returns it as a\n DataFrame\n\n WARNING: this method assumes that the guesser has been loaded with load or trained with\n train. Unexpected behavior may occur if that is not the case.\n :param max_n_guesses: generate at most this many guesses per question, sentence, and token\n :param folds: which folds to generate guesses for\n :param word_skip: by default, generate sentence level buzzes, if not set to -1 then generate\n buzzes every word_skip words\n :return: dataframe of guesses\n \"\"\"\n dataset = self.qb_dataset()\n questions_by_fold = dataset.questions_by_fold()\n\n q_folds = []\n q_qnums = []\n q_sentences = []\n q_tokens = []\n question_texts = []\n\n for fold in folds:\n questions = questions_by_fold[fold]\n for q in questions:\n for sent, token, text_list in q.partials(word_skip=word_skip):\n text = ' '.join(text_list)\n question_texts.append(text)\n q_folds.append(fold)\n q_qnums.append(q.qnum)\n q_sentences.append(sent)\n q_tokens.append(token)\n\n guesses_per_question = self.guess(question_texts, max_n_guesses)\n\n if len(guesses_per_question) != len(question_texts):\n raise ValueError(\n 'Guesser has wrong number of answers: len(guesses_per_question)={} len(question_texts)={}'.format(\n len(guesses_per_question), len(question_texts)))\n\n log.info('Creating guess dataframe from guesses...')\n df_qnums = []\n df_sentences = []\n df_tokens = []\n df_guesses = []\n df_scores = []\n df_folds = []\n df_guessers = []\n guesser_name = self.display_name()\n\n for i in range(len(question_texts)):\n guesses_with_scores = guesses_per_question[i]\n fold = q_folds[i]\n qnum = q_qnums[i]\n sentence = q_sentences[i]\n token = q_tokens[i]\n for guess, score in guesses_with_scores:\n df_qnums.append(qnum)\n df_sentences.append(sentence)\n df_tokens.append(token)\n df_guesses.append(guess)\n df_scores.append(score)\n df_folds.append(fold)\n df_guessers.append(guesser_name)\n\n return pd.DataFrame({\n 'qnum': df_qnums,\n 'sentence': df_sentences,\n 'token': df_tokens,\n 'guess': df_guesses,\n 'score': df_scores,\n 'fold': df_folds,\n 'guesser': df_guessers\n })\n\n @staticmethod\n def guess_path(directory: str, fold: str) -> str:\n return os.path.join(directory, 'guesses_{}.pickle'.format(fold))\n\n @staticmethod\n def save_guesses(guess_df: pd.DataFrame, directory: str, folds: List[str]):\n for fold in folds:\n log.info('Saving fold {}'.format(fold))\n fold_df = guess_df[guess_df.fold == fold]\n output_path = AbstractGuesser.guess_path(directory, fold)\n fold_df.to_pickle(output_path)\n\n @staticmethod\n def load_guesses(directory: str, folds=c.GUESSER_GENERATION_FOLDS) -> pd.DataFrame:\n \"\"\"\n Loads all the guesses pertaining to a guesser inferred from directory\n :param directory: where to load guesses from\n :param folds: folds to load, by default all of them\n :return: guesses across all folds for given directory\n 
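\n Example (hypothetical guesser directory):\n\n df = AbstractGuesser.load_guesses('output/guesser/qanta.guesser.dan.DanGuesser',\n folds=[c.GUESSER_DEV_FOLD])\n 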
\"\"\"\n assert len(folds) > 0\n guess_df = None\n for fold in folds:\n input_path = AbstractGuesser.guess_path(directory, fold)\n if guess_df is None:\n guess_df = pd.read_pickle(input_path)\n else:\n new_guesses_df = pd.read_pickle(input_path)\n guess_df = pd.concat([guess_df, new_guesses_df])\n\n return guess_df\n\n @staticmethod\n def load_all_guesses(directory_prefix='') -> pd.DataFrame:\n \"\"\"\n Loads all guesses from all guessers and folds\n :return:\n \"\"\"\n guess_df = None\n guessers = conf['guessers']\n for guesser_key, g in guessers.items():\n g = guessers[guesser_key]\n if g['enabled']:\n input_path = os.path.join(directory_prefix, c.GUESSER_TARGET_PREFIX, g['class'])\n if guess_df is None:\n guess_df = AbstractGuesser.load_guesses(input_path)\n else:\n new_guess_df = AbstractGuesser.load_guesses(input_path)\n guess_df = pd.concat([guess_df, new_guess_df])\n\n return guess_df\n\n @staticmethod\n def load_guess_score_map(guess_df: pd.DataFrame) -> defaultdict:\n guess_score_map = defaultdict(dict)\n for row in guess_df.itertuples():\n guess_score_map[row.guesser][(row.qnum, row.sentence, row.token, row.guess)] = row.score\n\n return guess_score_map\n\n def create_report(self, directory: str):\n with open(os.path.join(directory, 'guesser_params.pickle'), 'rb') as f:\n params = pickle.load(f)\n dev_guesses = AbstractGuesser.load_guesses(directory, folds=[c.GUESSER_DEV_FOLD])\n\n qdb = QuestionDatabase()\n questions = qdb.all_questions()\n\n # Compute recall and accuracy\n dev_recall = compute_fold_recall(dev_guesses, questions)\n dev_questions = {qnum: q for qnum, q in questions.items() if q.fold == c.GUESSER_DEV_FOLD}\n dev_recall_stats = compute_recall_at_positions(dev_recall)\n dev_summary_accuracy = compute_summary_accuracy(dev_questions, dev_recall_stats)\n dev_summary_recall = compute_summary_recall(dev_questions, dev_recall_stats)\n\n report_to_kuro(params['kuro_trial_id'] if 'kuro_trial_id' in params else None, dev_summary_accuracy)\n\n accuracy_plot('/tmp/dev_accuracy.png', dev_summary_accuracy, 'Guesser Dev')\n recall_plot('/tmp/dev_recall.png', dev_questions, dev_summary_recall, 'Guesser Dev')\n\n # Obtain metrics on number of answerable questions based on the dataset requested\n all_answers = {g for g in qdb.all_answers().values()}\n all_questions = list(qdb.all_questions().values())\n answer_lookup = {qnum: guess for qnum, guess in qdb.all_answers().items()}\n dataset = self.qb_dataset()\n training_data = dataset.training_data()\n\n min_n_answers = {g for g in training_data[1]}\n\n train_questions = [q for q in all_questions if q.fold == c.GUESSER_TRAIN_FOLD]\n train_answers = {q.page for q in train_questions}\n\n dev_questions = [q for q in all_questions if q.fold == c.GUESSER_DEV_FOLD]\n dev_answers = {q.page for q in dev_questions}\n\n min_n_train_questions = [q for q in train_questions if q.page in min_n_answers]\n\n all_common_train_dev = train_answers.intersection(dev_answers)\n min_common_train_dev = min_n_answers.intersection(dev_answers)\n\n all_train_answerable_questions = [q for q in train_questions if q.page in train_answers]\n all_dev_answerable_questions = [q for q in dev_questions if q.page in train_answers]\n\n min_train_answerable_questions = [q for q in train_questions if q.page in min_n_answers]\n min_dev_answerable_questions = [q for q in dev_questions if q.page in min_n_answers]\n\n # The next section of code generates the percent of questions correct by the number\n # of training examples.\n Row = namedtuple('Row', [\n 'fold', 'guess', 
'guesser',\n 'qnum', 'score', 'sentence', 'token',\n 'correct', 'answerable_1', 'answerable_2',\n 'n_examples'\n ])\n\n train_example_count_lookup = seq(train_questions) \\\n .group_by(lambda q: q.page) \\\n .smap(lambda page, group: (page, len(group))) \\\n .dict()\n\n def guess_to_row(*args):\n guess = args[1]\n qnum = args[3]\n answer = answer_lookup[qnum]\n\n return Row(\n *args,\n answer == guess,\n answer in train_answers,\n answer in min_n_answers,\n train_example_count_lookup[answer] if answer in train_example_count_lookup else 0\n )\n\n dev_data = seq(dev_guesses) \\\n .smap(guess_to_row) \\\n .group_by(lambda r: (r.qnum, r.sentence)) \\\n .smap(lambda key, group: seq(group).max_by(lambda q: q.sentence)) \\\n .to_pandas(columns=Row._fields)\n dev_data['correct_int'] = dev_data['correct'].astype(int)\n dev_data['ones'] = 1\n dev_counts = dev_data\\\n .groupby('n_examples')\\\n .agg({'correct_int': np.mean, 'ones': np.sum})\\\n .reset_index()\n correct_by_n_count_plot('/tmp/dev_correct_by_count.png', dev_counts, 'Guesser Dev')\n n_train_vs_fold_plot('/tmp/n_train_vs_dev.png', dev_counts, 'Guesser Dev')\n\n with open(os.path.join(directory, 'guesser_report.pickle'), 'wb') as f:\n pickle.dump({\n 'dev_accuracy': dev_summary_accuracy,\n 'guesser_name': self.display_name(),\n 'guesser_params': params\n }, f)\n\n md_output = safe_path(os.path.join(directory, 'guesser_report.md'))\n pdf_output = safe_path(os.path.join(directory, 'guesser_report.pdf'))\n report = ReportGenerator('guesser.md')\n report.create({\n 'dev_recall_plot': '/tmp/dev_recall.png',\n 'dev_accuracy_plot': '/tmp/dev_accuracy.png',\n 'dev_accuracy': dev_summary_accuracy,\n 'guesser_name': self.display_name(),\n 'guesser_params': params,\n 'n_answers_all_folds': len(all_answers),\n 'n_total_train_questions': len(train_questions),\n 'n_train_questions': len(min_n_train_questions),\n 'n_dev_questions': len(dev_questions),\n 'n_total_train_answers': len(train_answers),\n 'n_train_answers': len(min_n_answers),\n 'n_dev_answers': len(dev_answers),\n 'all_n_common_train_dev': len(all_common_train_dev),\n 'all_p_common_train_dev': len(all_common_train_dev) / max(1, len(dev_answers)),\n 'min_n_common_train_dev': len(min_common_train_dev),\n 'min_p_common_train_dev': len(min_common_train_dev) / max(1, len(dev_answers)),\n 'all_n_answerable_train': len(all_train_answerable_questions),\n 'all_p_answerable_train': len(all_train_answerable_questions) / len(train_questions),\n 'all_n_answerable_dev': len(all_dev_answerable_questions),\n 'all_p_answerable_dev': len(all_dev_answerable_questions) / len(dev_questions),\n 'min_n_answerable_train': len(min_train_answerable_questions),\n 'min_p_answerable_train': len(min_train_answerable_questions) / len(train_questions),\n 'min_n_answerable_dev': len(min_dev_answerable_questions),\n 'min_p_answerable_dev': len(min_dev_answerable_questions) / len(dev_questions),\n 'dev_correct_by_count_plot': '/tmp/dev_correct_by_count.png',\n 'n_train_vs_dev_plot': '/tmp/n_train_vs_dev.png',\n }, md_output, pdf_output)\n\n @staticmethod\n def list_enabled_guessers() -> List[GuesserSpec]:\n guessers = conf['guessers']\n enabled_guessers = []\n for g in guessers.values():\n if g['enabled']:\n guesser = g['class']\n dependency = g['luigi_dependency']\n parts = guesser.split('.')\n guesser_module = '.'.join(parts[:-1])\n guesser_class = parts[-1]\n\n if dependency is None:\n dependency_module = None\n dependency_class = None\n else:\n parts = dependency.split('.')\n dependency_module = '.'.join(parts[:-1])\n 
dependency_class = parts[-1]\n\n enabled_guessers.append(GuesserSpec(dependency_module, dependency_class, guesser_module, guesser_class))\n\n return enabled_guessers\n\n @staticmethod\n def output_path(guesser_module: str, guesser_class: str, file: str):\n guesser_path = '{}.{}'.format(guesser_module, guesser_class)\n return safe_path(os.path.join(c.GUESSER_TARGET_PREFIX, guesser_path, file))\n\n def web_api(self, host='0.0.0.0', port=5000, debug=False):\n from flask import Flask, jsonify, request\n\n app = Flask(__name__)\n\n @app.route('/api/answer_question', methods=['POST'])\n def answer_question():\n text = request.form['text']\n guess, score = self.guess([text], 1)[0][0]\n return jsonify({'guess': guess, 'score': float(score)})\n\n app.run(host=host, port=port, debug=debug)\n\n @staticmethod\n def multi_guesser_web_api(guesser_names: List[str], host='0.0.0.0', port=5000, debug=False):\n from flask import Flask, jsonify, request\n\n app = Flask(__name__)\n\n guesser_lookup = {}\n for name, g in conf['guessers'].items():\n g_qualified_name = g['class']\n parts = g_qualified_name.split('.')\n g_module = '.'.join(parts[:-1])\n g_classname = parts[-1]\n guesser_lookup[name] = (get_class(g_module, g_classname), g_qualified_name)\n\n log.info(f'Loading guessers: {guesser_names}')\n guessers = {}\n for name in guesser_names:\n if name in guesser_lookup:\n g_class, g_qualified_name = guesser_lookup[name]\n guesser_path = os.path.join('output/guesser', g_qualified_name)\n log.info(f'Loading \"{name}\" corresponding to \"{g_qualified_name}\" located at \"{guesser_path}\"')\n guessers[name] = g_class.load(guesser_path)\n else:\n log.info(f'Guesser with name=\"{name}\" not found')\n\n @app.route('/api/guesser', methods=['POST'])\n def guess():\n if 'guesser_name' not in request.form:\n response = jsonify({'errors': 'Missing expected field \"guesser_name\"'})\n response.status_code = 400\n return response\n\n if 'text' not in request.form:\n response = jsonify({'errors': 'Missing expected field \"text\"'})\n response.status_code = 400\n return response\n\n g_name = request.form['guesser_name']\n if g_name not in guessers:\n response = jsonify(\n {'errors': f'Guesser \"{g_name}\" invalid, options are: \"{list(guessers.keys())}\"'}\n )\n response.status_code = 400\n return response\n text = request.form['text']\n guess, score = guessers[g_name].guess([text], 1)[0][0]\n return jsonify({'guess': guess, 'score': float(score)})\n\n app.run(host=host, port=port, debug=debug)\n\nQuestionRecall = namedtuple('QuestionRecall', ['start', 'p_25', 'p_50', 'p_75', 'end'])\n\n\ndef report_to_kuro(kuro_trial_id, summary_accuracy):\n if kuro_trial_id is not None:\n try:\n from kuro.client import Trial\n trial = Trial.from_trial_id(kuro_trial_id)\n trial.report_metric('dev_acc_start', summary_accuracy['start'])\n trial.report_metric('dev_acc_25', summary_accuracy['p_25'])\n trial.report_metric('dev_acc_50', summary_accuracy['p_50'])\n trial.report_metric('dev_acc_75', summary_accuracy['p_75'])\n trial.report_metric('dev_acc_end', summary_accuracy['end'])\n trial.end()\n log.info('Logged guesser accuracies to kuro and ended trial')\n except:\n pass\n\n\ndef question_recall(guesses, qst, question_lookup):\n qnum, sentence, token = qst\n answer = question_lookup[qnum].page\n sorted_guesses = sorted(guesses, reverse=True, key=lambda g: g.score)\n for i, guess_row in enumerate(sorted_guesses, 1):\n if answer == guess_row.guess:\n return qnum, sentence, token, i\n return qnum, sentence, token, None\n\n\ndef 
compute_fold_recall(guess_df, questions):\n return seq(guess_df)\\\n .smap(Guess)\\\n .group_by(lambda g: (g.qnum, g.sentence, g.token))\\\n .smap(lambda qst, guesses: question_recall(guesses, qst, questions))\\\n .group_by(lambda x: x[0])\\\n .dict()\n\n\ndef start_of_question(group):\n return seq(group).min_by(lambda g: g[1])[3]\n\n\ndef make_percent_of_question(percent):\n def percent_of_question(group):\n n_sentences = len(group)\n middle = max(1, round(n_sentences * percent))\n middle_element = seq(group).filter(lambda g: g[1] == middle).head_option()\n if middle_element is None:\n return None\n else:\n return middle_element[3]\n return percent_of_question\n\n\ndef end_of_question(group):\n return seq(group).max_by(lambda g: g[1])[3]\n\npercent_25_of_question = make_percent_of_question(.25)\npercent_50_of_question = make_percent_of_question(.5)\npercent_75_of_question = make_percent_of_question(.75)\n\n\ndef compute_recall_at_positions(recall_lookup):\n recall_stats = {}\n for q in recall_lookup:\n g = recall_lookup[q]\n start = start_of_question(g)\n p_25 = percent_25_of_question(g)\n p_50 = percent_50_of_question(g)\n p_75 = percent_75_of_question(g)\n end = end_of_question(g)\n recall_stats[q] = QuestionRecall(start, p_25, p_50, p_75, end)\n return recall_stats\n\n\ndef compute_summary_accuracy(questions, recall_stats):\n accuracy_stats = {\n 'start': 0,\n 'p_25': 0,\n 'p_50': 0,\n 'p_75': 0,\n 'end': 0\n }\n n_questions = len(questions)\n for q in questions:\n if q in recall_stats:\n if recall_stats[q].start == 1:\n accuracy_stats['start'] += 1\n if recall_stats[q].p_25 == 1:\n accuracy_stats['p_25'] += 1\n if recall_stats[q].p_50 == 1:\n accuracy_stats['p_50'] += 1\n if recall_stats[q].p_75 == 1:\n accuracy_stats['p_75'] += 1\n if recall_stats[q].end == 1:\n accuracy_stats['end'] += 1\n\n accuracy_stats['start'] /= n_questions\n accuracy_stats['p_25'] /= n_questions\n accuracy_stats['p_50'] /= n_questions\n accuracy_stats['p_75'] /= n_questions\n accuracy_stats['end'] /= n_questions\n return accuracy_stats\n\n\ndef compute_summary_recall(questions, recall_stats):\n recall_numbers = {\n 'start': [],\n 'p_25': [],\n 'p_50': [],\n 'p_75': [],\n 'end': []\n }\n for q in questions:\n if q in recall_stats:\n if recall_stats[q].start is not None:\n recall_numbers['start'].append(recall_stats[q].start)\n if recall_stats[q].p_25 is not None:\n recall_numbers['p_25'].append(recall_stats[q].p_25)\n if recall_stats[q].p_50 is not None:\n recall_numbers['p_50'].append(recall_stats[q].p_50)\n if recall_stats[q].p_75 is not None:\n recall_numbers['p_75'].append(recall_stats[q].p_75)\n if recall_stats[q].end is not None:\n recall_numbers['end'].append(recall_stats[q].end)\n\n return recall_numbers\n\n\ndef compute_recall_plot_data(recall_positions, n_questions,\n max_recall=conf['n_guesses'] + int(conf['n_guesses'] * .1)):\n \"\"\"\n Compute the recall, compute recall out a little further than number of guesses to give the\n plot that uses this data some margin on the right side\n \"\"\"\n x = list(range(1, max_recall + 1))\n y = [0] * max_recall\n for r in recall_positions:\n y[r - 1] += 1\n y = np.cumsum(y) / n_questions\n return x, y\n\n\ndef recall_plot(output, questions, summary_recall, fold_name):\n data = []\n for position, recall_positions in summary_recall.items():\n x_data, y_data = compute_recall_plot_data(recall_positions, len(questions))\n for x, y in zip(x_data, y_data):\n data.append({'x': x, 'y': y, 'position': position})\n data = pd.DataFrame(data)\n g = sb.FacetGrid(data=data, 
hue='position', size=5, aspect=1.5)\n g.map(plt.plot, 'x', 'y')\n g.add_legend()\n plt.xlabel('Number of Guesses')\n plt.ylabel('Recall')\n plt.subplots_adjust(top=.9)\n g.fig.suptitle('Guesser Recall Through Question on {}'.format(fold_name))\n plt.savefig(output, dpi=200, format='png')\n plt.clf()\n plt.cla()\n plt.close()\n\n\ndef accuracy_plot(output, summary_accuracy, fold_name):\n pd.DataFrame([\n ('start', summary_accuracy['start']),\n ('25%', summary_accuracy['p_25']),\n ('50%', summary_accuracy['p_50']),\n ('75%', summary_accuracy['p_75']),\n ('end', summary_accuracy['end'])],\n columns=['Position', 'Accuracy']\n ).plot.bar('Position', 'Accuracy', title='Accuracy by Position on {}'.format(fold_name))\n plt.savefig(output, dpi=200, format='png')\n plt.clf()\n plt.cla()\n plt.close()\n\n\ndef correct_by_n_count_plot(output, counts, fold):\n counts.plot('n_examples', 'correct_int')\n plt.title('{} fold'.format(fold))\n plt.xlabel('Number of Training Examples')\n plt.ylabel('Percent Correct')\n plt.savefig(output, dpi=200, format='png')\n plt.clf()\n plt.cla()\n plt.close()\n\n\ndef n_train_vs_fold_plot(output, counts, fold):\n counts.plot('n_examples', 'ones')\n plt.title('{} fold'.format(fold))\n plt.xlabel('Number of Training Examples')\n plt.ylabel('Number of {} Examples'.format(fold))\n plt.savefig(output, dpi=200, format='png')\n plt.clf()\n plt.cla()\n plt.close()\n\n\ndef n_guesser_report(report_path, fold, n_samples=10):\n qdb = QuestionDatabase()\n question_lookup = qdb.all_questions()\n questions = [q for q in question_lookup.values() if q.fold == fold]\n guess_dataframes = []\n folds = [fold]\n for g_spec in AbstractGuesser.list_enabled_guessers():\n path = AbstractGuesser.output_path(g_spec.guesser_module, g_spec.guesser_class, '')\n guess_dataframes.append(AbstractGuesser.load_guesses(path, folds=folds))\n df = pd.concat(guess_dataframes) # type: pd.DataFrame\n guessers = set(df['guesser'].unique())\n n_guessers = len(guessers)\n guesses = []\n for name, group in df.groupby(['guesser', 'qnum', 'sentence', 'token']):\n top_guess = group.sort_values('score', ascending=False).iloc[0]\n guesses.append(top_guess)\n\n top_df = pd.DataFrame.from_records(guesses)\n\n guess_lookup = {}\n for name, group in top_df.groupby(['qnum', 'sentence', 'token']):\n guess_lookup[name] = group\n\n performance = {}\n question_positions = {}\n n_correct_samples = defaultdict(list)\n for q in questions:\n page = q.page\n positions = [(sent, token) for sent, token, _ in q.partials()]\n # Since partials() passes word_skip=-1 each entry is guaranteed to be a sentence\n n_sentences = len(positions)\n q_positions = {\n 'start': 1,\n 'p_25': max(1, round(n_sentences * .25)),\n 'p_50': max(1, round(n_sentences * .5)),\n 'p_75': max(1, round(n_sentences * .75)),\n 'end': len(positions)\n }\n question_positions[q.qnum] = q_positions\n for sent, token in positions:\n key = (q.qnum, sent, token)\n if key in guess_lookup:\n guesses = guess_lookup[key]\n n_correct = (guesses.guess == page).sum()\n n_correct_samples[n_correct].append(key)\n if n_correct == 0:\n correct_guessers = 'None'\n elif n_correct == n_guessers:\n correct_guessers = 'All'\n else:\n correct_guessers = '/'.join(sorted(guesses[guesses.guess == page].guesser.values))\n else:\n n_correct = 0\n correct_guessers = 'None'\n performance[key] = (n_correct, correct_guessers)\n\n start_accuracies = []\n p_25_accuracies = []\n p_50_accuracies = []\n p_75_accuracies = []\n end_accuracies = []\n\n for q in questions:\n qnum = q.qnum\n start_pos = 
question_positions[qnum]['start']\n p_25_pos = question_positions[qnum]['p_25']\n p_50_pos = question_positions[qnum]['p_50']\n p_75_pos = question_positions[qnum]['p_75']\n end_pos = question_positions[qnum]['end']\n\n start_accuracies.append((*performance[(qnum, start_pos, 0)], 'start'))\n p_25_accuracies.append((*performance[(qnum, p_25_pos, 0)], 'p_25'))\n p_50_accuracies.append((*performance[(qnum, p_50_pos, 0)], 'p_50'))\n p_75_accuracies.append((*performance[(qnum, p_75_pos, 0)], 'p_75'))\n end_accuracies.append((*performance[(qnum, end_pos, 0)], 'end'))\n\n all_accuracies = start_accuracies + p_25_accuracies + p_50_accuracies + p_75_accuracies + end_accuracies\n\n perf_df = pd.DataFrame.from_records(all_accuracies, columns=['n_guessers_correct', 'correct_guessers', 'position'])\n perf_df['count'] = 1\n n_questions = len(questions)\n\n aggregate_df = (\n perf_df.groupby(['position', 'n_guessers_correct', 'correct_guessers']).count() / n_questions\n ).reset_index()\n\n fig, ax = plt.subplots(figsize=(12, 8), nrows=2, ncols=3, sharey=True, sharex=True)\n\n positions = {\n 'start': (0, 0),\n 'p_25': (0, 1),\n 'p_50': (1, 0),\n 'p_75': (1, 1),\n 'end': (1, 2)\n }\n\n position_labels = {\n 'start': 'Start',\n 'p_25': '25%',\n 'p_50': '50%',\n 'p_75': '75%',\n 'end': '100%'\n }\n ax[(0, 2)].axis('off')\n\n for p, key in positions.items():\n data = aggregate_df[aggregate_df.position == p].pivot(\n index='n_guessers_correct',\n columns='correct_guessers'\n ).fillna(0)['count']\n plot_ax = ax[key]\n data.plot.bar(stacked=True, ax=plot_ax, title='Question Position: {}'.format(position_labels[p]))\n handles, labels = plot_ax.get_legend_handles_labels()\n ax_legend = plot_ax.legend()\n ax_legend.set_visible(False)\n plot_ax.set(xlabel='Number of Correct Guessers', ylabel='Accuracy')\n\n for plot_ax in list(ax.flatten()):\n for tk in plot_ax.get_yticklabels():\n tk.set_visible(True)\n for tk in plot_ax.get_xticklabels():\n tk.set_rotation('horizontal')\n fig.legend(handles, labels, bbox_to_anchor=(.8, .75))\n fig.suptitle('Accuracy Breakdown by Guesser')\n accuracy_by_n_correct_plot_path = '/tmp/accuracy_by_n_correct_{}.png'.format(fold)\n fig.savefig(accuracy_by_n_correct_plot_path, dpi=200)\n\n sampled_questions_by_correct = sample_n_guesser_correct_questions(\n question_lookup, guess_lookup, n_correct_samples, n_samples=n_samples\n )\n\n report = ReportGenerator('compare_guessers.md')\n report.create({\n 'dev_accuracy_by_n_correct_plot': accuracy_by_n_correct_plot_path,\n 'sampled_questions_by_correct': sampled_questions_by_correct\n }, None, safe_path(report_path))\n\n\ndef sample_n_guesser_correct_questions(question_lookup, guess_lookup, n_correct_samples, n_samples=10):\n sampled_questions_by_correct = defaultdict(list)\n dataset = QuizBowlDataset(guesser_train=True)\n training_data = dataset.training_data()\n answer_counts = defaultdict(int)\n for ans in training_data[1]:\n answer_counts[ans] += 1\n\n for n_correct, keys in n_correct_samples.items():\n samples = random.sample(keys, min(n_samples, len(keys)))\n for key in samples:\n qnum, sent, token = key\n page = question_lookup[qnum].page\n text = question_lookup[qnum].get_text(sent, token)\n guesses = guess_lookup[key]\n correct_guessers = tuple(guesses[guesses.guess == page].guesser)\n wrong_guessers = tuple(guesses[guesses.guess != page].guesser)\n sampled_questions_by_correct[n_correct].append(\n (text, key, page, answer_counts[page], correct_guessers, wrong_guessers)\n )\n\n return 
sampled_questions_by_correct\n","sub_path":"qanta/guesser/abstract.py","file_name":"abstract.py","file_ext":"py","file_size_in_byte":33964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"200384292","text":"#import time\nimport operator\n#start = time.time()\n\nminm = ['',None] #list to store id and balance with the lowest balance\nresult=dict() #dict to store result\nc = 0 #count number of rows\nfor i in range(40):\n    filename = 'data/bintang_pradana_{:02d}.csv'.format(i)\n    with open(filename) as f:\n        #print('processing file: {}'.format(filename))\n        next(f)\n        for line in f:\n            id, balance = line.split(',')\n            if c<1000000: #if row < 1M, store the id and balance\n                c+=1\n                b = int(balance)\n                result[id]=b\n                if c==1: #finding lowest value balance\n                    minm = [id,b]\n                elif b<=minm[1]:\n                    minm = [id,b]\n            elif int(balance) > minm[1]: #if row > 1M, store only when bigger than minimum\n                b = int(balance)\n                result[id]=b\n                del result[minm[0]]\n                #recompute the minimum: after evicting the old one, the new minimum\n                #is not necessarily the row that was just inserted\n                minm = min(result.items(), key=operator.itemgetter(1))\n\n#write to file\nout = open('task_3_result.csv', 'w')\nout.write('account_id,account_balance\\n')\nfor key, value in sorted(result.items(), key=operator.itemgetter(1), reverse=True):\n    out.write('{},{}\\n'.format(key,value))\nout.close()\n#end = time.time()\n#print(end-start)","sub_path":"task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"581667256","text":"class ProcedureMixin(object):\n\n    results = None\n    output = None\n\n    def get_input_params(self, params = []):\n        return [getattr(self, param[8:], None) for param in params]\n\n    def set_output_params(self, params = [], data = []):\n        self.output = data\n        for i, param in enumerate(params):\n            name = param[7:]\n            if(hasattr(self, name)):\n                setattr(self, name, data[i])\n\n    def set_results(self, params = None):\n        self.results = params\n        try:\n            for key, value in params[0][0].items():\n                if (hasattr(self, key)):\n                    setattr(self, key, value)\n        except (TypeError, IndexError, AttributeError):\n            pass\n\n\n    def get_output_params(self):\n        return self.output\n\n    def get_results(self):\n        return self.results\n\n    def get_result(self, index = 0):\n        try:\n            return self.results[index]\n        except IndexError:\n            return None\n\n    def save(self, *args, **kwargs):\n        pass\n\n\ndef procedure_defaults(request, params=None, data=None):\n    # Mutable default arguments are shared between calls; build fresh lists instead.\n    params = list(params) if params is not None else []\n    data = list(data) if data is not None else []\n\n    params += [\n        ':iparam_session_id',\n        ':iparam_user_id',\n        ':iparam_screen_id',\n        ':iparam_debug_sproc',\n        ':iparam_audit_screen_visit'\n    ]\n\n    data += [\n        request.user.session_id,\n        request.user.user_id,\n        request.current_screen.screen_id,\n        request.user.debug_sproc,\n        request.user.audit_screen_visit\n    ]\n\n    return params, data","sub_path":"components/db/procedureMixin.py","file_name":"procedureMixin.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
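A note on the top-1M selection in task_3.py above: once the buffer is full, every replacement must be followed by a fresh minimum scan, which makes the hot path O(n) per better-than-minimum row. The standard-library alternative is a bounded min-heap, where each replacement costs O(log n). A minimal sketch of that approach; the 1,000,000 cutoff mirrors the script above, but top_n_balances and its input shape are illustrative, not part of the original:

import heapq

def top_n_balances(rows, n=1000000):
    """rows yields (account_id, balance) pairs; returns the n largest balances.

    A min-heap of size n keeps the smallest retained balance at heap[0],
    so each incoming row is one comparison plus an O(log n) replace.
    """
    heap = []  # entries are (balance, account_id)
    for account_id, balance in rows:
        if len(heap) < n:
            heapq.heappush(heap, (balance, account_id))
        elif balance > heap[0][0]:
            heapq.heapreplace(heap, (balance, account_id))
    return sorted(heap, reverse=True)

# e.g. top_n_balances([('a', 3), ('b', 9), ('c', 5)], n=2) -> [(9, 'b'), (5, 'c')]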
+{"seq_id":"546034792","text":"from random import randint\nfrom sorting import bubble_sort, selection_sort, insertion_sort, tree_sort, cocktail_shaker_sort\n\ndef merge(list1, list2):\n    # merged_array = [0]*(len(list1)+len(list2))\n    merged_array = []\n    left_index = 0\n    right_index = 0\n\n    while left_index < len(list1) and right_index < len(list2):\n        if list1[left_index] < list2[right_index]:\n            merged_array.append(list1[left_index])\n            left_index += 1\n        else:\n            merged_array.append(list2[right_index])\n            right_index += 1\n\n    merged_array.extend(list1[left_index:])\n    merged_array.extend(list2[right_index:])\n    return merged_array\n\ndef merge_sort_basic(items):\n    list1 = cocktail_shaker_sort(items[:len(items)//2])\n    list2 = cocktail_shaker_sort(items[len(items)//2:])\n    return merge(list1, list2)\n\ndef merge_sort_recursive(items):\n    if len(items) <= 1:\n        return items\n\n    list1 = merge_sort_recursive(items[:len(items)//2])\n    list2 = merge_sort_recursive(items[len(items)//2:])\n    return merge(list1, list2)\n\ndef merge_sort_basic_random(items):\n    alg = randint(1, 5)\n    if alg == 1:\n        print('Using Bubble Sort.')\n        print('Running Times:')\n        print('Worst Case:')\n        list1 = bubble_sort(items[:len(items)//2])\n        list2 = bubble_sort(items[len(items)//2:])\n    elif alg == 2:\n        print('Using Selection Sort.')\n        print('Running Times:')\n        print('Worst Case:')\n        list1 = selection_sort(items[:len(items)//2])\n        list2 = selection_sort(items[len(items)//2:])\n    elif alg == 3:\n        print('Using Insertion Sort.')\n        print('Running Times:')\n        print('Worst Case:')\n        list1 = insertion_sort(items[:len(items)//2])\n        list2 = insertion_sort(items[len(items)//2:])\n    elif alg == 4:\n        print('Using Tree Sort.')\n        print('Running Times:')\n        print('Worst Case:')\n        list1 = tree_sort(items[:len(items)//2])\n        list2 = tree_sort(items[len(items)//2:])\n    elif alg == 5:\n        print('Using Cocktail Shaker Sort.')\n        print('Running Times:')\n        print('Worst Case:')\n        list1 = cocktail_shaker_sort(items[:len(items)//2])\n        list2 = cocktail_shaker_sort(items[len(items)//2:])\n\n    return merge(list1, list2)\n\nif __name__ == '__main__':\n    data = [3, 5, 6, 8, 6, 3, 2, 2, 5, 6, 5, 5, 1]\n    data_sorted = [1, 2, 2, 3, 3, 5, 5, 5, 5, 6, 6, 6, 8]\n    list1 = data_sorted[:len(data_sorted)//2]\n    list2 = data_sorted[len(data_sorted)//2:]\n    # merge(list1, list2)\n    # print(merge_sort_basic(data))\n    print(merge_sort_recursive(data))\n","sub_path":"source/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"351659868","text":"# Sample Line\nimport numpy as np\n\n\ndef sample_line(sampling_model, latent_dimensions, word2idx, idx2word, max_sequence_length):\n    # initial inputs\n    np_input = np.array([[word2idx['<sos>']]])\n    h = np.zeros((1, latent_dimensions))\n    c = np.zeros((1, latent_dimensions))\n\n    # so we know when to quit\n    eos = word2idx['<eos>']\n\n    # store the output here\n    output_sentence = []\n\n    for number in range(max_sequence_length):\n        o, h, c = sampling_model.predict([np_input, h, c])\n\n        if number == 0:\n            word_one = word2idx['this']\n            output_sentence.append(idx2word.get(word_one, '<unk:%s>' % word_one))\n            continue\n\n        if number == 1:\n            word_one = word2idx['movie']\n            output_sentence.append(idx2word.get(word_one, '<unk:%s>' % word_one))\n            continue\n\n        if number == 2:\n            word_one = word2idx['is']\n            output_sentence.append(idx2word.get(word_one, '<unk:%s>' % word_one))\n            continue\n\n        # print(\"o.shape:\", o.shape, o[0,0,:10])\n        # idx = np.argmax(o[0,0])\n        probs = o[0, 0]\n        if np.argmax(probs) == 0:\n            print(\"wtf\")\n            probs[0] = 0\n        probs /= probs.sum()\n        idx = np.random.choice(len(probs), p=probs)\n        if idx == eos:\n            break\n\n        # accumulate output\n        output_sentence.append(idx2word.get(idx, '<unk:%s>' % idx))\n\n        # make the next input into model\n        np_input[0, 0] = idx\n\n    return ' '.join(output_sentence)\n","sub_path":"lib/sample_line.py","file_name":"sample_line.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
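A quick sanity check for the merge/merge-sort pair above, runnable on its own; after the floor-division fixes the slices always receive integer indices. msort here is a condensed restatement of merge_sort_recursive, not a new algorithm:

def msort(items):
    if len(items) <= 1:
        return list(items)
    mid = len(items) // 2                     # integer midpoint, a valid slice index
    left, right = msort(items[:mid]), msort(items[mid:])
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):   # two-way merge of the sorted halves
        if left[i] < right[j]:
            merged.append(left[i]); i += 1
        else:
            merged.append(right[j]); j += 1
    return merged + left[i:] + right[j:]

assert msort([3, 5, 6, 8, 6, 3, 2, 2, 5, 6, 5, 5, 1]) == sorted([3, 5, 6, 8, 6, 3, 2, 2, 5, 6, 5, 5, 1])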
+{"seq_id":"325221471","text":"# coding=gbk\nimport pandas as pd\ndf = pd.read_excel(\"/home/sam/Documents/Excel/Book2.xlsx\",\"Sheet1\")\n# print(df.shape)\n# _rowCount = len(df)\n# print(\"Total rows:{0}\".format(_rowCount))\n\n# for i in range(0, _rowCount):\n#     print(i)\n# .ix[0:50,0:1]\n# df.apply(lambda x:f(x),axis=0)\nfor i in df.index:\n    dic = {}\n    for j in df.columns:\n        # print(j)\n        # if(j):\n        dic[\"column\"+str(i)]=df.loc[i]\n        print(dic)\n        # print(\"column{0}:{1}\".format(i,))\n        # print(j)\n        # print(df[j][i])\n\n","sub_path":"DataQuest/PythonBasic/wordcheck/pandas_01.py","file_name":"pandas_01.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"606951222","text":"from numpy import *\nfrom math import *\n\ndef getDFT(signal, Fs, PLOT=None):\n\n    #\n    # function [FFT, Freq] = getDFT(signal, Fs, PLOT)\n    #\n    # This function returns the DFT of a discrete signal and the\n    # respective frequency range.\n    #\n    # ARGUMENTS:\n    # - signal: vector containing the samples of the signal\n    # - Fs:     the sampling frequency\n    # - PLOT:   use this argument if the FFT (and the respective\n    #           frequency values) need to be returned in the\n    #           [-fs/2..fs/2] range. Otherwise, only half of\n    #           the spectrum is returned.\n    #\n    # RETURNS:\n    # - FFT:    the magnitude of the DFT coefficients\n    # - Freq:   the corresponding frequencies (in Hz)\n    #\n\n    N = len(signal)# length of signal\n    # compute the magnitude of the spectrum\n    # (and normalize by the number of samples):\n    FFT = abs(fft.fft(signal)) / N\n\n    if PLOT is None: # return the first half of the spectrum:\n        FFT = FFT[0 : int(ceil(N/2))]\n        Freq = (Fs/2) * arange(1,ceil(N/2)+2) / (ceil(N/2)+1) # define the frequency axis\n    else:\n        # ... or return the whole spectrum\n        # (in the range -fs/2 to fs/2)\n        FFT = fft.fftshift(FFT)\n        if mod(N, 2) == 0: # define the frequency axis:\n            Freq = arange(-N/2, N/2) # if N is even\n        else:\n            Freq = arange(-(N-1)/2, (N-1)/2+1) # if N is odd\n        Freq = (Fs/2) * Freq / ceil(N/2)\n    return FFT, Freq\n","sub_path":"ml-scripts/python-training/getDFT.py","file_name":"getDFT.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
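The half-spectrum branch of getDFT above can be cross-checked against numpy's real-input FFT helpers, which return the one-sided magnitudes and the matching bin frequencies directly. A small sketch; the function name and test signal are made up for illustration:

import numpy as np

def half_spectrum(signal, fs):
    n = len(signal)
    mag = np.abs(np.fft.rfft(signal)) / n      # one-sided magnitudes, same 1/N scaling as getDFT
    freq = np.fft.rfftfreq(n, d=1.0 / fs)      # bin centers in Hz
    return mag, freq

t = np.arange(0, 1, 1.0 / 128)                 # 1 s sampled at 128 Hz
mag, freq = half_spectrum(np.sin(2 * np.pi * 5 * t), 128)
print(freq[np.argmax(mag)])                    # ~5.0, the test tone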
+{"seq_id":"51128368","text":"# -*- coding: UTF-8 -*-\nfrom collections import Counter\nimport math\nclass Ngram(object):\n    def __init__(self, max_n, all_word_num=1, lambda_unigram=0.9, lambda_ngram=0.9):\n        self.max_n = max_n\n        # counters used during training\n        ngram_counter = {}\n        for n in range(1, max_n+1):\n            ngram_counter[n] = Counter()\n        self.ngram_counter = ngram_counter\n\n        # probabilities used at test time\n        ngram_model = {}\n        for n in range(1, max_n+1):\n            ngram_model[n] = {} \n        self.ngram_model = ngram_model\n        self.all_word_num = all_word_num\n        self.lambda_unigram = lambda_unigram\n        self.lambda_ngram = lambda_ngram\n\n\n    def learn(self, words):\n        for i in range(len(words)):\n            for n in range(1, self.max_n+1):\n                ngram = self.get_ngram(n, i, words)\n                if ngram is not None:\n                    self.ngram_counter[n][ngram] += 1\n\n    def get_ngram(self, n, idx, words):\n        if idx-n+1 >= 0:\n            return tuple(words[idx-n+1:idx+1])\n        else:\n            return None\n\n    def all_unigram_count(self):\n        return sum(self.ngram_counter[1].values())\n\n    def write_model(self, writer):\n        for n in range(1, self.max_n+1):\n            all_unigram_count = self.all_unigram_count()\n            for ngram, count in self.ngram_counter[n].iteritems():\n                if n == 1:\n                    prob = 1.0 * count / all_unigram_count\n                else:\n                    pre_count = self.ngram_counter[n-1][ngram[0:n-1]]\n                    prob = 1.0 * count / pre_count\n                \n                ngram_str = u\"\\t\".join(ngram)\n                writer.write(\"%s\\t%s\\t%s\\n\" % (\n                    str(n), str(prob), ngram_str.encode(\"utf-8\")))\n\n    def write_model_to_file(self, filename):\n        with open(filename, \"w\") as writer:\n            self.write_model(writer)\n\n    \n    def read_model(self, reader):\n        for line in reader.readlines():\n            line = unicode(line, \"utf-8\")\n            line = line.rstrip(u\"\\r\\n\")\n            n_prob_ngram = line.split(u\"\\t\")\n            \n            n = int(n_prob_ngram[0])\n            prob = float(n_prob_ngram[1])\n            ngram = tuple(n_prob_ngram[2:])\n            \n            self.ngram_model[n][ngram] = prob\n        \n    def read_model_from_file(self, filename):\n        with open(filename, \"r\") as reader:\n            self.read_model(reader)\n\n    def get_sentence_prob(self, words):\n        log_prob = 0\n        for i in xrange(len(words)):\n            ngram = self.get_ngram(self.max_n, i, words)\n            word = words[i]\n\n            if ngram is None:\n                continue # TODO: provisional; skip positions without a full n-gram context\n\n            if self.max_n == 1:\n                prob = self.get_unigram_prob(word)\n            else:\n                prob = self.get_ngram_prob(ngram, word)\n            log_prob = log_prob + math.log(prob, 2.0)\n        return log_prob\n    \n    def get_unigram_prob(self, word):\n        word_tuple = (word, )\n        if word_tuple in self.ngram_model[1]:\n            prob = self.lambda_unigram * self.ngram_model[1][word_tuple] + (1.0 - self.lambda_unigram) * (1.0/self.all_word_num)\n        else:\n            prob = (1.0 - self.lambda_unigram) * (1.0/self.all_word_num)\n        return prob \n\n    def get_ngram_prob(self, ngram, word):\n        unigram_prob = self.get_unigram_prob(word)\n        n = len(ngram)\n        if ngram in self.ngram_model[n]:\n            prob = self.lambda_ngram * self.ngram_model[n][ngram] + (1.0 - self.lambda_ngram) * unigram_prob\n        else:\n            prob = (1.0 - self.lambda_ngram) * unigram_prob\n        return prob \n\n    def covered(self, words):\n        covered = 0\n\n        n = self.max_n\n        for i in xrange(len(words)):\n            ngram = self.get_ngram(n, i, words)\n\n            if (ngram is not None) and (ngram in self.ngram_model[n]):\n                covered += 1\n        return covered\n","sub_path":"nlptutorial/Ngram.py","file_name":"Ngram.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
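The smoothing in get_unigram_prob and get_ngram_prob above is plain linear interpolation with a uniform fallback. A worked numeric example using the class defaults (lambda = 0.9) and an assumed vocabulary bound of 10**6:

lam = 0.9
vocab = 10 ** 6                       # assumed all_word_num
p_ml = 0.02                           # maximum-likelihood unigram estimate for a seen word

p_seen = lam * p_ml + (1 - lam) * (1.0 / vocab)    # 0.0180001
p_unseen = (1 - lam) * (1.0 / vocab)               # 1e-07, never exactly zero
print(p_seen, p_unseen)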
+{"seq_id":"402370493","text":"from random import shuffle\nimport os\n\n'''\nthis module contains all functions used to read the data set from given file (and handle its structure space-wise)\n'''\n\n\n# helping func for get_text_file_data\n# in the txt data source file there are lines with different number of spaces - this function handles it\ndef handle_any_number_of_spaces(array_of_x_label_y):\n    num_of_entrys = len(array_of_x_label_y)\n    if num_of_entrys == 3:\n        x_value = float(array_of_x_label_y[0])\n        label = int(array_of_x_label_y[1])\n        y_value = float(array_of_x_label_y[2])\n    elif num_of_entrys == 2:\n        x_value = float(array_of_x_label_y[0])\n        splited_label_y = array_of_x_label_y[1].split(\" \")\n        label = int(splited_label_y[0])\n        y_value = float(splited_label_y[1])\n    else:\n        array_of_x_label_y = array_of_x_label_y[0].split(\" \")\n        x_value = float(array_of_x_label_y[0])\n        label = int(array_of_x_label_y[1])\n        y_value = float(array_of_x_label_y[2])\n\n    return (x_value, y_value, label)\n\n\n# func to get the points_array which is tuple of - heat-heart-class and weight_array\ndef get_data_set_from_path(path):\n    abs_file_path = join_paths(path)\n    points_array = []\n    with open(abs_file_path, \"r\") as f:\n        whole_line_wo_whitespaces = [line.rstrip() for line in f]\n        for line in whole_line_wo_whitespaces:\n            array_of_x_label_y = line.split(\" \")\n            points_array.append(handle_any_number_of_spaces(array_of_x_label_y))\n    return (points_array)\n\n\n# function to shuffle the data set in a way which splits it evenly into train and test halves\ndef shuffle_dataset(whole_data):\n    size_of_set = int(len(whole_data) / 2)\n    shuffle(whole_data)\n    train_set = []\n    test_set = []\n    for data_point in whole_data[:size_of_set]:\n        train_set.append(data_point)\n    for data_point in whole_data[size_of_set:]:\n        test_set.append(data_point)\n    return (train_set, test_set)\n\ndef join_paths(path):\n    script_dir = os.path.dirname(__file__)[:os.path.dirname(__file__).index(\"\\\\code\")]\n    return os.path.join(script_dir, path)\n","sub_path":"code/data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
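shuffle_dataset above is a 50/50 shuffled split with element-by-element copies; the same behavior collapses to two slices. A compact equivalent (hypothetical name) that also avoids mutating the caller's list:

import random

def split_half(points):
    points = list(points)             # copy, so the caller's ordering is untouched
    random.shuffle(points)
    half = len(points) // 2
    return points[:half], points[half:]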
+{"seq_id":"460653005","text":"\"\"\"\nStudent code for Word Wrangler game\n\"\"\"\n\nimport urllib2\nimport codeskulptor\nimport poc_wrangler_provided as provided\n\nWORDFILE = \"assets_scrabble_words3.txt\"\n\n\n# Functions to manipulate ordered word lists\n\ndef remove_duplicates(list1):\n    \"\"\"\n    Eliminate duplicates in a sorted list.\n\n    Returns a new sorted list with the same elements in list1, but\n    with no duplicates.\n\n    This function can be iterative.\n    \"\"\"\n    if len(list1) <= 1:\n        return list(list1)\n\n    new_list = []\n\n    for item in list1:\n        if item not in new_list:\n            new_list.append(item)\n\n    return new_list\n\n\ndef intersect(list1, list2):\n    \"\"\"\n    Compute the intersection of two sorted lists.\n\n    Returns a new sorted list containing only elements that are in\n    both list1 and list2.\n\n    This function can be iterative.\n    \"\"\"\n    if len(list1) + len(list2) < 1:\n        return []\n\n    new_list = []\n\n    for item in list1:\n        if item in list2:\n            new_list.append(item)\n\n    return new_list\n\n\n# Functions to perform merge sort\n\ndef merge(list1, list2):\n    \"\"\"\n    Merge two sorted lists.\n\n    Returns a new sorted list containing those elements that are in\n    either list1 or list2.\n\n    This function can be iterative.\n    \"\"\"\n    if len(list1) < 1:\n        return list2\n    if len(list2) < 1:\n        return list1\n\n    sum_len = len(list1) + len(list2)\n    new_list = []\n    id1 = 0\n    id2 = 0\n\n    while len(new_list) < sum_len:\n        if list1[id1] < list2[id2]:\n            new_list.append(list1[id1])\n            id1 += 1\n        else:\n            new_list.append(list2[id2])\n            id2 += 1\n\n        if id1 == len(list1):\n            new_list.extend(list2[id2:])\n        elif id2 == len(list2):\n            new_list.extend(list1[id1:])\n\n    return new_list\n\n\ndef merge_sort(list1):\n    \"\"\"\n    Sort the elements of list1.\n\n    Return a new sorted list with the same elements as list1.\n\n    This function should be recursive.\n    \"\"\"\n    last_pos = len(list1)\n    middle = last_pos // 2\n\n    if last_pos <= 1:\n        return list1\n\n    first_list = merge_sort(list1[:middle])\n    last_list = merge_sort(list1[middle:])\n\n    return merge(first_list, last_list)\n\n\n# Function to generate all strings for the word wrangler game\n\ndef gen_all_strings(word):\n    \"\"\"\n    Generate all strings that can be composed from the letters in word\n    in any order.\n\n    Returns a list of all strings that can be formed from the letters\n    in word.\n\n    This function should be recursive.\n    \"\"\"\n    if len(word) < 1:\n        return ['']\n\n    first, rest = word[0], word[1:]\n    rest_strings = gen_all_strings(rest)\n    result = []\n\n    for string in rest_strings:\n        if string != '':\n            for pos in range(len(string) + 1):\n                new_word = string[:pos] + first + string[pos:]\n                result.append(new_word)\n        else:\n            result.append(first)\n\n    return rest_strings + result\n\n\n# Function to load words from a file\n\ndef load_words(filename):\n    \"\"\"\n    Load word list from the file named filename.\n\n    Returns a list of strings.\n    \"\"\"\n    # fetch the CodeSkulptor-hosted asset and strip the trailing newlines\n    url = codeskulptor.file2url(filename)\n    netfile = urllib2.urlopen(url)\n    return [line[:-1] for line in netfile.readlines()]\n\n\ndef run():\n    \"\"\"\n    Run game.\n    \"\"\"\n    words = load_words(WORDFILE)\n    wrangler = provided.WordWrangler(words, remove_duplicates,\n                                     intersect, merge_sort,\n                                     gen_all_strings)\n    provided.run_game(wrangler)\n\n# Uncomment when you are ready to try the game\n# run()\n","sub_path":"Homework11/wrangler.py","file_name":"wrangler.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
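gen_all_strings above is the one genuinely recursive piece: every string over the remaining letters is kept both without the first letter and with it spliced into every position. A standalone restatement with a check for a 2-letter word:

def all_strings(word):
    if not word:
        return ['']
    first, rest = word[0], word[1:]
    rest_strings = all_strings(rest)
    spliced = [s[:i] + first + s[i:]
               for s in rest_strings
               for i in range(len(s) + 1)]     # insert `first` at every position
    return rest_strings + spliced

assert sorted(all_strings('ab')) == sorted(['', 'a', 'b', 'ab', 'ba'])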
+{"seq_id":"507802681","text":"\"\"\"\nBack end for election panel information.\n\"\"\"\n\n__author__ = 'Waseem Ahmad '\n\nimport json\nimport logging\nimport webapp2\n\nfrom authentication import auth\nfrom datetime import datetime, timedelta\nfrom google.appengine.api import taskqueue\nfrom models import models, webapputils, report_results\nfrom models.admin_.organization_.election import get_panel\n\nPAGE_URL = '/admin/organization/election/information'\nTASK_URL = '/tasks/admin/organization/election/information'\n\nclass ElectionInformationHandler(webapp2.RequestHandler):\n\n    def get(self):\n        # Authenticate user\n        voter = auth.get_voter(self)\n        status = models.get_admin_status(voter)\n        if not status:\n            webapputils.render_page(self, '/templates/message', \n                {'status': 'Error', 'msg': 'Not Authorized'})\n            return\n        \n        data = {}\n\n        # Get election\n        election = auth.get_election()\n        if election:\n            data = {'id': str(election.key()),\n                    'election': election.to_json()}\n        panel = get_panel(PAGE_URL, data, data.get('id'))\n        webapputils.render_page_content(self, PAGE_URL, panel)\n\n    def post(self):\n        methods = {\n            'get_election': self.get_election,\n            'update_election': self.update_election\n        }\n\n        # Authenticate user\n        org = auth.get_organization()\n        if not org:\n            webapputils.respond(self, 'ERROR', 'Not Authorized')\n            return\n\n        # Get election\n        election = auth.get_election()\n\n        # Get the method\n        data = json.loads(self.request.get('data'))\n        method = data['method']\n        logging.info('Method: %s\\n Data: %s', method, data)\n        if method in methods:\n            methods[method](election, data)\n        else:\n            webapputils.respond(self, 'ERROR', 'Unknown method')\n\n    def get_election(self, election, data):\n        out = {'status': 'OK'}\n        if election:\n            out['election'] = election.to_json()\n        self.response.write(json.dumps(out))\n\n    def update_election(self, election, data):\n        out = {'status': 'OK'}\n        if not election:\n            # User must be trying to create new election\n            election = models.Election(\n                name=data['name'],\n                start=datetime.fromtimestamp(data['times']['start']),\n                end=datetime.fromtimestamp(data['times']['end']),\n                organization=auth.get_organization(),\n                universal=data['universal'],\n                hidden=data['hidden'],\n                result_delay=data['result_delay'],\n                description=data['description'])\n            election.put()\n            out['msg'] = 'Created'\n            auth.set_election(election)\n        else:\n            election.name = data['name']\n            election.start = datetime.fromtimestamp(data['times']['start'])\n            election.end = datetime.fromtimestamp(data['times']['end'])\n            election.universal = data['universal']\n            election.hidden = data['hidden']\n            election.result_delay = data['result_delay']\n            election.description = data['description']\n            election.put()\n            out['msg'] = 'Updated'\n        self.schedule_result_computation(election)\n        out['election'] = election.to_json()\n        self.response.write(json.dumps(out))\n\n    def schedule_result_computation(self, election):\n        method_name = \"compute_results\"\n        old_task_name = '-'.join(\n            [str(election.key()), str(election.task_count), method_name])\n        election.task_count += 1\n        task_name = '-'.join(\n            [str(election.key()), str(election.task_count), method_name])\n\n        # Delete any existing tasks enqueued for computing results\n        q = taskqueue.Queue()\n        q.delete_tasks(taskqueue.Task(name=old_task_name))\n\n        # Enqueue new task for computing results after election ends\n        compute_time = election.end + timedelta(seconds=5)\n        data = {'election_key': str(election.key()),\n                'method': method_name}\n        retry_options = taskqueue.TaskRetryOptions(task_retry_limit=0)\n        taskqueue.add(name=task_name,\n                      url=TASK_URL,\n                      params={'data': json.dumps(data)},\n                      eta=compute_time,\n                      retry_options=retry_options)\n        election.put()\n        logging.info('Election result computation enqueued.')\n    \n","sub_path":"src/models/admin_/organization_/election_/information.py","file_name":"information.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"25762511","text":"# Task:\n# If a number is prime, print the sum of its digits\n\ndef fun3(num):\n    suma=0\n    while num>0:\n        broj=num%10\n        num=num//10\n        suma=suma+broj\n    print(suma)\n\ndef fun2(num):\n    if num<2:\n        return 0  # 0 and 1 are not prime\n    for i in range(2,num):\n        if num%i==0:\n            return 0\n    \n    return 1\n    \n\nn = int(input(\"Enter the array size: \"))\nwhile n<1:\n    n = int(input(\"Enter the array size: \"))\n\n\nniz = [0]*n\nfor i in range(n):\n    num = int(input(\"Enter a number: \"))\n    niz[i] = num\n\nfor elem in niz:\n    if(fun2(elem)):\n        fun3(elem)\n    \n","sub_path":"Programiranje/Prost/KresimirCrnjak_0.py","file_name":"KresimirCrnjak_0.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"463430215","text":"# The input is a string of unique integers separated by spaces.\n# Print the numbers with the minimum and maximum swapped.\n# Sample input\n# 3 4 5 2 1\n# Sample output\n# 3 4 1 2 5\n\nL = [3,4,5,2,1,9,3,10,5]\n\nmin_value = min(L)\nmax_value = max(L)\n\nmini = L.index(min_value)\nmaxi = L.index(max_value)\nL[mini], L[maxi] = L[maxi], L[mini]\nprint(L) ","sub_path":"specialist/level1/ex45.py","file_name":"ex45.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
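fun2 above trial-divides by every i up to num, which is O(n) per test; divisors pair up around the square root, so stopping at sqrt(num) suffices. A sketch of the cheaper test, keeping fun2's 0/1 return convention:

def is_prime(num):
    if num < 2:
        return 0                      # 0 and 1 are not prime
    i = 2
    while i * i <= num:               # any factor > sqrt(num) pairs with one below it
        if num % i == 0:
            return 0
        i += 1
    return 1

assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]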
+{"seq_id":"178021171","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 14 15:43:22 2020\n\n@author: masa\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport AbsHtmlPage\nimport csv\nfrom datetime import datetime\nimport pandas as pd\n\n\"\"\"\nExtract data from the profile page\n\"\"\"\nclass ContributionPage(AbsHtmlPage.AbsHtmlPage):\n    pageDatetime = 'a'\n\n    def getPage(self, address):\n        print(address)\n        #with open('Contribution.html', mode='r', encoding='utf-8') as f:\n        #    text = f.read()\n        text = super().getHtmlPage(\"/room/profile?room_id=\" + str(self.roomId) )\n        self.pageDatetime = datetime.now().strftime('%Y/%m/%d %H:%M:%S')\n        return text\n    \n    def extractData(self, text) :\n        soup = BeautifulSoup(text, 'lxml')\n        \n        singleContributor = {'rank': 1, 'name': 'aa', 'point': 0}\n\n        # Extract the ranking members\n        memberTable = soup.find_all(\"table\", class_=\"table-type-01\")\n        print(len(memberTable))\n        members = memberTable[1].find_all(\"tr\")\n        #print(members)\n        for member in members :\n            if(member.find(\"td\", class_=\"ta-r\")):\n                singleData = member.find_all(\"td\")\n                singleContributor = {'rank': singleData[0].text, 'name': singleData[1].text, 'point': singleData[2].text}\n            #    print(singleContributor)\n        # e.g. 1 Hirofumi 69373pt\n        #if(3 == len(contributor)):\n        #    singleContributor = {'rank': contributor[0].text, 'name': contributor[1].text, 'point': contributor[2].text}\n        #    print(singleContributor)\n        #    #self.getSingleProfle(roomId)\n        \nif __name__ == \"__main__\":\n    page = ContributionPage()\n    text = page.getPage('test')\n    dfs = pd.read_html(text)\n    print(len(dfs))\n    print(dfs[1])\n    page.extractData(text)\n#    profile.saveData()\n\n#    print(profile.getData())\n    \n#print(text)\n","sub_path":"getsrdata/ContributionPage.py","file_name":"ContributionPage.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"117391913","text":"from util.util_class import day_of_week\nimport sys\n\n\ndef main():\n\n    #day = day_of_week(17, 5, 2019) Prints default result\n\n    d = int(sys.argv[1]) # Take cmd line parameter & print result\n    m = int(sys.argv[2])\n    y = int(sys.argv[3])\n    print(\"Value for given day is : \", day_of_week(d, m, y))\n    #print(day)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"DaysOfWeek.py","file_name":"DaysOfWeek.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"403356432","text":"#! /usr/bin/env python\n\nimport sys\n\ndef main():\n    infilename = sys.argv[1]\n    header = True\n    outfilename = infilename[:-len('.vcf')] + '_summary.txt' # str.strip removes characters, not a suffix\n    infile = open(infilename, 'r')\n    lines = infile.readlines()\n    outfile = open(outfilename, 'w')\n    outfile.write('position,anc,der\\n')\n\n    for line in lines:\n        if not line.startswith('#'):\n            line = line.strip('\\n')\n            position = float(line.split('\\t')[1])\n            gts = line.split('\\t')[9:]\n            gts = \"|\".join(gts)\n            anc = gts.count(\"0\")\n            der = gts.count(\"1\")\n            outfile.write('{0:.0f},{1:.0f},{2:.0f}\\n'.format(position,anc,der))\n    \n    outfile.close()\n    infile.close()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"arabidopsis_chr1_simulations/parsing_scripts/parsevcf.py","file_name":"parsevcf.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
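A caveat on the counting in parsevcf.py above: gts.count("0") counts characters anywhere in the joined genotype columns, so it only stays correct while each sample field is a bare GT such as 0|1. If the VCF carries extra per-sample subfields (0|1:34:...), restrict the count to the GT subfield. A sketch with a hypothetical helper:

def count_alleles(sample_fields):
    """sample_fields: e.g. ['0|1:34', '1/1:12']; returns (ancestral, derived)."""
    anc = der = 0
    for field in sample_fields:
        gt = field.split(':', 1)[0]       # keep only the GT subfield
        anc += gt.count('0')
        der += gt.count('1')
    return anc, der

assert count_alleles(['0|1:34', '1/1:12']) == (1, 3)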
+{"seq_id":"29195017","text":"import collections\nfrom .. import config\nfrom ..util.http import BaseRestClient, APIError\n\ndefault_api_key = config.create(\"lastfm\", \"key\",\n                               fallback=\"unset\",\n                               comment=\"An API key from last.fm\")\n\nTrack = collections.namedtuple(\"Track\", \"artist title url\")\n\n\nclass LastFm(BaseRestClient):\n    URL = \"http://ws.audioscrobbler.com/2.0/\"\n    _api_key = None\n\n    @property\n    def api_key(self):\n        return self._api_key or default_api_key()\n\n    @api_key.setter\n    def api_key(self, value):\n        self._api_key = value\n\n    def preprocess(self, json):\n        if 'error' in json:\n            raise APIError(json['message'])\n        return json\n\n    async def recent_tracks(self, username):\n        json = await self.request(\"get\", self.URL, params={\n            'method': \"user.getrecenttracks\",\n            'user': username,\n            'format': 'json',\n            'api_key': self.api_key,\n        })\n        return [Track(i['artist']['#text'], i['name'], i['url']) for i in json['recenttracks']['track']]\n\n    async def tag_tracks(self, tag):\n        json = await self.request(\"get\", self.URL, params={\n            'method': \"tag.gettoptracks\",\n            'tag': tag,\n            'format': 'json',\n            'api_key': self.api_key,\n        })\n        return [Track(i['artist']['name'], i['name'], i['url']) for i in json['tracks']['track']]\n","sub_path":"plumeria/api/lastfm.py","file_name":"lastfm.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
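The LastFm methods above are coroutines, so callers need a running event loop. A hedged usage sketch; BaseRestClient's construction details are not shown in this file, so the bare instantiation and the key value here are assumptions:

import asyncio

async def show_recent(username):
    api = LastFm()                          # assumes BaseRestClient needs no required args
    api.api_key = "your-key-here"           # or rely on the configured default
    for track in await api.recent_tracks(username):
        print(track.artist, "-", track.title)

# asyncio.get_event_loop().run_until_complete(show_recent("someuser"))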
+{"seq_id":"533527295","text":"from keras.layers import Dense, Wrapper\nimport keras.backend as K\n\n\nclass DropConnectDense(Dense):\n    \"\"\"\n    Implementation of a drop connect layer.\n    :param dense: A dense layer within a neural network.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"\n        Initialize.\n        :param args: None.\n        :param kwargs: Pass a probability of dropout.\n        \"\"\"\n        self.prob = kwargs.pop(\"prob\", 0.5)\n        if 0.0 < self.prob < 1.0:\n            self.uses_learning_phase = True\n        super(DropConnectDense, self).__init__(*args, **kwargs)\n\n    def call(self, x, mask=None):\n        \"\"\"\n        Calls the layer.\n        :param x: Datapoints.\n        :param mask: None.\n        :return: Dropout + activation on datapoints.\n        \"\"\"\n        # Drop individual weights (and bias entries) during the training phase.\n        if 0.0 < self.prob < 1.0:\n            self.kernel = K.in_train_phase(\n                K.dropout(self.kernel, self.prob), self.kernel\n            )\n            self.bias = K.in_train_phase(K.dropout(self.bias, self.prob), self.bias)\n\n        output = K.dot(x, self.kernel)\n        if self.use_bias:\n            output += self.bias\n        return self.activation(output)\n\n\nclass DropConnect(Wrapper):\n    \"\"\"\n    Wrapper class for manipulation of drop connect layer.\n    :param Wrapper: Keras Wrapper.\n    \"\"\"\n\n    def __init__(self, layer, prob=1.0, **kwargs):\n        \"\"\"\n        Initialize.\n        :param layer: Previous layer.\n        :param prob: Probability of dropout.\n        :param kwargs: Further arguments (None).\n        \"\"\"\n        self.prob = prob\n        self.layer = layer\n        super(DropConnect, self).__init__(layer, **kwargs)\n        if 0.0 < self.prob < 1.0:\n            self.uses_learning_phase = True\n\n    def build(self, input_shape):\n        \"\"\"\n        Builds the layer by input_shape.\n        :param input_shape: Input shape of the ndarray.\n        :return: Output shape.\n        \"\"\"\n        if not self.layer.built:\n            self.layer.build(input_shape)\n            self.layer.built = True\n        super(DropConnect, self).build()\n\n    def compute_output_shape(self, input_shape):\n        \"\"\"\n        Computes the output shape of a keras layer.\n        :param input_shape: Input shape of the previous layer.\n        :return: Output shape.\n        \"\"\"\n        return self.layer.compute_output_shape(input_shape)\n\n    def call(self, x):\n        \"\"\"\n        Calls the layer's functionality.\n        :param x: Datapoints (numpy ndarray).\n        :return: Processed data.\n        \"\"\"\n        if 0.0 < self.prob < 1.0:\n            self.layer.kernel = K.in_train_phase(\n                K.dropout(self.layer.kernel, self.prob), self.layer.kernel\n            )\n            self.layer.bias = K.in_train_phase(\n                K.dropout(self.layer.bias, self.prob), self.layer.bias\n            )\n        return self.layer.call(x)\n","sub_path":"lib/neural_networks/layers/layer_dropconnect.py","file_name":"layer_dropconnect.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"469628122","text":"import threading\nimport time\nimport random\n\n\n# Although Python threads are real threads, the interpreter holds a GIL (Global Interpreter Lock) while executing code: a Python thread must acquire the GIL before it runs, and after every 100 bytecodes the interpreter automatically releases it so other threads get a chance. The GIL effectively locks all thread execution, so Python threads can only run alternately; even 100 threads on a 100-core CPU use a single core.\n\n# The GIL is a historical legacy of the interpreter design. The interpreter we normally use is the official CPython; to really exploit multiple cores one would have to rewrite an interpreter without a GIL.\n\n# So in Python you can use multithreading, but do not expect it to make effective use of multiple cores. If you must use threads across cores, the only route is a C extension, which loses Python's simplicity.\n\n# No need to worry too much, though: Python cannot run multicore workloads on threads, but it can on processes - every Python process has its own independent GIL, so processes do not interfere.\nlock=threading.Lock()\n# Since there is only one lock, no matter how many threads exist at most one holds it at any moment, so conflicting modifications cannot happen. A lock is created with threading.Lock():\ndef a():\n    a = random.randint(1, 10)\n    a *= 10\n    lock.acquire()\n    try:\n        print(a)\n    finally:\n        # When multiple threads call lock.acquire() at the same time, only one succeeds in taking the lock and continues; the rest keep waiting until they obtain it.\n        # A thread that obtained the lock must release it when done, otherwise the waiting threads wait forever and become dead threads, so try...finally guarantees the release.\n        # The benefit of a lock is that a critical section runs start to finish in one thread; the downsides are plenty: it blocks concurrent execution (code holding the lock effectively runs single-threaded, so efficiency drops a lot), and with several locks held by different threads each trying to take the other's lock, the program can deadlock - every thread hangs, unable to run or finish, until the OS force-kills it.\n        lock.release()\n\nthreads = []\nfor i in range(8):\n    thread = threading.Thread(target=a)\n    threads.append(thread)\n    thread.start()\n\nfor thread in threads:\n    thread.join()\n\nprint('Done')\n\n\n\n\n# Create the global ThreadLocal object:\nlocal_school = threading.local()\n\n# The global variable local_school is a ThreadLocal object; every thread can read and write its student attribute without affecting the others.\n# A ThreadLocal variable is global, yet each thread only reads and writes its own independent copy without interference; ThreadLocal solves the problem of passing a thread's own state between the functions in that thread.\ndef process_student():\n    # Fetch the student bound to the current thread:\n    std = local_school.student\n    print('Hello, %s (in %s)' % (std, threading.current_thread().name))\n\n\ndef process_thread(name):\n    # Bind the ThreadLocal student:\n    local_school.student = name\n    process_student()\n\n\nt1 = threading.Thread(target=process_thread, args=('Alice',), name='Thread-A')\nt2 = threading.Thread(target=process_thread, args=('Bob',), name='Thread-B')\nt1.start()\nt2.start()\nt1.join()\nt2.join()\n","sub_path":"Py/databaseAnalysis/basic/thread1111.py","file_name":"thread1111.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"242354979","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import View, ListView, DetailView, CreateView\nfrom django.urls import reverse_lazy, reverse\nfrom django.apps import apps\nfrom django import forms\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom .models import Category, BaseAdvertise\nfrom .forms import AdvertiseForm\nfrom advertise.models.motor_model import Motors, MotorCycle, Car, AutoAccessoriesAndParts, HeavyVehicles\nfrom advertise.forms import CarForm\n\n\ndef get_form(a):\n    class MyForm(forms.ModelForm):\n        class Meta:\n            model = a\n            fields = '__all__'\n    return MyForm\n\n\nclass CategoryList(View):\n    model = Category\n    template_name = 'category/category_list.html'\n\n    def get(self, request):\n        context = {}\n        if request.GET.get('id'):\n            qs = 
self.model.objects.filter(parent__id=request.GET.get('id'))\n context['category'] = self.model.objects.get(id=request.GET.get('id'))\n else:\n qs = self.model.objects.filter(parent__isnull=True)\n context['object_list'] = qs\n context['advertise_form'] = AdvertiseForm()\n return render(request, self.template_name, context=context)\n\n\nclass CreateCategory(CreateView):\n model = Category\n fields = '__all__'\n template_name = 'category/category_create.html'\n success_url = reverse_lazy('categories')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n if self.model.objects.filter(id=self.request.GET.get('parent')):\n context['parent'] = self.model.objects.get(id=self.request.GET.get('parent'))\n print(context['parent'])\n return context\n\n def post(self, request, **kwargs):\n request.POST = request.POST.copy()\n if self.request.GET.get('parent'):\n request.POST['parent'] = self.model.objects.get(id=self.request.GET.get('parent'))\n return super(CreateCategory, self).post(request, **kwargs)\n\n\nclass CreateAd(View):\n model = BaseAdvertise\n template_name = 'category/create_ad.html'\n\n def get(self, request):\n context = {}\n if request.GET.get('id'):\n qs = Category.objects.filter(parent__id=request.GET.get('id'))\n context['category'] = Category.objects.get(id=request.GET.get('id'))\n try:\n md = apps.get_model(app_label='advertise', model_name=context['category'].keyword)\n context['model_form'] = get_form(md)\n except Exception as err:\n print(err)\n else:\n qs = Category.objects.filter(parent__isnull=True)\n context['object_list'] = qs\n context['advertise_form'] = AdvertiseForm()\n return render(request, self.template_name, context=context)\n\n def post(self, request):\n if not request.user:\n raise ValueError()\n print(request.POST)\n user = request.user\n category = Category.objects.get(id=request.POST['ct_id'])\n try:\n md = apps.get_model(app_label='advertise', model_name=category.keyword)\n model_form = get_form(md)(request.POST)\n except Exception as err:\n raise ValueError(err)\n if model_form.is_valid():\n obj = model_form.save()\n obj.base_advertise.create(object_id=obj.id, title=model_form.data['title'], price=model_form.data['price'],\n description=model_form.data['description'], location=model_form.data['location'],\n user=user, category=category)\n print(model_form.data)\n\n return redirect('categories')\n\n\nclass CarDetail(DetailView):\n model = Car\n template_name = 'category/advertise_detail.html'\n\n","sub_path":"rnd/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"257912156","text":"# 7. 
Write a program that demonstrates or verifies\n# that for the set of natural numbers the identity 1+2+...+n = n(n+1)/2 holds,\n# where n is any natural number.\n\nif __name__ == '__main__':\n    n: int = int(input('Enter a natural number: '))\n    s: int = 0\n    for i in range(1, n + 1):\n        s += i\n    m: int = int(n * (n + 1) // 2)\n    print(f'1+2+...+n = n(n+1)/2 => {s} = {m}: {s == m}')\n","sub_path":"lesson_02/scripts/exercise_07.py","file_name":"exercise_07.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"150469432","text":"\"\"\"\nRain Water Trapped\nProblem Description\n\nGiven a vector A of non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining.\n\n\n\nProblem Constraints\n1 <= |A| <= 100000\n\n\n\nInput Format\nFirst and only argument is the vector A\n\n\n\nOutput Format\nReturn one integer, the answer to the question\n\n\n\nExample Input\nInput 1:\n\nA = [0, 1, 0, 2]\nInput 2:\n\nA = [1, 2]\n\n\nExample Output\nOutput 1:\n\n1\nOutput 2:\n\n0\n\n\nExample Explanation\nExplanation 1:\n\n1 unit is trapped on top of the 3rd element.\nExplanation 2:\n\nNo water is trapped.\n\"\"\"\n\n\nclass Solution:\n    # @param A : tuple of integers\n    # @return an integer\n    def trap(self, A):\n        total = 0\n        if len(A) <= 2:\n            return total\n        prefix_max = A[0]\n        postfix_max = [0] * (len(A) - 1)\n        # without the last element\n        postfix_max[-1] = A[-1]\n        for i in range(len(A) - 3, -1, -1):\n            postfix_max[i] = A[i + 1] if A[i + 1] >= postfix_max[i + 1] else postfix_max[i + 1]\n        for i in range(1, len(A) - 1):\n            if A[i] < prefix_max and A[i] < postfix_max[i]:\n                total += (min(prefix_max, postfix_max[i]) - A[i])\n            prefix_max = A[i] if A[i] > prefix_max else prefix_max\n        return total\n","sub_path":"InterviewBit/math/rain_water_trapped.py","file_name":"rain_water_trapped.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
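The solution above uses an O(n) postfix-max array; the same totals fall out of the classic two-pointer scan in O(1) extra space, advancing whichever side has the lower wall. A sketch (not part of the original submission), checked against both sample inputs:

def trap_two_pointer(heights):
    left, right = 0, len(heights) - 1
    left_max = right_max = total = 0
    while left < right:
        if heights[left] < heights[right]:
            left_max = max(left_max, heights[left])
            total += left_max - heights[left]     # water above the current left bar
            left += 1
        else:
            right_max = max(right_max, heights[right])
            total += right_max - heights[right]
            right -= 1
    return total

assert trap_two_pointer([0, 1, 0, 2]) == 1
assert trap_two_pointer([1, 2]) == 0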
+{"seq_id":"281983867","text":"from matplotlib import pyplot as plt\n\nfrom sci_math import Sinais\n\n\ndef GerarGraficos(a: list, b: list, file):\n    H = Sinais.system(a, b)\n    y, name, ylabel, xscale = [None, None, None], ['Assintótico', 'Calculado', 'Linear'], ['dB', 'dB', 'Amplitude'], \\\n                              ['log', 'log', 'linear']\n    w, y[0] = Sinais.bodeAssintotico(a, b, wlim=[0.1, 377], poits=10000)\n    y[1], y[2] = [Sinais.db(H(i)) for i in w], [abs(H(h)) for h in w]\n    print('sistema:\\n', Sinais.system2str(a, b), '\\n')\n    for i in range(len(y)):\n        a = plt.gca()\n        a.plot(w, y[i])\n        a.set_title(name[i])\n        plt.xlabel('w')\n        a.set_ylabel(ylabel[i])\n        a.set_xscale(xscale[i])\n        plt.grid(True, which=\"both\")\n        plt.savefig((file + (name[i]).replace('ó', 'o')).replace(' ', '_') + '.png')\n        plt.clf()\n\n\nGerarGraficos(Sinais.Polinomio(-1, cte=640), Sinais.Polinomio(-8, -40), 'out/Grafico_de_bode_H1_')\nGerarGraficos(Sinais.Polinomio(-40, cte=0.01), Sinais.Polinomio(-1, -8), 'out/Grafico_de_bode_H2_')\nGerarGraficos([6.4], Sinais.Polinomio(-8, -8), 'out/Grafico_de_bode_Ht_')\n","sub_path":"4 Periodo/sinais e sistemas 2/trabalho 1/resolucao/1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"468996315","text":"\r\nimport fastText.FastText as ft\r\ndef print_results(N, p, r):\r\n    print(\"N\\t\" + str(N))\r\n    print(\"P@{}\\t{:.3f}\".format(1, p))\r\n    print(\"R@{}\\t{:.3f}\".format(1, r))\r\n\r\n\r\n'''\r\ninput;\r\nepoch: number of training epochs\r\nlr: initial learning rate\r\nwordNgrams: the n of the n-grams; usually 2, meaning 2-grams\r\nminCount: minimal number of occurrences for a word to be counted.\r\n'''\r\n# ft.train_supervised()\r\nprint('================================')\r\nclassifier = ft.train_supervised('data/train1115.txt',  # path of the training data file\r\n                                 lr=0.05,  # learning rate\r\n                                 epoch=5000,  # number of epochs\r\n                                 loss='softmax',  # softmax or hs\r\n                                 wordNgrams=1,\r\n                                 bucket=20000,\r\n                                 label=\"__label__\",)\r\nclassifier.save_model('models/try_model.bin')\r\nprint('=========# load the model=======================')\r\nmode_load = ft.load_model('models/try_model.bin')\r\n\r\nprint('=========# test=======================')\r\nmode_test = mode_load.test('data/intent_small_train.txt')\r\nprint_results(*mode_test)\r\n\r\nprint('=========# predict:')\r\nlabels = mode_load.predict(['25V/470UF 体积8*12MM\tC5 C46\t2\t1000\t0.1'])\r\nlabels_ = mode_load.predict(['1UF 50V 0402\tC78\t1\t500\t0.08'])\r\nprint(labels, '\\n', labels_)\r\nprint('================================')\r\nlabels1 = mode_load.predict(['4.7K 5% 0603 472\tR81\t1\t500\t0.03'])[0]  # show the label only\r\nlabels1_ = mode_load.predict(['4.7K5%0603472R8115000.03'])[0]  # show the label only\r\nprint(labels1, '\\n', labels1_)\r\nprint('================================')\r\nlabels2 = mode_load.predict(['5.1K 5% 0402\tR32\t1\t500\t0.03',\r\n                             'STWNR8040-470M 8040 47UH 丝印470 4*4*1.6MM\tL2\t1\t500\t0.45'])\r\nlabels3 = mode_load.predict(['5.1K 5% 0402\tR32\t1\t500\t0.03',\r\n                             'STWNR8040-470M 8040 47UH 丝印470 4*4*1.6MM\tL2\t1\t500\t0.45'], k=3)  # get the top 3 classes\r\nprint(labels2, '\\n', labels3)\r\n\r\nwhile True:\r\n    txt = input('Enter a sentence: \n')\r\n    if txt != 'stop':\r\n        print(mode_load.predict(txt))\r\n    else:\r\n        break  # exit the loop\r\n\r\n\r\n\r\n# lr = 0.05\r\n# dim = 10\r\n# classifier = ft.train_supervised(input='data/intent_small_train.txt',\r\n#                                  dim=dim,\r\n#                                  lr=lr,\r\n#                                  epoch=50,\r\n#                                  label='__label__')\r\n# result_tr = classifier.test('data/intent_small_train.txt')[1]\r\n# # result_val = classifier.test('data/intent_small_valid.txt')\r\n# print(result_tr)\r\n# # print(result_val)\r\n# print(classifier.predict(['t第三方都是的说法是的发送到 ', 'hello', 'bye bye', 'show me chinese restaurants'], k=1))\r\n","sub_path":"2019-11-15/fasttext.py","file_name":"fasttext.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
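For context on the label="__label__" argument above: fastText's supervised mode expects one example per line, with the class tag(s) prefixed by that label string and the features after them. A tiny writer sketch for files like data/train1115.txt; the two sample rows and the "R"/"C" class names are invented for illustration:

rows = [
    ("R", "4.7K 5% 0603 472\tR81\t1\t500\t0.03"),
    ("C", "1UF 50V 0402\tC78\t1\t500\t0.08"),
]
with open("data/train_example.txt", "w", encoding="utf-8") as f:
    for label, features in rows:
        f.write("__label__{} {}\n".format(label, features))   # e.g. "__label__R 4.7K 5% ..."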
+{"seq_id":"584576242","text":"import os,shutil\nimport glob\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom keras.applications.vgg16 import VGG16\nfrom keras.preprocessing.image import *\nfrom keras import *\n\n#run 1 walk 0\noriginal_dataset_dir ='./datasets'\n\norigin_train_run = os.path.join(original_dataset_dir,'train/run')\norigin_train_walk= os.path.join(original_dataset_dir,'train/walk')\n\norigin_test_run = os.path.join(original_dataset_dir,'test/run')\norigin_test_walk = os.path.join(original_dataset_dir,'test/walk')\n\nall_test_walk =glob.glob(os.path.join(origin_test_walk,'*.png'))\nall_test_run =glob.glob(os.path.join(origin_test_run,'*.png'))\nall_test = all_test_walk+ all_test_run\nall_test_label = [0]*len(all_test_walk)+[1]*len(all_test_run)\n\nall_train_walk = glob.glob(os.path.join(origin_train_walk,'*.png'))\nall_train_run = glob.glob(os.path.join(origin_train_run,'*.png'))\nall_walk_labels = [0]*len(all_train_walk)\nall_run_labels= [1]* len(all_train_run)\n\n\ndivided_folder= os.path.join(original_dataset_dir,'divided')\nvalidataion_location = os.path.join(divided_folder,'validataion')\ntest_location = os.path.join(divided_folder,'test')\ntrain_location = os.path.join(divided_folder,'train')\n\n\nos.mkdir(divided_folder)\nos.mkdir(validataion_location)\nos.mkdir(test_location)\nos.mkdir(train_location)\n\nrun_train = os.path.join(train_location,'run')\nwalk_train =os.path.join(train_location,'walk')\nrun_test= os.path.join(test_location,'run')\nwalk_test= os.path.join(test_location,'walk')\nvalidation_walk = os.path.join(validataion_location,'walk')\nvalidation_run = os.path.join(validataion_location,'run')\n\nos.mkdir(run_train)\nos.mkdir(walk_train)\nos.mkdir(run_test)\nos.mkdir(walk_test)\nos.mkdir(validation_walk)\nos.mkdir(validation_run)\n\nall_train = all_train_walk +all_train_run\nall_train_labels = all_walk_labels+ all_run_labels\nall_train_index = np.array(range(len(all_train)))\nnp.random.shuffle(all_train_index)\nall_train =[all_train[i] for i in all_train_index]\nall_train_labels =[all_train_labels[i] for i in all_train_index]\n\n\n\nvalidation_data = all_train[:100]\nvalidation_data = np.asarray([cv2.imread(img) for img in validation_data])\nvalidation_label = all_train_labels[:100]\n\ntrain_data = all_train[100:]\ntrain_data =np.asarray([cv2.imread(img) for img in train_data])\ntrain_labels = all_train_labels[100:]\n\nfor data_piece in all_train_walk[50:]:\n    shutil.copy(data_piece,walk_train)\n    \nfor data_piece in all_train_run[50:]:\n    shutil.copy(data_piece,run_train)\n    \nfor data_piece in all_test_walk[50:]:\n    shutil.copy(data_piece,walk_test)\n    \nfor data_piece in all_test_run[50:]:\n    shutil.copy(data_piece,run_test)\n    \nfor data_piece in all_train_walk[:50]:\n    shutil.copy(data_piece,validation_walk)\n    \nfor data_piece in all_train_run[:50]:\n    shutil.copy(data_piece,validation_run)\n    \n\n\ndatagen = image.ImageDataGenerator(rescale=1./255,rotation_range=30,width_shift_range=0.2,height_shift_range=0.2,shear_range=0.1,zoom_range=0.2,\n                                   horizontal_flip=True,fill_mode='nearest')\n\ntest_gen = image.ImageDataGenerator(rescale=1./255)\n\ntrain_generator = datagen.flow_from_directory(train_location,target_size=(224,224),batch_size=32,class_mode='binary')\n# evaluation data should be rescaled only, not augmented\ntest_generator = test_gen.flow_from_directory(test_location,target_size=(224,224),batch_size=32,class_mode='binary')\nvalidataion_generator = test_gen.flow_from_directory(validataion_location,target_size=(224,224),batch_size=32,class_mode='binary')\n\nmodel = models.Sequential()\nconv_base =VGG16(input_shape=(224,224,3))\nconv_model_layers = conv_base.layers[:-3]\n\nfor conv_model in conv_model_layers:\n    model.add(conv_model)\n\nmodel.add(layers.Dense(100,activation='relu'))\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.Dense(1,activation='sigmoid'))\nmodel.compile(optimizer=optimizers.Adam(lr=1e-5),loss='binary_crossentropy',metrics=['acc'])\nhistory = model.fit_generator(train_generator,steps_per_epoch=100,epochs=10,validation_data=validataion_generator,validation_steps=50)\nmodel.save('walk and run_v2.h5')\n\n\n\n\n\n\n\n\n\n","sub_path":"Kaggle_activates/Walk_and_run_from_kaggle/Walk_or_Run/walk_or_run_3th.py","file_name":"walk_or_run_3th.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
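One design choice the training script above leaves implicit: the copied VGG16 layers stay trainable, so the whole network fine-tunes at lr=1e-5. The other common option is to freeze the convolutional base and train only the new head first. A sketch of the toggle; weights=None keeps it runnable offline, whereas in practice the pretrained ImageNet weights would be loaded:

from keras.applications.vgg16 import VGG16

base = VGG16(weights=None, include_top=False, input_shape=(224, 224, 3))
for layer in base.layers:
    layer.trainable = False                   # train only the new classifier head
print(sum(layer.trainable for layer in base.layers))   # 0: the base is frozen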
url=\"https://github.com/Anaconda-Platform/nb_conda\",\n author=\"Continuum Analytics\",\n description=\"Manage your conda environments from the Jupyter Notebook\",\n long_description=open('README.md').read(),\n packages=setuptools.find_packages(),\n include_package_data=True,\n zip_safe=False\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"255589130","text":"import os\nfrom setuptools import find_namespace_packages, setup\n\nwith open(os.path.join(os.path.dirname(__file__), \"VERSION\")) as version:\n VERSION = version.read()\n\nsetup(\n name=\"acme-data-generator\",\n version=VERSION,\n packages=find_namespace_packages(),\n include_package_data=True,\n license=\"MIT\",\n description=\"Python application for generating fake airport data\",\n author=\"Diego Quintana\",\n author_email=\"diego.quintana@estudiantat.upc.edu\",\n classifiers=[\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.8\",\n ],\n py_modules=[\"acme_data_generation\"],\n entry_points='''\n [console_scripts]\n airbase-gen=acme_data_generation.cli:cli\n ''',\n\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"471055406","text":"from collections import OrderedDict\n\nfrom rest_framework import serializers, validators\n\nfrom module1.models import (\n Bookmark,\n Note,\n TaggedItem\n)\n\nclass TaggedObjectRelatedField(serializers.RelatedField):\n \n def to_representation(self, value):\n\n if isinstance(value, Bookmark):\n serializer = BookmarkSerializer(value)\n elif isinstance(value, Note):\n serializer = NoteSerializer(value)\n else:\n raise Exception('Unexpected type of tagged object')\n\n return serializer.data\n\nclass TaggedObjectTypeRelatedField(serializers.RelatedField):\n \n def to_representation(self, value):\n\n if isinstance(value, Bookmark):\n return 'bookmark'\n elif isinstance(value, Note):\n return 'note'\n raise Exception('Unexpected type of tagged object')\n\nclass BookmarkSerializer(serializers.ModelSerializer):\n \n class Meta:\n model = Bookmark \n fields = ('slug','url')\n \n def to_representation(self, instance):\n \"\"\"\n Object instance -> Dict of primitive datatypes.\n \"\"\"\n ret = OrderedDict()\n fields = [field for field in self.fields.values() if not field.write_only]\n\n for field in fields:\n try:\n attribute = field.get_attribute(instance)\n except SkipField:\n continue\n\n if attribute is None:\n # We skip `to_representation` for `None` values so that\n # fields do not have to explicitly deal with that case.\n ret[field.field_name] = None\n else:\n ret[field.field_name] = field.to_representation(attribute)\n ret['content_name'] = instance.__class__.__name__.lower()\n\n return ret\n \nclass NoteSerializer(serializers.ModelSerializer):\n \n class Meta:\n model = Note\n fields = ('slug', 'text')\n\n def to_representation(self, instance):\n \"\"\"\n Object instance -> Dict of primitive datatypes.\n \"\"\"\n ret = OrderedDict()\n fields = [field for field in self.fields.values() if not field.write_only]\n\n for field in fields:\n try:\n attribute = field.get_attribute(instance)\n except SkipField:\n continue\n\n if attribute is None:\n # We skip `to_representation` for `None` values so that\n # fields do not have to explicitly deal with 
def to_representation(self, instance):\n        \"\"\"\n        Object instance -> Dict of primitive datatypes.\n        \"\"\"\n        ret = OrderedDict()\n        fields = [field for field in self.fields.values() if not field.write_only]\n\n        for field in fields:\n            try:\n                attribute = field.get_attribute(instance)\n            except SkipField:\n                continue\n\n            if attribute is None:\n                # We skip `to_representation` for `None` values so that\n                # fields do not have to explicitly deal with that case.\n                ret[field.field_name] = None\n            else:\n                ret[field.field_name] = field.to_representation(attribute)\n        ret['content_name'] = instance.__class__.__name__.lower()\n\n        return ret\n\n\nclass TaggedItemSerializer(serializers.ModelSerializer):\n    tagged_object = TaggedObjectRelatedField(queryset=TaggedItem.objects.all())\n    class Meta:\n        model = TaggedItem\n        fields = ('slug', 'tagged_object')\n","sub_path":"generic_relation_api/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"281176987","text":"# import\nimport re\nimport os\nimport requests\n\nclass DownloadImg:\n\n    fullPath = ''\n    link = ''\n\n    def __init__(self):\n        print('Download Image')\n\n    \"\"\"\n    # \n    # Requests\n    # \n    \"\"\"\n    def request(self, url, st = False):\n        return requests.get(url, stream=st)\n\n    \"\"\"\n    # \n    # Regex Code\n    # \n    \"\"\"\n    def regexCode(self, code, regex):\n        return re.finditer(regex, code, re.MULTILINE)\n\n    \"\"\"\n    # \n    # Get Data Groups\n    # \n    \"\"\"\n    def getDataGroups(self, datas):\n        data_log = []\n        for data in datas:\n            data_log.append(data.groups()[0])\n        return data_log\n\n    \"\"\"\n    # \n    # Download\n    # \n    \"\"\"\n    def download(self, url, image_path, image_name):\n        try:\n            path = '{path}/{name}'.format(path = image_path, name = image_name)\n            fileImage = open(path, 'wb')\n            fileImage.write(self.request(url, True).content)\n            fileImage.close()\n        except IOError:\n            print('Download: {name}\\t\\t[error]'.format(name = image_name))\n            return False\n        else:\n            print('Download: {name}\\t\\t[success]'.format(name = image_name))\n            return True\n\n    \"\"\"\n    # \n    # Create Directory\n    # \n    \"\"\"\n    def create_directory(self, directory_name):\n        try:\n            if not os.path.exists(directory_name):\n                os.makedirs(directory_name)\n        except OSError:\n            print (\"Creation of the directory %s failed\" % directory_name)\n            return False\n        else:\n            print (\"Successfully created the directory %s\" % directory_name)\n            return True\n\n    \"\"\"\n    # \n    # Delete Directory\n    # \n    \"\"\"\n    def delete_directory(self, directory_name):\n        try:\n            if os.path.exists(directory_name):  # was `if not ...`, which could never delete\n                os.rmdir(directory_name)  # os.rmdir removes empty directories only\n        except OSError:\n            print (\"Deletion of the directory %s failed\" % directory_name)\n            return False\n        else:\n            print (\"Successfully deleted the directory %s\" % directory_name)\n            return True\n# main\ndef main():\n    img1 = DownloadImg()\n    img1.download('https://images.freeimages.com/images/large-previews/dfa/jungle-1377573.jpg', 'uploads', 'image1.jpg')\n\n# run\nif __name__ == \"__main__\":\n    main()\n","sub_path":"download_img.py","file_name":"download_img.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"509573873","text":"# -*- coding: utf-8 -*-\n\n__all__ = [\"MorningstarSwedenScraper\"]\n\nimport datetime\nfrom decimal import Decimal\n\nfrom bs4 import BeautifulSoup\n\nfrom fondscrape.data_types import *\n\n\nclass MorningstarSwedenScraper:\n    def __init__(self, html):\n        self.html = html\n\n    def scrape(self):\n        soup = BeautifulSoup(self.html, 'html5lib')\n        current_day = self._parse_day_value(soup)\n        history = self._parse_history(soup)\n        return ScrapeData(current_day, history)\n\n    def _parse_day_value(self, soup):\n        # table.alternatedtoplist.halftoplist\n        # tr\n        # td > Senaste NAV\n        # td > 2 789 34 SEK\n        # td > 2015-02-27\n        # tr...\n        cells = soup.select(\"table.alternatedtoplist.halftoplist tr td\")\n        
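# (added note) Swedish number formatting: comma as decimal separator and\n        # spaces as thousand separators; normalised below before Decimal().\n        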
valuestr = cells[1].text.strip()\n        datestr = cells[2].text.strip()\n\n        # 1 234,56 SEK -> 1234.56\n        parsable_valuestr = valuestr[:-4].replace(\",\", \".\").replace(\" \", \"\")\n        value = Decimal(parsable_valuestr)\n\n        # 2015-05-15\n        date = datetime.datetime.strptime(datestr, \"%Y-%m-%d\").date()\n\n        return DateValue(date, value)\n\n    def _parse_history(self, soup):\n        # div#ctl00_ctl01_cphContent_cphMain_quicktake1_FlowColumn2_TrailingReturnsTopListItem1_ctl04\n        # table.alternatedtoplist\n        # tr\n        # td > 1 dag\n        # td > 0,5\n        # tr\n        # td > 1 vecka\n        # td > -0,5\n        # ... 1 månad, 3 månader, 6 månader, 1 år, 3 år, 5 år, 10 år\n        cells = soup.select(\"div#ctl00_ctl01_cphContent_cphMain_quicktake1_FlowColumn2_TrailingReturnsTopListItem1_ctl04 table.alternatedtoplist tr td\")\n\n        def get_change(idx, days):\n            if len(cells) <= idx:\n                return None\n            cellstr = cells[idx].text.strip()\n            if not cellstr or cellstr == \"-\":\n                return None\n            delta = datetime.timedelta(days=days)\n            percentage = Decimal(cellstr.replace(\",\", \".\")) / 100\n            return PeriodChange(delta, percentage)\n\n        # Index, days\n        period_indices = [\n            (1, 1),\n            (3, 7),\n            (5, 30),\n            (7, 30 * 3),\n            (9, 30 * 6),\n            (11, 365),\n            (13, 365 * 3),\n            (15, 365 * 5),\n            (17, 365 * 10)\n        ]\n\n        period_data = (get_change(p[0], p[1]) for p in period_indices)\n        period_data = filter(None, period_data)\n        return list(period_data)\n","sub_path":"fondscrape/scrapers/morningstar_sweden.py","file_name":"morningstar_sweden.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"410266598","text":"numbers = [1, 3, 4, 5, 8, 2, 1, 4, 5, 9, 5]\r\nhand = \"right\"\r\n\r\nkeys = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [\"*\", 0, \"#\"]]\r\nhands = [\"*\", \"#\"]\r\nanswer = []\r\n\r\nfor n in numbers:\r\n    if n in [1, 4, 7]:\r\n        answer.append(\"L\")\r\n        hands[0] = n\r\n    elif n in [3, 6, 9]:\r\n        answer.append(\"R\")\r\n        hands[1] = n\r\n    elif n in [2, 5, 8, 0]:\r\n        # NOTE: only the row distance is compared below; the original keypad\r\n        # problem also counts the column step from the 1/4/7 and 3/6/9 columns.\r\n        for key in keys:\r\n            if n in key:\r\n                lenl2 = keys.index(key)\r\n        for key in keys:\r\n            if hands[0] in key:\r\n                lenl1 = keys.index(key)\r\n\r\n        disl = abs(lenl1-lenl2)\r\n\r\n        for key in keys:\r\n            if n in key:\r\n                lenr2 = keys.index(key)\r\n        for key in keys:\r\n            if hands[1] in key:\r\n                lenr1 = keys.index(key)\r\n\r\n        disr = abs(lenr1-lenr2)\r\n\r\n        if disr > disl:\r\n            hands[0] = n\r\n            answer.append(\"L\")\r\n        elif disr < disl:\r\n            hands[1] = n\r\n            answer.append(\"R\")\r\n        elif disr == disl:\r\n            if hand == \"right\":\r\n                hands[1] = n\r\n                answer.append(\"R\")\r\n            else:\r\n                hands[0] = n\r\n                answer.append(\"L\")\r\n\r\nprint(answer)\r\n","sub_path":"python_programmers/Lv1/키패드누르기.py","file_name":"키패드누르기.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"453449237","text":"from flask import Flask\nfrom flask import request, current_app, abort\nfrom functools import wraps\nfrom Recommender import ProductRecommender\n\napp = Flask(__name__)\napp.config.from_object('settings')\n\n\ndef token_auth(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        if request.headers.get('X-API-TOKEN', None) != current_app.config['API_TOKEN']:\n            abort(403)\n        return f(*args, **kwargs)\n    return decorated_function\n\n# Prediction endpoint: takes a JSON cart (a list of product ids).\n@app.route('/predict', methods=['POST'])\n@token_auth\ndef predict():\n    product_list = request.get_json()\n    if not product_list:\n        return {'recommended_products': []}  # keep the response shape consistent\n    
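# Example request (hypothetical values): POST /predict with an X-API-TOKEN\n    # header and body {\"cart\": [123, 456]} -> {\"recommended_products\": [...]}\n    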
recommendations = recommender.predict_products(product_list['cart'])\n    return {'recommended_products': recommendations}\n\n# Training endpoint: called with the full path of the product metadata file.\n@app.route('/train', methods=['POST'])  # was GET; reading a JSON body implies POST\n@token_auth\ndef train():\n    filename_turkish_stopwords = 'turkish_stopwords.txt'\n    filenames = request.get_json()\n    recommender.train_recommender(filenames['filename_metadata'], filename_turkish_stopwords)\n    return {\"message\": \"Successfully Trained!\", \"success\": 1}\n\n\nif __name__ == '__main__':\n    recommender = ProductRecommender(predictWithPreTrainedModel=False)\n    app.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"18773364","text":"# coding=utf-8\nimport os\nimport os.path as osp\n\nimport torch\nfrom overrides import overrides\n\nfrom datasets.cpvton_dataset import CPDataLoader, CpVtonDataset\nfrom datasets.vvt_dataset import VVTDataset\n\n\nclass MPVDataset(VVTDataset):\n    \"\"\" CP-VTON dataset with the MPV folder structure. \"\"\"\n\n    def __init__(self, opt):\n        super(MPVDataset, self).__init__(opt)\n\n    #@overrides(CpVtonDataset)\n    def load_file_paths(self):\n        \"\"\" Reads the datalist txt file for CP-VTON\"\"\"\n        self.root = self.opt.mpv_dataroot\n        self.image_names = []\n        self.cloth_names = []\n\n        datalist = osp.join(self.root, \"all_poseA_poseB_clothes_0607.txt\")\n        with open(datalist, \"r\") as f:\n            for line in f.readlines():\n                person_name_1, person_name_2, cloth_name, _ = line.strip().split()\n                self.image_names.extend([person_name_1, person_name_2])\n                # both poses correspond to the same cloth\n                self.cloth_names.extend([cloth_name, cloth_name])\n\n        assert len(self.image_names) == len(\n            self.cloth_names\n        ), f\"len mismatch: {len(self.image_names)} != {len(self.cloth_names)}\"\n\n    ########################\n    # CLOTH REPRESENTATION\n    ########################\n    #@overrides(CpVtonDataset)\n    def get_input_cloth_path(self, index):\n        cloth_name = self.get_input_cloth_name(index)\n        subdir = \"all\" if self.stage == \"GMM\" else \"warp-cloth\"\n        cloth_path = osp.join(self.root, subdir, cloth_name)\n        return cloth_path\n\n    #@overrides(CpVtonDataset)\n    def get_input_cloth_name(self, index):\n        return self.cloth_names[index]\n\n    ########################\n    # PERSON REPRESENTATION\n    ########################\n    #@overrides(CpVtonDataset)\n    def get_person_image_path(self, index):\n        image_name = self.get_person_image_name(index)\n        image_path = osp.join(self.root, \"all\", image_name)\n        return image_path\n\n    #@overrides(CpVtonDataset)\n    def get_person_image_name(self, index):\n        return self.image_names[index]\n\n    #@overrides(CpVtonDataset)\n    def get_person_parsed_path(self, index):\n        image_name = self.get_person_image_name(index).replace(\".jpg\", \".png\")\n        parsed_path = osp.join(self.root, \"all_parsing\", image_name)\n        return parsed_path\n\n    #@overrides(CpVtonDataset)\n    def get_input_person_pose_path(self, index):\n        image_name = self.get_person_image_name(index)\n        pose_path = osp.join(self.root, \"all_person_clothes_keypoints\", image_name)\n        pose_path = pose_path.replace(\".jpg\", \"_keypoints.json\")\n        return pose_path\n\n\nif __name__ == \"__main__\":\n    print(\"Check the dataset for geometric matching module!\")\n\n    import argparse\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--dataroot\", default=\"data\")\n    parser.add_argument(\"--mpv_dataroot\", default=\"/data_hdd/mpv_competition\")\n    parser.add_argument(\"--datamode\", 
default=\"train\")\n parser.add_argument(\"--stage\", default=\"GMM\")\n parser.add_argument(\"--data_list\", default=\"train_pairs.txt\")\n parser.add_argument(\"--fine_width\", type=int, default=192)\n parser.add_argument(\"--fine_height\", type=int, default=256)\n parser.add_argument(\"--radius\", type=int, default=3)\n parser.add_argument(\"--shuffle\", action=\"store_true\", help=\"shuffle input data\")\n parser.add_argument(\"-b\", \"--batch-size\", type=int, default=4)\n parser.add_argument(\"-j\", \"--workers\", type=int, default=1)\n\n opt = parser.parse_args()\n dataset = MPVDataset(opt)\n data_loader = CPDataLoader(opt, dataset)\n\n print(\n \"Size of the dataset: %05d, dataloader: %04d\"\n % (len(dataset), len(data_loader.data_loader))\n )\n first_item = dataset.__getitem__(0)\n first_batch = data_loader.next_batch()\n\n from IPython import embed\n\n embed()\n","sub_path":"datasets/mpv_dataset.py","file_name":"mpv_dataset.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"353457099","text":"import debug\nfrom keras.models import load_model\nimport os\nfrom keras.layers import Input\nfrom train import get_classes, get_anchors\nfrom yolo3.utils import get_random_data, letterbox_image\nfrom yolo3.model import preprocess_true_boxes, yolo_loss, yolo_body\nimport numpy as np\nfrom train import create_model, create_locloss_model\nfrom keras import backend as K\nfrom PIL import Image\nfrom keras.optimizers import Adam\nimport ipdb\nimport time\nfrom filter_loss import filter_high_loss, filter_low_loss\nfrom yolo import YOLO\nimport matplotlib.pyplot as plt\nimport json\nimport sys\n\n# model_path = \"logs/000/ep009-loss30.814-val_loss30.951.h5\"\nmodel_path = \"logs/000/ep003-loss45.538-val_loss45.596.h5\"\n# model_path = 'model_data/yolo_weights.h5'\nclasses_path = 'model_data/classes.txt'\nanchors_path = 'model_data/yolo_anchors.txt'\nclass_names = get_classes(classes_path)\nnum_classes = len(class_names)\nanchors = get_anchors(anchors_path)\nnum_anchors = len(anchors)\ninput_shape = (416,416)\n\nmodel = create_locloss_model(input_shape, anchors, num_classes, freeze_body=2, weights_path=model_path, grid_loss=True)\nsess = K.get_session()\n\nannotation_path = sys.argv[1] \nuuid_path = sys.argv[2] \n\nval_split = 0.99\nwith open(annotation_path) as f:\n annotation_lines = f.readlines()\nwith open(uuid_path) as f:\n uuid_lines = f.readlines()\nnum_val = int(len(annotation_lines)*val_split)\nnum_train = len(annotation_lines) - num_val\n\nannotate_dict = {}\nfor i, line in enumerate(annotation_lines):\n annotate_dict[line.split()[0]] = i\n\n# high_loss_idx = filter_high_loss(10)\n# high_loss_idx = filter_low_loss(10)\n# extract only top 100 entries for now\n# n = len(high_loss_idx)\nn = len(annotation_lines)\nstart = time.time()\n\ngrid_mapping_full = ['0_xy_model_loss', '0_wh_model_loss', '0_class_model_loss', '0_confidence_model_loss',\n '1_xy_model_loss', '1_wh_model_loss', '1_class_model_loss', '1_confidence_model_loss',\n '2_xy_model_loss', '2_wh_model_loss', '2_class_model_loss', '2_confidence_model_loss',\n 'model_loss_total', 'model_output_0', 'model_output_1', 'model_output_2']\n\ngrid_mapping_partial = ['0_xy_model_loss', '0_wh_model_loss', '0_class_model_loss', '0_confidence_model_loss',\n '1_xy_model_loss', '1_wh_model_loss', '1_class_model_loss', '1_confidence_model_loss',\n '2_xy_model_loss', '2_wh_model_loss', '2_class_model_loss', '2_confidence_model_loss',\n 
'model_loss_total']\n\ngrid_mapping = ['0_xy_model_loss', '0_wh_model_loss', '0_class_model_loss', \n '1_xy_model_loss', '1_wh_model_loss', '1_class_model_loss', \n '2_xy_model_loss', '2_wh_model_loss', '2_class_model_loss', \n 'model_loss_total']\n\nfor i in range(n):\n # idx = annotate_dict[high_loss_idx[i]] # extract line number of high loss image from dict\n annotation_line = annotation_lines[i] # extract line text\n image, box = get_random_data(annotation_line, input_shape, random=False)\n # extract image location\n image_data = []\n box_data = []\n batch_data = []\n image_data.append(image)\n box_data.append(box)\n batch_data.append(annotation_line)\n # first element of uuid_data list is the image path\n uuid_data = uuid_lines[i].split()[1:]\n print(uuid_data)\n\n image_data = np.array(image_data)\n box_data = np.array(box_data)\n uuid_data = np.array(uuid_data)\n\n y_true, obj_uuid = preprocess_true_boxes(box_data, input_shape, anchors, num_classes, batch_data, uuid_data)\n\n tensor_map = {}\n\n for j in range(len(obj_uuid)):\n # TODO: retrieve uuid scale mapping\n flat_tensor = obj_uuid[j].flatten()\n flat_tensor = flat_tensor[np.nonzero(flat_tensor)]\n tensor_map[str(j)+'_uuid'] = flat_tensor.tolist()\n \n out = sess.run(model.output, feed_dict={k:d for k, d in zip(model.input, [image_data, *y_true])})\n\n for grid_n in range(len(grid_mapping_partial)):\n # TODO: retrieve dict name mapping \n flat_tensor = out[grid_n].flatten()\n flat_tensor = flat_tensor[np.nonzero(flat_tensor)]\n tensor_map[grid_mapping_partial[grid_n]] = flat_tensor.tolist()\n \n obj_mask_idx = ['model_obj_mask_0', 'model_obj_mask_1', 'model_obj_mask_2']\n try:\n img_name = annotation_line.split()[0]\n frame_no = img_name.split('/')[-1].split('.')[0]\n subtask = img_name.split('/')[-3]\n task = img_name.split('/')[-4]\n tensor_map['image_name'] = img_name\n tensor_map['frame_no'] = frame_no\n tensor_map['subtask'] = subtask\n tensor_map['task'] = task\n for idx in range(1,len(obj_mask_idx)+1):\n # hack. obj masks located at the end of out tensor. 
hence iterating backward.\n tensor_map[obj_mask_idx[-idx]] = np.transpose(np.nonzero(out[-idx])).tolist()\n with open(str(i).zfill(5) + '_' + 'yolo_data.json', 'w') as fp:\n json.dump(tensor_map, fp, indent=4, sort_keys=True)\n if(i % 100 == 0):\n print('image:', i)\n except:\n continue\n\nend = time.time()\nprint('Total time:', end-start)\n\n# model = YOLO() \n# i = 0\n# for i in range(n):\n# img = high_loss_idx[i]\n# image = Image.open(img)\n# idx = annotate_dict[high_loss_idx[i]] # extract line number of high loss image from dict\n# annotation_line = annotation_lines[idx] # extract line text\n# print(img, annotation_line)\n# box = np.array([np.array(list(map(int,box.split(',')))) for box in annotation_line.split()[1:]]) \n# box[:,[0,1]] = box[:,[1,0]]\n# box[:,[2,3]] = box[:,[3,2]]\n# img_arr = model.detect_image_bboxes(image, box)\n# img_arr.save(str(i) + '_detect.jpeg')\n\n\n # img_arr = model.detect_image(image)\n # model = create_model(input_shape, anchors, num_classes, freeze_body=2, weights_path=model_path, grid_loss=False)\n\n# K.clear_session()\n\n# image_input = Input(shape=(None, None, 3))\n\n# model = yolo_body(image_input, num_anchors//3, num_classes)\n\n# model.load_weights(model_path, by_name=True, skip_mismatch=True)\n\n\n# model.compile(optimizer=Adam(lr=1e-3), loss={'yolo_loss': lambda y_true, y_pred: y_pred})\n\n# model_image_size = (416,416)\n\n# img = \"data/5cc3a5ef4e436f43f7b5615f/images/0.jpeg\"\n# image = Image.open(img)\n# boxed_image = letterbox_image(image, tuple(reversed(model_image_size)))\n# image_data = np.array(boxed_image, dtype='float32')\n# image_data /= 255.\n# image_data = np.expand_dims(image_data, 0)\n# input_image_shape = K.placeholder(shape=(2, ))\n\n# retrieve_layers = [-4,-3,-2]\n# i, j, k = sess.run([model.layers[i].output for i in retrieve_layers], feed_dict={i:d for i, d in zip(model.input, [image_data, *y_true])})\n\n# i = sess.run(*model.output, feed_dict={model.input: image_data})\n\n# print(i)\n# outputs = [i, j, k]\n\n# total_loss = yolo_loss([*outputs, *y_true], anchors, num_classes, ignore_thresh=0.5)\n","sub_path":"edit_this_file.py","file_name":"edit_this_file.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"95534602","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 26 16:25:06 2019\n\n@author: Donnie\n\"\"\"\n\n\"\"\"\nSlower than XRD_Excel_Maker because it uses xlsxwriter to write the formula\nfor offsetting y in each row rather than putting the formula as a string\nwithin the database.\n\"\"\"\n\nimport numpy as np\nimport sympy as sp\nsp.init_session(quiet=True)\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nimport pandas as pd\nfrom xlsxwriter.utility import xl_rowcol_to_cell\nfrom scipy.interpolate import UnivariateSpline\nfrom fileOrganizer import fileFinder, fileMover\nimport time\n\ndef generateXRD(inputList, sampleNames, sheetName=None, plotExcel=False, \n plotPython=False, phases=None, phase_dict=None, offset=None,\n reset_sheet_num=False):\n \"\"\"\n Creates an Excel file from csv files\n \n ***Only set reset_sheet_num to True if beginning a new Excel workbook,\n otherwise it may cause data to be overwritten\n \n \"\"\"\n #Defaults the Excel sheet name to 'Sheet sheet_number' if not named, and iterates\n if (reset_sheet_num) or ('sheet_number' not in globals()):\n global sheet_number\n sheet_number = 1\n \n sheetName = sheetName if sheetName is not None else f'Sheet 
{sheet_number}'\n sheet_number += 1\n \n offset = offset if offset is not None else [1000] * len(inputList)\n phases = phases if phases is not None else []\n phase_dict = phase_dict if phase_dict is not None else {}\n \n #create a Pandas dataframe containing all of the calculated data\n dataFrame = pd.DataFrame()\n formulaDataFrame = pd.DataFrame({'title': 0}, index=[0])\n columnNames = []\n \n #collects all of the raw data into a dataframe\n i = 0\n for datafile in inputList:\n tempData = pd.read_csv(datafile[0], sep=None, usecols=[1, 2],\n engine='python')\n dataFrame = pd.concat([dataFrame, tempData], axis=1)\n dataFrame = pd.concat([dataFrame, formulaDataFrame], axis=1)\n columnNames.append(sampleNames[i])\n i += 1\n \n #\\u00B0 and \\u03B8 are unicode for the degree sign and theta, respectively\n subheaderNames = ['2\\u03B8 (\\u00B0)', 'Intensity, I (Counts)',\n 'I + offset (Counts)'] \n\n dataFrame.to_excel(writer, sheet_name=sheetName, index=False, startrow=2, header=False)\n worksheet = writer.sheets[sheetName]\n \n print('\\n\\n'+'-'*8+f' {time.time()-time0:.2f} seconds '+'-'*8) \n \n #Modifies the formatting to look good in Excel\n i = 0\n for col_num, value in enumerate(dataFrame.columns.values):\n if col_num % 3 == 0:\n if i % 2 == 0:\n worksheet.merge_range(0, col_num, 0, col_num+2, columnNames[i], even_header_format)\n worksheet.write(1, col_num, subheaderNames[col_num % 3], even_subheader_format)\n worksheet.write(1, col_num+1, subheaderNames[(col_num+1) % 3], even_subheader_format)\n worksheet.write(1, col_num+2, subheaderNames[(col_num+2) % 3], even_subheader_format)\n worksheet.set_column(col_num, col_num+2, 10, even_colnum_format)\n else:\n worksheet.merge_range(0, col_num, 0, col_num+2, columnNames[i], odd_header_format)\n worksheet.write(1, col_num, subheaderNames[col_num % 3], odd_subheader_format)\n worksheet.write(1, col_num+1, subheaderNames[(col_num+1) % 3], odd_subheader_format)\n worksheet.write(1, col_num+2, subheaderNames[(col_num+2) % 3], odd_subheader_format)\n worksheet.set_column(col_num, col_num+2, 10, odd_colnum_format)\n #writes a formula to apply offset to intensity\n for row_num in range(2, dataFrame.iloc[:,col_num+1].count() + 2):\n worksheet.write_formula(row_num, col_num+2, '=%s+%i' \n %(xl_rowcol_to_cell(row_num, col_num+1), offset[i]))\n i += 1 \n \n worksheet.set_row(0, 18)\n worksheet.set_row(1, 30)\n \n if plotExcel:\n chart = workbook.add_chart({'type': 'scatter',\n 'subtype':'straight'})\n for col_num in range(len(dataFrame.columns)):\n if col_num % 3 == 0:\n chart.add_series({\n 'name': [sheetName, 0, col_num],\n 'categories':[sheetName, 2, col_num, dataFrame.iloc[:,col_num].count() + 2, col_num],\n 'values':[sheetName, 2, col_num+2, dataFrame.iloc[:,col_num].count() + 2, col_num+2],\n 'line': {'width':2}\n })\n chart.set_x_axis({'name':'2\\u03B8 (\\u00B0)'})\n chart.set_y_axis({'name':'Intensity (a.u.)'})\n worksheet.insert_chart('D8', chart)\n \n if plotPython:\n fig = plt.figure()\n ax = plt.gca()\n j = 0\n for col_num, col_name in enumerate(dataFrame.columns.values):\n if col_num % 3 == 0:\n dataFrame.iloc[:,col_num+2] = dataFrame.iloc[:,col_num+1] + offset[j]\n plt.plot(dataFrame.iloc[:,col_num], dataFrame.iloc[:,col_num+2], lw=1)\n #Fits a spline to the each sample in order to get y position for\n #labelling each line\n spline = UnivariateSpline(dataFrame.iloc[:,col_num], dataFrame.iloc[:,col_num+2], k=1)\n plt.text(93.5, spline(90)+ 400,'%s'%(columnNames[j]), ha='right', va='bottom')\n j += 1\n \n peakDict = {'SiO\\u2082': [22], \n 
'C': [26, 44],\n 'SiC': [35.6, 60, 72]}\n styles = ['bo', 'rs', 'gD']\n #plt.legend(columnNames)\n plt.xlabel('2\\u03B8 (\\u00B0)')\n plt.ylabel('Intensity (a.u.)')\n ax.yaxis.set_minor_locator(AutoMinorLocator(2))\n ax.xaxis.set_minor_locator(AutoMinorLocator(2))\n ax.set_xlim(5, 95)\n ax.xaxis.set_ticks(np.arange(10, 100, 10))\n ax.set_ylim(1000, ax.get_ylim()[1]+3000)\n ax.yaxis.set_ticks(np.arange(2000, ax.get_ylim()[1]+3000, 4000))\n #plt.text(0.5, 1.01, '%s'%(sheetName), ha='center', va='bottom', transform=ax.transAxes) \n \n i = 0\n for phase in phases:\n if phase in peakDict:\n for peak in peakDict[phase]:\n #Uses the spline from the last sample to find the\n #y value to place the phase markers\n plt.plot(peak, 1200 + spline(peak), styles[i], mec='k', mew=0.7)\n plt.plot(0.1 +0.2*i, 0.96, styles[i], mec='k', mew=0.7, transform=ax.transAxes)\n plt.text(0.125 + 0.2*i, 0.955, phase, ha='left',\n va='center', transform=ax.transAxes)\n i += 1\n plt.show()\n \n return dataFrame\n\n'''\nInput Here, put different samples into different inputData lists\n''' \nif __name__ == '__main__':\n \n # Makes the default serif font into Times New Roman\n plt.rcParams['font.serif'] = \"Times New Roman\"\n # Makes the default font type into serif i.e. always use Times New Roman\n plt.rcParams['font.family'] = \"serif\"\n plt.rcParams['font.size'] = 12\n plt.rcParams['mathtext.default'] = \"regular\"\n plt.rcParams['xtick.direction'] = 'in'\n plt.rcParams['ytick.direction'] = 'in'\n plt.rcParams['xtick.minor.visible']=True\n plt.rcParams['ytick.minor.visible']=True\n plt.rcParams['xtick.major.size'] = 5\n plt.rcParams['xtick.major.width'] = 0.6\n plt.rcParams['xtick.minor.size'] = 2.5\n plt.rcParams['xtick.minor.width'] = 0.6\n plt.rcParams['ytick.major.size'] = 5\n plt.rcParams['ytick.major.width'] = 0.6\n plt.rcParams['ytick.minor.size'] = 2.5\n plt.rcParams['ytick.minor.width'] = 0.6\n plt.rcParams['lines.linewidth'] = 1.2\n plt.rcParams['lines.markersize'] = 5\n plt.rcParams['axes.linewidth'] = 0.6\n plt.rcParams['legend.frameon']=False\n plt.rcParams['figure.autolayout'] = True\n \n time0 = time.time()\n \n #Writes the panda dataframes to excel sheets and then save the file\n writer = pd.ExcelWriter(r\"..\\Output\\XRD-test6.xlsx\",\n engine='xlsxwriter')\n workbook = writer.book\n \n #Formatting styles for the Excel workbook\n odd_header_format = workbook.add_format({'text_wrap': True, 'text_v_align': 2,\n 'text_h_align': 2, 'bold':True,\n 'bg_color':'DBEDFF', 'font_size':12})\n even_header_format = workbook.add_format({'text_wrap': True, 'text_v_align': 2,\n 'text_h_align': 2, 'bold':True,\n 'bg_color':'FFEAD6', 'font_size':12})\n odd_subheader_format = workbook.add_format({'text_wrap': True, 'text_v_align': 2,\n 'text_h_align': 2, 'bold':True,\n 'bottom':True, 'bg_color':'DBEDFF'})\n even_subheader_format = workbook.add_format({'text_wrap': True, 'text_v_align': 2,\n 'text_h_align': 2, 'bold':True,\n 'bottom':True, 'bg_color':'FFEAD6'})\n odd_colnum_format = workbook.add_format({'num_format': '0.0', 'bg_color':'DBEDFF'})\n even_colnum_format = workbook.add_format({'num_format': '0.0', 'bg_color':'FFEAD6'}) \n \n searchDirectory = r\"..\\Raw Data\\XRD\"\n \n TMTVS = fileFinder(fileDirectory = searchDirectory+r'\\TMTVS',\n keyword_1=['1100', '1300', '1400'],\n keyword_2=['100', '10TMTVS', '20TMTVS', '30TMTVS', '40TMTVS'],\n additional_keywords='PSO', skip_asking=True)\n \n POSS = fileFinder(fileDirectory = searchDirectory+r'\\POSS',\n keyword_1=['1100', '1300', '1400'],\n keyword_2=['100', '10POSS', 
'20POSS', '30POSS', '40POSS'],\n additional_keywords='PSO', skip_asking=True)\n \n generateXRD(TMTVS[0],\n ['PSO', '10T', '20T', '30T', '40T'],\n 'TMTVS-1100\\u00B0C', True, True,\n phases=['SiO\\u2082', 'C'],\n offset = [0, 3000, 6000, 9000, 12000]) \n generateXRD(TMTVS[1],\n ['PSO', '10T', '20T', '30T', '40T'],\n 'TMTVS-1300\\u00B0C', True, True,\n phases = ['SiO\\u2082', 'C', 'SiC'],\n offset = [0, 3000, 8000, 11000, 14000]) \n generateXRD(TMTVS[2],\n ['PSO', '10T', '20T', '30T', '40T'],\n 'TMTVS-1400\\u00B0C', True, True,\n phases = ['SiO\\u2082', 'C', 'SiC'],\n offset = [0, 4000, 7000, 13000, 16000]) \n generateXRD(POSS[0],\n ['PSO', '10P', '20P', '30P', '40P'],\n 'POSS-1100\\u00B0C', True, True,\n phases = ['SiO\\u2082', 'C'],\n offset = [0, 3000, 6000, 9000, 13000])\n generateXRD(POSS[1],\n ['PSO', '10P', '20P', '30P', '40P'],\n 'POSS-1300\\u00B0C', True, True,\n phases = ['SiO\\u2082', 'C', 'SiC'],\n offset = [0, 5000, 8000, 11000, 14000])\n generateXRD(POSS[2],\n ['PSO', '10P', '20P', '30P', '40P'],\n 'POSS-1400\\u00B0C', True, True,\n phases = ['SiO\\u2082', 'C', 'SiC'],\n offset = [0, 4000, 6000, 9000, 16000])\n \n #newPath = r\"..\\Output\\XRD\"\n #fileMover(TMTVS[0], newPath+'/TMTVS/1100') \n #fileMover(TMTVS[1], newPath+'/TMTVS/1300')\n #fileMover(TMTVS[2], newPath+'/TMTVS/1400')\n #fileMover(POSS[0], newPath+'/POSS/1100') \n #fileMover(POSS[1], newPath+'/POSS/1300')\n #fileMover(POSS[2], newPath+'/POSS/1400')\n \n print('\\n\\n'+'-'*8+f' {time.time()-time0:.2f} seconds '+'-'*8) \n \n #Saves the dataframes to an Excel sheet\n #writer.save()\n #print('\\n\\n'+'-'*8+f' {time.time()-time0:.2f} seconds '+'-'*8)","sub_path":"XRD/XRD_Excel_Maker_alternate2.py","file_name":"XRD_Excel_Maker_alternate2.py","file_ext":"py","file_size_in_byte":12051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"647065575","text":"import logging\nimport os\nimport sys\nimport numpy\nimport time\nimport theano\nfrom theano.tensor.type import TensorType\nfrom blocks.algorithms import GradientDescent, Adam\nfrom blocks.extensions import FinishAfter, Printing\nfrom blocks.extensions.monitoring import TrainingDataMonitoring\nfrom blocks.graph import ComputationGraph\nfrom blocks.main_loop import MainLoop\nfrom blocks.model import Model\nfrom fuel.schemes import ShuffledScheme, SequentialScheme\nfrom utils import prepare_dir, load_df, DummyLoop\nfrom utils import SaveExpParams, SaveLog, SaveParams, AttributeDict\nfrom nn import ApproxTestMonitoring, FinalTestMonitoring, TestMonitoring\nfrom nn import LRDecay\nfrom ladder import LadderAE\nfrom utils import make_datastream, setup_data\nfrom random import randint\nlogger = logging.getLogger('main')\n\n\ndef setup_model(p):\n ladder = LadderAE(p)\n # Setup inputs\n input_type = TensorType(\n 'float32', [False] * (len(p.encoder_layers[0]) + 1))\n x_only = input_type('features_unlabeled')\n x = input_type('features_labeled')\n y = theano.tensor.lvector('targets_labeled')\n ladder.apply(x, y, x_only)\n\n # Load parameters if requested\n if p.get('load_from'):\n with open(p.load_from + '/trained_params.npz') as f:\n loaded = numpy.load(f)\n model = Model(ladder.costs.total)\n params_dicts = model.params\n params_names = params_dicts.keys()\n for param_name in params_names:\n param = params_dicts[param_name]\n # '/f_6_.W' --> 'f_6_.W'\n slash_index = param_name.find('/')\n param_name = param_name[slash_index + 1:]\n assert param.get_value().shape == loaded[param_name].shape\n 
param.set_value(loaded[param_name])\n\n return ladder\n\n\ndef load_and_log_params(cli_params):\n cli_params = AttributeDict(cli_params)\n if cli_params.get('load_from'):\n p = load_df(cli_params.load_from, 'params').to_dict()[0]\n p = AttributeDict(p)\n for key in cli_params.iterkeys():\n if key not in p:\n p[key] = None\n new_params = cli_params\n loaded = True\n else:\n p = cli_params\n new_params = {}\n loaded = False\n\n # Make dseed seed unless specified explicitly\n if p.get('dseed') is None and p.get('seed') is not None:\n p['dseed'] = p['seed']\n\n logger.info('== COMMAND LINE ==')\n logger.info(' '.join(sys.argv))\n\n logger.info('== PARAMETERS ==')\n for k, v in p.iteritems():\n if new_params.get(k) is not None:\n p[k] = new_params[k]\n replace_str = \"<- \" + str(new_params.get(k))\n else:\n replace_str = \"\"\n logger.info(\" {:20}: {:<20} {}\".format(k, v, replace_str))\n return p, loaded\n\n\ndef get_error(args):\n \"\"\" Calculate the classification error \"\"\"\n args['data_type'] = args.get('data_type', 'test')\n args['no_load'] = 'g_'\n\n targets, acts = analyze(args)\n guess = numpy.argmax(acts, axis=1)\n correct = numpy.sum(numpy.equal(guess, targets.flatten()))\n\n return (1. - correct / float(len(guess))) * 100.\n\n\ndef analyze(cli_params):\n p, _ = load_and_log_params(cli_params)\n _, data, whiten, cnorm = setup_data(p, test_set=True)\n ladder = setup_model(p)\n\n # Analyze activations\n dset, indices, calc_batchnorm = {\n 'train': (data.train, data.train_ind, False),\n 'valid': (data.valid, data.valid_ind, True),\n 'test': (data.test, data.test_ind, True),\n }[p.data_type]\n\n if calc_batchnorm:\n monitored_variables = [\n ladder.costs.CE_clean,\n ladder.costs.CE_corr,\n ladder.error.clean,\n ladder.costs.total] + ladder.costs.denois.values()\n logger.info('Calculating batch normalization for clean.labeled path')\n main_loop = DummyLoop(\n extensions=[\n FinalTestMonitoring(\n monitored_variables,\n make_datastream(data.train, data.train_ind,\n # These need to match with the training\n p.batch_size,\n n_labeled=p.labeled_samples,\n n_unlabeled=len(data.train_ind),\n cnorm=cnorm,\n whiten=whiten, scheme=ShuffledScheme),\n make_datastream(data.valid, data.valid_ind,\n p.valid_batch_size,\n n_labeled=len(data.valid_ind),\n n_unlabeled=len(data.valid_ind),\n cnorm=cnorm,\n whiten=whiten, scheme=ShuffledScheme),\n prefix=\"valid_final\", before_training=True),\n Printing(),\n ])\n main_loop.run()\n\n # Make a datastream that has all the indices in the labeled pathway\n ds = make_datastream(dset, indices,\n batch_size=p.get('batch_size'),\n n_labeled=len(indices),\n n_unlabeled=len(indices),\n balanced_classes=False,\n whiten=whiten,\n cnorm=cnorm,\n scheme=SequentialScheme)\n\n # We want out the values after softmax\n outputs = ladder.act.clean.labeled.h[len(ladder.layers) - 1]\n\n # Replace the batch normalization paramameters with the shared variables\n if calc_batchnorm:\n outputreplacer = TestMonitoring()\n _, _, outputs = outputreplacer._get_bn_params(outputs)\n\n cg = ComputationGraph(outputs)\n f = cg.get_theano_function()\n\n it = ds.get_epoch_iterator(as_dict=True)\n res = []\n inputs = {'features_labeled': [],\n 'targets_labeled': [],\n 'features_unlabeled': []}\n # Loop over one epoch\n for d in it:\n # Store all inputs\n for k, v in d.iteritems():\n inputs[k] += [v]\n # Store outputs\n res += [f(*[d[str(inp)] for inp in cg.inputs])]\n\n # Concatenate all minibatches\n res = [numpy.vstack(minibatches) for minibatches in zip(*res)]\n inputs = {k: 
numpy.vstack(v) for k, v in inputs.iteritems()}\n\n return inputs['targets_labeled'], res[0]\n\n\ndef train(cli_params):\n cli_params['save_dir'] = prepare_dir(cli_params['save_to'])\n logfile = os.path.join(cli_params['save_dir'], 'log.txt')\n fh = logging.FileHandler(filename=logfile)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n p, loaded = load_and_log_params(cli_params)\n in_dim, data, whiten, cnorm = setup_data(p, test_set=True)\n if not loaded:\n p.encoder_layers = (in_dim,) + p.encoder_layers\n\n ladder = setup_model(p)\n\n # Training\n all_params = ComputationGraph([ladder.costs.total]).parameters\n logger.info('Number of found parameters: %s' % str(len(all_params)))\n logger.info('Found parameters:\\n %s' % str(all_params))\n\n # Fetch all batch normalization updates. They are in the clean path.\n bn_updates = ComputationGraph([ladder.costs.CE_clean]).updates\n assert 'counter' in [u.name for u in bn_updates.keys()], \\\n 'No batch norm params in graph - the graph has been cut?'\n\n training_algorithm = GradientDescent(\n cost=ladder.costs.total, params=all_params,\n step_rule=Adam(learning_rate=ladder.lr))\n # In addition to actual training, also do BN variable approximations\n training_algorithm.add_updates(bn_updates)\n\n model = Model(ladder.costs.total)\n\n monitored_variables = [\n ladder.costs.CE_clean,\n ladder.costs.CE_corr,\n ladder.error.clean,\n training_algorithm.total_gradient_norm,\n ladder.costs.total] + ladder.costs.denois.values()\n\n main_loop = MainLoop(\n training_algorithm,\n make_datastream(data.train, data.train_ind,\n p.batch_size,\n n_labeled=p.labeled_samples,\n n_unlabeled=p.unlabeled_samples,\n whiten=whiten,\n cnorm=cnorm),\n model=model,\n extensions=[\n FinishAfter(after_n_epochs=p.num_epochs),\n ApproxTestMonitoring(\n monitored_variables,\n make_datastream(data.valid, data.valid_ind,\n p.valid_batch_size, whiten=whiten, cnorm=cnorm,\n scheme=ShuffledScheme),\n prefix=\"valid_approx\"),\n FinalTestMonitoring(\n monitored_variables,\n make_datastream(data.train, data.train_ind,\n p.batch_size,\n n_labeled=p.labeled_samples,\n whiten=whiten, cnorm=cnorm,\n scheme=ShuffledScheme),\n make_datastream(data.valid, data.valid_ind,\n p.valid_batch_size,\n n_labeled=len(data.valid_ind),\n whiten=whiten, cnorm=cnorm,\n scheme=ShuffledScheme),\n prefix=\"valid_final\",\n after_n_epochs=p.num_epochs),\n FinalTestMonitoring(\n monitored_variables,\n make_datastream(data.train, data.train_ind,\n p.batch_size,\n n_labeled=p.labeled_samples,\n whiten=whiten, cnorm=cnorm,\n scheme=ShuffledScheme),\n make_datastream(data.test, data.test_ind,\n p.valid_batch_size,\n n_labeled=len(data.test_ind),\n whiten=whiten, cnorm=cnorm,\n scheme=ShuffledScheme),\n prefix=\"test_final\",\n after_n_epochs=p.num_epochs),\n\n TrainingDataMonitoring(\n monitored_variables,\n prefix=\"train\", after_epoch=True),\n\n SaveParams('valid_approx_CE_clean', model, p.save_dir,\n after_epoch=False),\n SaveExpParams(p, p.save_dir, before_training=True),\n SaveLog(p.save_dir, after_epoch=True),\n LRDecay(ladder.lr, p.num_epochs * p.lrate_decay, p.num_epochs,\n after_epoch=True),\n Printing()])\n main_loop.run()\n\n\nif __name__ == \"__main__\":\n index = int(sys.argv[1])\n\n dseeds = [1, 777, 405, 186, 620, 209, 172, 734, 154, 996]\n lrs = [0.001, 0.002, 0.0001, 0.0002]\n labeled_sampless = [100, 60000]\n denoising_cost_xs = [(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n (100.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n (500.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n (1000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n 
(4000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n                         (1000, 10, 0.1, 0.1, 0.1, 0.1, 0.1),\n                         (6000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n                         (500.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n                         (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0),\n                         (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 100.0)]\n    f_local_noise_std = [(0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3),\n                         (0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n                         (0.4, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n                         (0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n                         (0.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n                         (0.7, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n                         (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n                         (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3),\n                         (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5),\n                         (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7)]\n    decoder_specs = [('sig', 'sig', 'sig', 'sig', 'sig', 'sig', 'sig'),\n                     ('sig', 'vert', 'vert', 'vert', 'vert', 'vert', 'vert')]\n    seed = 1\n\n    def pick_random(options):\n        random_index = randint(0, len(options) - 1)\n        return options[random_index]\n\n    logging.basicConfig(level=logging.INFO)\n    evaluate = False\n    t_start = time.time()\n    if evaluate:\n        d = {'load_from': 'results/mnist_all_bottom51',\n             'cmd': 'evaluate',\n             'data_type': 'test'}\n        err = get_error(d)\n        logger.info('Test error: %f' % err)\n    else:\n        d = {'dseed': 1, 'seed': 1, 'num_epochs': 150,\n             'lr': 0.002, 'lrate_decay': 0.67,\n             'unlabeled_samples': 60000, 'labeled_samples': 100,\n             'denoising_cost_x': (1000, 10, 0.1, 0.1, 0.1, 0.1, 0.1),\n             'f_local_noise_std': (0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3),\n             'decoder_spec': ('sig', 'sig', 'sig', 'sig', 'sig', 'sig', 'sig'),\n             'save_to': 'mnist_all_bottom',\n             'cmd': 'train',\n             'top_c': True, 'batch_size': 100, 'dataset': 'mnist',\n             'valid_set_size': 10000, 'whiten_zca': 0,\n             'act': 'relu', 'valid_batch_size': 100, 'contrast_norm': 0,\n             'encoder_layers': ('1000', '500', '250', '250', '250', '10'),\n             }\n        train(d)\n    logger.info('Took %.1f minutes' % ((time.time() - t_start) / 60.))\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":13442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"629552650","text":"galera = list()\r\ndados = list()\r\nmaior = menor = 0\r\nfor c in range(0, 3):\r\n    dados.append(str(input('Name: ')))\r\n    dados.append(int(input('Age: ')))\r\n    galera.append(dados[:])\r\n    dados.clear()\r\nprint(galera)\r\nfor p in galera:\r\n    if p[1] >= 21:\r\n        print(f'{p[0]} is an adult')\r\n        maior += 1\r\n    else:\r\n        print(f'{p[0]} is a minor')\r\n        menor += 1\r\nprint(f'We have {maior + menor} people in total: {maior} adults and {menor} minors')","sub_path":"aula18d.py","file_name":"aula18d.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"122361971","text":"import sys\nfrom model import data_pre, inference, accuracy\nimport time\nimport numpy as np\nimport itertools\nimport pickle\n\nniter = 1\npchar = \"uniform\"\n\n\nss = [0.1,0.5,5,20]\nbetas = [0.01,0.1,0.5,0.9]\ngs = [0.01,0.1,0.5,0.9]\ncombinations = list(itertools.product(ss,betas,gs))\n## select the parameter combination for this array-job index\nidx = int(sys.argv[1]) - 1\n(s,beta, g) = combinations[idx]\n\nconfig = {\"s\":s, \"beta\":beta,\"g\":g, \"niter\":niter, \"pchar\":pchar}\nprint(\"s = {}; beta = {}; g = {}\\n\".format(s,beta,g))\n\n(Xs, Btruth) = data_pre()\nstart = time.time()\nBs,NBC = inference(Xs,Btruth,config)\nruntime = time.time() - start\nprint(\"runtime : {}\".format(runtime))\n\n# import pdb\n# pdb.set_trace()\nacc = accuracy(Bs, Btruth)\n\nprint(\"acc : {}\".format(acc))\n\n
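# bundle the hyper-parameters and the resulting accuracy into one record (saved below)\n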
{}\nout[\"s\"] = s\nout[\"beta\"] = beta\nout[\"g\"] = g\nout[\"acc\"] = acc\n\n## save result\nout_name = \"../output/exper_2a_{}.pkl\".format(idx)\nwith open(out_name, \"wb\") as f:\n\tpickle.dump(out, f)\n\npred_name = \"../output/exper_2a_{}_B.pkl\".format(idx)\nwith open(pred_name, \"wb\") as f:\n pickle.dump(Bs, f)\n\nNBC_name = \"../output/exper_2a_{}_NBC.pkl\".format(idx)\nwith open(NBC_name, \"wb\") as f:\n pickle.dump(NBC, f)\n","sub_path":"hw5/code/exper_2a.py","file_name":"exper_2a.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"559658778","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport logging\n\nimport numpy as np\nfrom astropy.coordinates import Angle, SkyCoord\nfrom astropy.extern import six\nfrom astropy.wcs.utils import skycoord_to_pixel\n\nfrom . import CountsSpectrum\nfrom .results import SpectrumStats\nfrom ..extern.pathlib import Path\nfrom ..extern.bunch import Bunch\nfrom ..background import ring_area_factor, Cube\nfrom ..data import DataStore, ObservationTable\nfrom ..image import ExclusionMask\nfrom ..region import SkyCircleRegion, find_reflected_regions\nfrom ..utils.energy import EnergyBounds, Energy\nfrom ..utils.scripts import (\n get_parser, set_up_logging_from_args, read_yaml, make_path,\n)\n\n__all__ = [\n 'SpectrumExtraction',\n 'SpectrumObservation',\n 'SpectrumObservationList',\n]\n\nlog = logging.getLogger(__name__)\n\n\nclass SpectrumExtraction(object):\n \"\"\"Class for creating input data to 1D spectrum fitting\n\n The purpose of this class is to create 1D counts on and off counts vectors\n as well as an effective area vector and an energy dispersion matrix starting\n from an event list and 2D irfs for as set of observations. 
The container\n    class for one specific observation is `~gammapy.spectrum.SpectrumObservation`.\n    The present class is responsible for filling a list of such observations,\n    starting from some extraction parameters.\n    For more info see :ref:`spectral_fitting`.\n\n    Parameters\n    ----------\n    datastore : `~gammapy.data.DataStore`\n        Data for the analysis\n    obs_ids : list, str\n        List of observations or file containing such a list\n    on_region : `gammapy.region.SkyCircleRegion`\n        Circular region to extract on counts\n    exclusion : `~gammapy.image.ExclusionMask`\n        Exclusion regions\n    bkg_method : dict\n        Background method including necessary parameters\n    nobs : int\n        Number of observations to process; 0 or a negative value (default: -1)\n        means all observations\n    ebounds : `~gammapy.utils.energy.EnergyBounds`, optional\n        Reconstructed energy binning definition\n    \"\"\"\n\n    def __init__(self, datastore, obs_ids, on_region, exclusion, bkg_method,\n                 nobs=-1, ebounds=None, **kwargs):\n\n        self.on_region = on_region\n        self.store = datastore\n        self.exclusion = exclusion\n        if ebounds is None:\n            ebounds = EnergyBounds.equal_log_spacing(0.1, 10, 20, 'TeV')\n        self.ebounds = ebounds\n        self.bkg_method = bkg_method\n        self.nobs = nobs\n        self.extra_info = kwargs\n\n        if isinstance(obs_ids, six.string_types):\n            temp = make_path(obs_ids)\n            obs_ids = np.loadtxt(str(temp), dtype=np.int)\n        self.obs_ids = obs_ids\n\n        self._observations = None\n\n    def run(self):\n        \"\"\"Run all steps\n\n        Extract spectrum, filter observations, write results to disk.\n        \"\"\"\n        self.extract_spectrum()\n        if self.bkg_method['type'] == 'reflected':\n            self.filter_observations()\n\n        o = self.observations\n        o.write_ogip_data('ogip_data')\n        o.total_spectrum.spectrum_stats.to_yaml('total_spectrum_stats.yaml')\n        o.to_observation_table().write('observation_table.fits', format='fits',\n                                       overwrite=True)\n\n    def filter_observations(self):\n        \"\"\"Filter observations by number of reflected regions\"\"\"\n        obs = self.observations\n        mask = obs.filter_by_reflected_regions(self.bkg_method['n_min'])\n        self._observations = SpectrumObservationList(np.asarray(obs)[mask])\n\n    def extract_spectrum(self, nobs=None):\n        \"\"\"Extract 1D spectral information\n\n        The result can be obtained via\n        :func:`~gammapy.spectrum.spectrum_extraction.observations`\n        \"\"\"\n        nobs = self.nobs if nobs is None else nobs\n        observations = []\n        for i, val in enumerate(np.atleast_1d(self.obs_ids)):\n            log.info('Extracting spectrum for observation {}'.format(val))\n            try:\n                temp = SpectrumObservation.from_datastore(val, self.store,\n                                                          self.on_region,\n                                                          self.bkg_method,\n                                                          self.ebounds,\n                                                          self.exclusion,\n                                                          **self.extra_info\n                                                          )\n            except IndexError as err:\n                log.warning(\n                    'Could not load observation {} from store {}. '\n                    'Error: \\n{}'.format(val, self.store.base_dir, err))\n                nobs += 1\n                continue\n\n            observations.append(temp)\n            if i == nobs - 1:\n                break\n\n        self._observations = SpectrumObservationList(observations)\n\n        if len(self.observations) == 0:\n            raise ValueError(\"No valid observations found\")\n\n    @property\n    def observations(self):\n        \"\"\"`~gammapy.spectrum.SpectrumObservationList` of all observations\n\n        This list is generated via\n        :func:`~gammapy.spectrum.spectrum_extraction.extract_spectrum`\n        when the property is first called and the result is cached.\n        \"\"\"\n        if self._observations is None:\n            self.extract_spectrum()\n        return self._observations\n\n    def copy(self, bkg_method=None):\n        \"\"\"Return copy of `~gammapy.spectrum.SpectrumExtraction`\n\n        Parameters\n        ----------\n        bkg_method : dict, optional\n            New background 
estimation method\n        \"\"\"\n\n        bkg_method = self.bkg_method if bkg_method is None else bkg_method\n\n        ana = SpectrumExtraction(datastore=self.store, obs_ids=self.obs_ids,\n                                 on_region=self.on_region,\n                                 bkg_method=bkg_method,\n                                 exclusion=self.exclusion, nobs=0,\n                                 ebounds=self.ebounds)\n        return ana\n\n    @classmethod\n    def from_config(cls, config, **kwargs):\n        \"\"\"Create `~gammapy.spectrum.SpectrumExtraction` from config dict\n\n        Parameters\n        ----------\n        config : dict\n            Config dict\n        \"\"\"\n        config = config['extraction']\n\n        # Observations\n        obs = config['data']['runlist']\n        storename = config['data']['datastore']\n        store = DataStore.from_all(storename)\n        nobs = config['data']['nruns']\n\n        # Binning\n        sec = config['binning']\n        if sec['equal_log_spacing']:\n            emin = Energy(sec['emin'])\n            emax = Energy(sec['emax'])\n            nbins = sec['nbins']\n            ebounds = EnergyBounds.equal_log_spacing(\n                emin, emax, nbins)\n        else:\n            if sec['binning'] is None:\n                raise ValueError(\"No binning specified\")\n\n        # ON region\n        radius = Angle(config['on_region']['radius'])\n        x = config['on_region']['center_x']\n        y = config['on_region']['center_y']\n        frame = config['on_region']['system']\n        center = SkyCoord(x, y, frame=frame)\n        on_region = SkyCircleRegion(center, radius)\n\n        # OFF region\n        bkg_method = config['off_region']\n\n        # Exclusion\n        excl_file = config['excluded_regions']['file']\n        exclusion = ExclusionMask.from_fits(excl_file)\n\n        return cls(datastore=store, obs_ids=obs, on_region=on_region,\n                   bkg_method=bkg_method, exclusion=exclusion,\n                   nobs=nobs, ebounds=ebounds, **kwargs)\n\n    @classmethod\n    def from_configfile(cls, configfile):\n        \"\"\"Create `~gammapy.spectrum.SpectrumExtraction` from configfile\n\n        Parameters\n        ----------\n        configfile : str\n            YAML config file\n        \"\"\"\n        import yaml\n        with open(configfile) as fh:\n            config = yaml.safe_load(fh)\n\n        return cls.from_config(config)\n\n    def info(self):\n        \"\"\"Return a short summary string\n        \"\"\"\n        ss = \"\\nSpectrum Extraction\\n\"\n        ss += \"Observations : {}\\n\".format(len(self.observations))\n        ss += \"ON region : {}\\n\".format(self.on_region.pos)\n\n        return ss\n\n\nclass SpectrumObservation(object):\n    \"\"\"Storage class holding ingredients for 1D region based spectral analysis\n    \"\"\"\n\n    def __init__(self, obs_id, on_vector, off_vector, energy_dispersion,\n                 effective_area, meta=None):\n        self.obs_id = obs_id\n        self.on_vector = on_vector\n        self.off_vector = off_vector\n        self.energy_dispersion = energy_dispersion\n        self.effective_area = effective_area\n        self.meta = Bunch(meta) if meta is not None else Bunch()\n\n        # These values are needed for I/O\n        self.meta.setdefault('phafile', 'None')\n\n    @classmethod\n    def read_ogip(cls, phafile):\n        \"\"\" Read `~gammapy.spectrum.SpectrumObservation` from OGIP files\n\n        BKG file, ARF, and RMF must be set in the PHA header\n\n        Parameters\n        ----------\n        phafile : str\n            OGIP PHA file to read\n        \"\"\"\n        # Imported here to avoid circular imports\n        from ..irf import EnergyDispersion, EffectiveAreaTable\n\n        f = make_path(phafile)\n        base = f.parent\n        on_vector = CountsSpectrum.read(f)\n\n        meta = on_vector.meta\n        energy_dispersion = EnergyDispersion.read(str(base / meta.RESPFILE))\n        effective_area = EffectiveAreaTable.read(str(base / meta.ANCRFILE))\n        off_vector = CountsSpectrum.read(str(base / meta.BACKFILE),\n                                         str(base / meta.RESPFILE))\n\n        meta.update(phafile=phafile)\n        return cls(meta.OBS_ID, on_vector, off_vector, energy_dispersion,\n                   effective_area, meta)\n\n    @classmethod\n    def from_datastore(cls, obs_id, store, on_region, 
bkg_method, ebounds,\n                       exclusion, save_meta=True, dry_run=False, calc_containment=False):\n        \"\"\" Create Spectrum Observation from datastore\n\n        Extraction parameters are stored in the meta attribute\n\n        Parameters\n        ----------\n        obs_id : int\n            Observation ID (run number)\n        store : `~gammapy.data.DataStore`\n            Data Store\n        on_region : `gammapy.region.SkyCircleRegion`\n            Circular region to extract on counts\n        bkg_method : dict\n            Background method including necessary parameters\n        ebounds : `~gammapy.utils.energy.EnergyBounds`\n            Reconstructed energy binning definition\n        exclusion : `~gammapy.image.ExclusionMask`\n            Exclusion mask\n        save_meta : bool, optional\n            Save meta information, default: True\n        dry_run : bool, optional\n            Only process meta data; no actual spectra are extracted\n        calc_containment : bool, optional\n            Calculate containment fraction of the on region\n        \"\"\"\n\n        event_list = store.load(obs_id=obs_id, filetype='events')\n        on = None\n        off = None\n        aeff = None\n        edisp = None\n\n        m = Bunch()\n        m['pointing'] = event_list.pointing_radec\n        m['offset'] = m.pointing.separation(on_region.pos)\n        m['livetime'] = event_list.observation_live_time_duration\n        m['exclusion'] = exclusion\n        m['on_region'] = on_region\n        m['bkg_method'] = bkg_method\n        m['datastore'] = store\n        m['ebounds'] = ebounds\n        m['obs_id'] = obs_id\n\n        if calc_containment:\n            psf2d = store.load(obs_id=obs_id, filetype='psf')\n            val = Energy('10 TeV')\n            psf = psf2d.psf_at_energy_and_theta(val, m.offset)\n            cont = psf.containment_fraction(m.on_region.radius)\n            m['psf_containment'] = float(cont)\n\n        if dry_run:\n            return cls(obs_id, None, None, None, None, meta=m)\n\n        b = BackgroundEstimator(event_list, m)\n        b.make_off_vector()\n        m['off_list'] = b.off_list\n        m['off_region'] = b.off_region\n        off_vec = b.off_vec\n        off_vec.meta.update(backscal=b.backscal)\n        off_vec.meta.update(livetime=m.livetime)\n\n        m['on_list'] = event_list.select_circular_region(on_region)\n        on_vec = CountsSpectrum.from_eventlist(m.on_list, ebounds)\n\n        aeff2d = store.load(obs_id=obs_id, filetype='aeff')\n        arf_vec = aeff2d.to_effective_area_table(m.offset)\n        elo, ehi = arf_vec.energy_thresh_lo, arf_vec.energy_thresh_hi\n        m['safe_energy_range'] = EnergyBounds([elo, ehi])\n\n        edisp2d = store.load(obs_id=obs_id, filetype='edisp')\n        rmf_mat = edisp2d.to_energy_dispersion(m.offset, e_reco=ebounds)\n\n        m = None if not save_meta else m\n\n        # Todo: Agree where to store all meta info\n        on_vec.meta.update(m)\n\n        return cls(obs_id, on_vec, off_vec, rmf_mat, arf_vec, meta=m)\n\n    @classmethod\n    def from_observation_list(cls, obs_list, obs_id=None):\n        \"\"\"Create a stacked `~gammapy.spectrum.SpectrumObservation` from a list\n\n        Observation stacking is implemented as follows\n\n        Averaged exposure ratio between ON and OFF regions\n\n        :math:`\\\\alpha_{\\\\mathrm{tot}}` for all observations is calculated as\n\n        .. 
math:: \\\\alpha_{\\\\mathrm{tot}} = \\\\frac{\\\\sum_{i}\\\\alpha_i \\\\cdot N_i}{\\\\sum_{i} N_i}\n\n where :math:`N_i` is the number of OFF counts for observation :math:`i`\n\n Parameters\n ----------\n obs_list : list of `~gammapy.spectrum.SpectrumObservations`\n Observations to stack\n obs_id : int, optional\n Observation ID for stacked observations\n \"\"\"\n obs_id = 0 if obs_id is None else obs_id\n\n on_vec = np.sum([o.on_vector for o in obs_list])\n off_vec = np.sum([o.off_vector for o in obs_list])\n # Todo : Stack RMF and ARF\n arf = None\n rmf = None\n\n # Calculate average alpha (remove?)\n val = [o.alpha * o.off_vector.total_counts for o in obs_list]\n num = np.sum(val)\n den = np.sum([o.off_vector.total_counts for o in obs_list])\n alpha = num/den\n off_vec.meta.backscal = 1. / alpha\n\n #Calculate safe energy range\n emin = min([_.meta.safe_energy_range[0] for _ in obs_list])\n emax = max([_.meta.safe_energy_range[1] for _ in obs_list])\n\n m = Bunch()\n m['energy_range'] = EnergyBounds([emin, emax])\n m['obs_ids'] = [o.obs_id for o in obs_list]\n m['alpha_method1'] = alpha\n return cls(obs_id, on_vec, off_vec, arf, rmf, meta=m)\n\n @property\n def alpha(self):\n \"\"\"Exposure ratio between ON and OFF region\"\"\"\n return self.on_vector.meta.backscal / self.off_vector.meta.backscal\n\n @property\n def excess_vector(self):\n \"\"\"Excess vector\n\n Excess = n_on - alpha * n_off\n \"\"\"\n return self.on_vector + self.off_vector * self.alpha * -1\n\n @property\n def spectrum_stats(self):\n \"\"\"`~gammapy.spectrum.results.SpectrumStats`\n \"\"\"\n n_on = self.on_vector.total_counts\n n_off = self.off_vector.total_counts\n val = dict()\n val['n_on'] = n_on\n val['n_off'] = n_off\n val['alpha'] = self.alpha\n val['excess'] = float(n_on) - float(n_off) * self.alpha\n val['energy_range'] = self.meta.energy_range\n return SpectrumStats(**val)\n\n def restrict_energy_range(self, energy_range=None, method='binned'):\n \"\"\"Restrict to a given energy range\n\n If no energy range is given, it will be extracted from the PHA header.\n Tow methods are available . Unbinned method: The new counts vectors are\n created from the list of on and off events. Therefore this list must be\n saved in the meta info. Binned method: The counts vectors are taken as\n a basis for the energy range restriction. 
Only bins that are entirely\n contained in the desired energy range are copied.\n\n Parameters\n ----------\n energy_range : `~gammapy.utils.energy.EnergyBounds`, optional\n Desired energy range\n method : str {'unbinned', 'binned'}\n Use unbinned on list / binned on vector\n\n Returns\n -------\n obs : `~gammapy.spectrum.spectrum_extraction.SpectrumObservation`\n Spectrum observation in desired energy range\n \"\"\"\n\n if energy_range is None:\n arf = self.effective_area\n energy_range = [arf.energy_thresh_lo, arf.energy_thresh_hi]\n\n energy_range = EnergyBounds(energy_range)\n ebounds = self.on_vector.energy_bounds\n if method == 'unbinned':\n on_list_temp = self.meta.on_list.select_energy(energy_range)\n off_list_temp = self.meta.off_list.select_energy(energy_range)\n on_vec = CountsSpectrum.from_eventlist(on_list_temp, ebounds)\n off_vec = CountsSpectrum.from_eventlist(off_list_temp, ebounds)\n elif method == 'binned':\n val = self.on_vector.energy_bounds.lower_bounds\n mask = np.invert(energy_range.contains(val))\n on_counts = np.copy(self.on_vector.counts)\n on_counts[mask] = 0\n off_counts = np.copy(self.off_vector.counts)\n off_counts[mask] = 0\n on_vec = CountsSpectrum(on_counts, ebounds)\n off_vec = CountsSpectrum(off_counts, ebounds)\n else:\n raise ValueError('Undefined method: {}'.format(method))\n\n off_vec.meta.update(backscal = self.off_vector.meta.backscal)\n m = copy.deepcopy(self.meta)\n m.update(energy_range=energy_range)\n\n return SpectrumObservation(self.obs_id, on_vec, off_vec,\n self.energy_dispersion, self.effective_area,\n meta=m)\n\n def write_ogip(self, phafile=None, bkgfile=None, rmffile=None, arffile=None,\n outdir=None, clobber=True):\n \"\"\"Write OGIP files\n\n The arf, rmf and bkg files are set in the :ref:`gadf:ogip-pha` FITS\n header. 
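All four files are written into ``outdir``.\n        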
If no filenames are given, default names will be chosen\n        from the observation ID, e.g. ``pha_run42.pha`` and ``bkg_run42.fits``\n        for ``obs_id=42``.\n\n        Parameters\n        ----------\n        phafile : `~gammapy.extern.pathlib.Path`, str\n            PHA filename\n        bkgfile : str\n            BKG filename\n        arffile : str\n            ARF filename\n        rmffile : str\n            RMF filename\n        outdir : str, optional\n            directory to write the files to, default: pwd\n        clobber : bool\n            Overwrite\n        \"\"\"\n\n        cwd = Path.cwd()\n        outdir = cwd if outdir is None else cwd / make_path(outdir)\n        outdir.mkdir(exist_ok=True, parents=True)\n\n        if phafile is None:\n            phafile = \"pha_run{}.pha\".format(self.obs_id)\n        if arffile is None:\n            arffile = \"arf_run{}.fits\".format(self.obs_id)\n        if rmffile is None:\n            rmffile = \"rmf_run{}.fits\".format(self.obs_id)\n        if bkgfile is None:\n            bkgfile = \"bkg_run{}.fits\".format(self.obs_id)\n\n        self.meta['phafile'] = str(outdir/phafile)\n\n        self.on_vector.write(str(outdir/phafile), bkg=str(bkgfile), arf=str(arffile),\n                             rmf=str(rmffile), clobber=clobber)\n        self.off_vector.write(str(outdir/bkgfile), clobber=clobber)\n        self.effective_area.write(str(outdir/arffile), energy_unit='keV',\n                                  effarea_unit='cm2', clobber=clobber)\n        self.energy_dispersion.write(str(outdir/rmffile), energy_unit='keV',\n                                     clobber=clobber)\n\n    def plot_exclusion_mask(self, size=None, **kwargs):\n        \"\"\"Plot exclusion mask for this observation\n\n        The plot will be centered at the pointing position\n\n        Parameters\n        ----------\n        size : `~astropy.coordinates.Angle`\n            Edge length of the plot\n        \"\"\"\n        size = Angle('5 deg') if size is None else Angle(size)\n        ax = self.meta.exclusion.plot(**kwargs)\n        self._set_ax_limits(ax, size)\n        point = skycoord_to_pixel(self.meta.pointing, ax.wcs)\n        ax.scatter(point[0], point[1], s=250, marker=\"+\", color='black')\n        return ax\n\n    def plot_on_region(self, ax=None, **kwargs):\n        \"\"\"Plot target regions\"\"\"\n        ax = self.plot_exclusion_mask() if ax is None else ax\n        self.meta.on_region.plot(ax, **kwargs)\n        return ax\n\n    def plot_reflected_regions(self, ax=None, **kwargs):\n        \"\"\"Plot reflected regions\"\"\"\n        ax = self.plot_exclusion_mask() if ax is None else ax\n        self.meta.off_region.plot(ax, **kwargs)\n        return ax\n\n    def _check_binning(self, **kwargs):\n        \"\"\"Check that ARF and RMF binnings are compatible\n        \"\"\"\n        pass\n\n    def _set_ax_limits(self, ax, extent):\n\n        if 'GLAT' in ax.wcs.to_header()['CTYPE2']:\n            center = self.meta.pointing.galactic\n            xlim = (center.l + extent/2).value, (center.l - extent/2).value\n            ylim = (center.b + extent/2).value, (center.b - extent/2).value\n        else:\n            center = self.meta.pointing.icrs\n            xlim = (center.ra + extent/2).value, (center.ra - extent/2).value\n            ylim = (center.dec + extent/2).value, (center.dec - extent/2).value\n\n        limits = ax.wcs.wcs_world2pix(xlim, ylim, 1)\n        ax.set_xlim(limits[0])\n        ax.set_ylim(limits[1])\n\n\nclass SpectrumObservationList(list):\n    \"\"\"List of `~gammapy.spectrum.SpectrumObservation`\n    \"\"\"\n    def get_obs_by_id(self, id):\n        \"\"\"Return an observation with a certain id\n\n        Parameters\n        ----------\n        id : int\n            Observation Id (runnumber)\n\n        Returns\n        -------\n        observation : `~gammapy.spectrum.SpectrumObservation`\n            Spectrum observation\n        \"\"\"\n        ids = [o.obs_id for o in self]\n        try:\n            i = ids.index(id)\n        except ValueError:\n            raise ValueError(\"Observation {} not in list\".format(id))\n        return self[i]\n\n    @property\n    def total_spectrum(self):\n        return SpectrumObservation.from_observation_list(self)\n\n    def info(self):\n        \"\"\"Info string\"\"\"\n        ss = \" *** SpectrumObservationList ***\"\n        ss += \"\\n\\nNumber of observations: {}\".format(len(self))\n        ss += 
\"\\nObservation IDs: {}\".format([o.obs_id for o in self])\n\n return ss\n\n def filter_by_reflected_regions(self, n_min):\n \"\"\"Filter observation list according to number of reflected regions\n\n Condition: number of reflected regions >= nmin\n\n Parameters\n ----------\n n_min : int\n Minimum number of reflected regions\n\n Returns\n -------\n idx : `~np.array`\n Indices of element fulfilling the condition\n \"\"\"\n val = [o.off_vector.meta.backscal for o in self]\n condition = np.array(val) >= n_min\n idx = np.nonzero(condition)\n return idx[0]\n\n def write_ogip_data(self, outdir, **kwargs):\n \"\"\"Create OGIP files\n\n Parameters\n ----------\n outdir : str, `~gammapy.extern.pathlib.Path`\n write directory\n \"\"\"\n for obs in self:\n obs.write_ogip(outdir=outdir, **kwargs)\n\n @classmethod\n def read_ogip(cls, dir='ogip_data'):\n \"\"\"Read `~gammapy.spectrum.SpectrumObservationList` from OGIP files\n\n The pha file need to be contained in one directroy and have '.pha' as\n suffix\n\n Parameters\n ----------\n dir : str, Path\n Directory holding the OGIP data\n \"\"\"\n dir = make_path(dir)\n obs = [SpectrumObservation.read_ogip(_) for _ in dir.glob('*.pha')]\n return cls(obs)\n\n def to_observation_table(self):\n \"\"\"Create `~gammapy.data.ObservationTable`\"\"\"\n names = ['OBS_ID', 'PHAFILE', 'OFFSET']\n col1 = [o.obs_id for o in self]\n col2 = [o.meta.phafile for o in self]\n col3 = [o.meta.offset.value for o in self]\n return ObservationTable(data=[col1, col2, col3], names=names)\n\n\nclass BackgroundEstimator(object):\n \"\"\"TBD\n\n Select events inside off regsion. At one point this can be replaced by a\n more generic `~gammapy.regions` module\n\n For available methods see :ref:`spectrum_background_method`\n\n Parameters\n ----------\n event_list : `~gammapy.data.EventList`\n Event list\n params : dict\n Necessary parameters\n \"\"\"\n\n def __init__(self, event_list, params):\n self.event_list = event_list\n self.params = params\n m = self.params['bkg_method']['type']\n if m not in ['ring', 'reflected', 'bgmodel']:\n raise ValueError(\"Undefined background method: {}\".format(m))\n\n self.off_list = None\n self.off_vec = None\n self.backscal = None\n self.off_region = None\n\n def make_off_vector(self):\n m = self.params['bkg_method']['type']\n if m == \"ring\":\n self._make_off_vector_ring()\n elif m == \"reflected\":\n self._make_off_vector_reflected()\n elif m == \"bgmodel\":\n self._make_off_vector_bgmodel()\n\n def _make_off_vector_reflected(self):\n \"\"\"Helper function to create OFF vector from reflected regions\"\"\"\n kwargs = self.params['bkg_method'].copy()\n kwargs.pop('type')\n kwargs.pop('n_min')\n off = find_reflected_regions(self.params['on_region'],\n self.params['pointing'],\n self.params['exclusion'], **kwargs)\n off_list = self.event_list.select_circular_region(off)\n self.off_region = off\n self.backscal = len(off)\n self.off_list = off_list\n self.off_vec = CountsSpectrum.from_eventlist(off_list, self.params['ebounds'])\n\n def _make_off_vector_ring(self):\n \"\"\"Helper function to create OFF vector from ring\"\"\"\n center = self.params['on_region'].pos\n radius = self.params['on_region'].radius\n m = self.params['bkg_method']\n inner = Angle(m['inner_radius'])\n outer = Angle(m['outer_radius'])\n off_list = self.event_list.select_sky_ring(center, inner, outer)\n self.backscal = ring_area_factor(radius.deg, inner.deg, outer.deg)\n self.off_list = off_list\n self.off_vec = CountsSpectrum.from_eventlist(off_list, self.params['ebounds'])\n\n def 
_make_off_vector_bgmodel(self):\n        \"\"\"Helper function to create OFF vector from BgModel\"\"\"\n        s = self.params['datastore']\n        filename = s.filename(obs_id=self.params.obs_id, filetype='background')\n        cube = Cube.read(filename, scheme='bg_cube')\n        # TODO: Properly transform to SkyCoords\n        coords = Angle([self.params['offset'], '0 deg'])\n        spec = cube.make_spectrum(coords, self.params['ebounds'])\n        cnts = spec * self.params['ebounds'].bands * self.params['livetime'] * \\\n            self.params['on_region'].area\n        off_vec = CountsSpectrum(cnts.decompose(), self.params['ebounds'], backscal=1)\n        self.backscal = 1\n        self.off_vec = off_vec\n\n\n","sub_path":"gammapy/spectrum/spectrum_extraction.py","file_name":"spectrum_extraction.py","file_ext":"py","file_size_in_byte":27118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"317387935","text":"# Andrew UM\n# HW4\n# COMP431\n\nimport os.path\nimport re\nfrom socket import *\nimport sys\n\n\nglobal boolean\nboolean = True\nBoolean = True\nsave_path = 'forward/'\n\nwhile True:\n    # input port number\n    try:\n        serverPort = int(sys.argv[1])\n        serverName = ''\n        serverSocket = socket(AF_INET, SOCK_STREAM)\n        serverSocket.bind((serverName, serverPort))\n        serverSocket.listen(1)\n        break\n    except:\n        print('Socket connection error. Try again.')\n        sys.exit()\n\n\nwhile Boolean:\n    try:\n        connectionSocket, addr = serverSocket.accept()\n        connectionSocket.send(\"220 Connection accepted from \" + gethostname())\n    except:\n        print('Socket connection error.')\n        sys.exit()\n\n    # receive HELO from client\n    try:\n        helo = connectionSocket.recv(1024)\n        if helo[:4] == 'HELO':\n            connectionSocket.send(\"250 Hello \" + gethostname() + '. Pleased to meet you.')\n            boolean = True\n    except:\n        print('HELO error. 
Try again.')\n sys.exit()\n \n while boolean:\n # receive mail command\n command = connectionSocket.recv(1024)\n\n # check if command input is out of order\n _check1 = re.match(r'RCPT(\\s+|$)TO:', command)\n _check2 = re.match(r'DATA', command)\n\n # checks for valid MAIL FROM command \n _cmd = re.match(r'MAIL(\\s+|$)FROM:' , command)\n # checks for valid path\n _path = re.match(r'MAIL(.+)FROM:(\\s*)<[^\\s](.+)@(.+)[^\\s]>', command)\n # checks for valid mailbox\n _mb = re.match(r'MAIL(.+)FROM:(\\s*)<([\\+/\\'!\\?\\w-]+)@[^\\s](.+)[^\\s]>', command)\n # checks for valid local-part\n _lp = re.match(r'MAIL(.+)FROM:(\\s*)<([\\+/\\'!\\?\\w-]+)@(.+)>', command)\n # checks for valid url domain \n _domain = re.search(r'MAIL(.+)FROM:(\\s*)<(.+)@([\\D.]+)>', command)\n\n if _check1:\n connectionSocket.send('503 Bad sequence of commands')\n continue\n if _check2:\n connectionSocket.send('503 Bad sequence of commands')\n continue\n elif not _cmd:\n connectionSocket.send('500 Syntax error: command unrecognized')\n continue\n elif not _path:\n connectionSocket.send('501 Syntax error in parameters or arguments')\n continue\n elif not _mb:\n connectionSocket.send('501 Syntax error in parameters or arguments')\n continue\n elif not _lp:\n connectionSocket.send('501 Syntax error in parameters or arguments')\n continue\n elif not _domain:\n connectionSocket.send('501 Syntax error in parameters or arguments')\n continue\n else:\n From = command.replace(\"MAIL FROM\", \"From\")\n connectionSocket.send('250 OK')\n\n _bool = True\n to_list = []\n rcpt_list = []\n\n while boolean:\n\n # receive receipt\n receipt = connectionSocket.recv(1024)\n\n # checks for out of order commands \n check = re.match(r'DATA', receipt)\n # if check:\n # _bool=False \n check2 = re.match(r'MAIL(\\s+|$)FROM:' , receipt)\n\n # checks for valid RECEIPT TO command \n rcpt = re.match(r'RCPT(\\s+|$)TO:', receipt)\n # checks for valid path\n fpath = re.match(r'RCPT(.+)TO:(\\s*)<([\\+/\\'!\\?\\w-]+)@([\\D.]+)>', receipt)\n\n if receipt[:7] == 'Subject':\n receipt = 'DATA'\n _bool = False\n continue\n\n if _bool is False:\n if check:\n break\n\n if check2:\n connectionSocket.send('503 Bad sequence of commands')\n continue\n if not rcpt:\n connectionSocket.send('501 Syntax error in parameters or arguments')\n continue\n elif not fpath:\n connectionSocket.send('501 Syntax error in parameters or arguments')\n continue\n else:\n _bool = False\n # make save names from recipients\n name_of_file = receipt.replace(\"RCPT TO: \", \"\")\n name_of_file = name_of_file.strip('>')\n name_of_file = name_of_file.split('@', 1)[-1]\n to = receipt.replace(\"RCPT TO: \", \"\")\n rcpt_list.append(to)\n save_name = os.path.join(save_path, name_of_file)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n file1 = open(save_name, \"a\")\n to_list.append(file1)\n\n connectionSocket.send('250 OK')\n continue\n # write From and To in files\n '''for files in to_list:\n file1 = files\n size = len(rcpt_list)\n file1.write(From + \"\\n\")\n file1.write(\"To: \")\n for rcpt in rcpt_list:\n size = size - 1\n if size is 0:\n file1.write(rcpt + \"\\n\")\n else:\n file1.write(rcpt + \", \")'''\n\n\n while boolean:\n if not check:\n # receive DATA cmd \n datacmd = connectionSocket.recv(1024)\n check = re.match(r'DATA', datacmd)\n\n if not check:\n connectionSocket.send('500 Syntax error: command unrecognized')\n continue\n else:\n connectionSocket.send('354 Start mail input; end with .')\n \n while boolean:\n # receive msg until QUIT \n data = 
connectionSocket.recv(1024)\n if data == '.':\n connectionSocket.send('250 OK')\n boolean = False\n \n for files in to_list:\n file1 = files\n file1.close()\n\n quitCmd = connectionSocket.recv(1024)\n if re.match(r'QUIT', quitCmd):\n connectionSocket.send('221 Bye')\n boolean = False\n break\n\n else:\n connectionSocket.send(data)\n for files in to_list:\n file1 = files\n file1.write(data + \"\\n\")\n continue\n\n","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":6423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"49190710","text":"\nimport tensorflow as tf\nfrom tensorflow import keras\n\nW = tf.Variable([.3], tf.float32)\nb = tf.Variable([.2], tf.float32)\nx = tf.placeholder(tf.float32)\n\nlinear_model = W*x + b\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n output = sess.run(linear_model, {x:[1, 2, 3, 4]})\n print(output)\n\n sess.run(W.assign_add([0.1]))\n output = sess.run(linear_model, {x:[1, 2, 3, 4]})\n print(output)\n","sub_path":"concepts/tensorflow-tutorial/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"610770590","text":"import os\nimport tempfile\nimport subprocess\nfrom pathlib import Path\n\nif not 'DEVKITPRO' in os.environ:\n print('DEVKITPRO not found in environment variables')\n quit(-1)\n\ndkp_root = os.environ['DEVKITPRO']\n\n# Allow overriding of path to wut outside of devkitPro incase you have a local\n# copy of wut with different header files.\nif not 'WUT_ROOT' in os.environ:\n wut_root = os.path.join(dkp_root, 'wut')\nelse:\n wut_root = os.environ['WUT_ROOT']\n\ninclude_subdirs = [\n 'avm',\n 'coreinit',\n 'dmae',\n 'gx2',\n 'gx2r',\n 'h264',\n 'nn',\n 'nsyshid',\n 'nsysnet',\n 'padscore',\n 'proc_ui',\n 'sndcore2',\n 'swkbd',\n 'sysapp',\n 'vpad',\n]\n\nsysroot = os.path.join(dkp_root, 'devkitPPC', 'powerpc-eabi')\n\ngcc_include_path = os.listdir(os.path.join(dkp_root, 'devkitPPC', 'lib', 'gcc', 'powerpc-eabi'))\nif len(gcc_include_path) != 1:\n print('Unexpected include paths in devkitPPC')\n quit(-1)\ngcc_include_path = os.path.join(dkp_root, 'devkitPPC', 'lib', 'gcc', 'powerpc-eabi', gcc_include_path[0])\n\ntf = tempfile.NamedTemporaryFile(mode='w', suffix='.h',delete=False)\nfor subdir in include_subdirs:\n for path in Path(os.path.join(wut_root, 'include', subdir)).rglob('*.h'):\n tf.write('#include \"%s\"\\n' % (path))\ntf.close()\nprint(tf.name)\n\ncmd = [\n 'bindgen',\n tf.name,\n '--use-core',\n '--ctypes-prefix=cty',\n '--default-enum-style', 'moduleconsts',\n '--no-include-path-detection',\n '--raw-line', '#[allow(non_camel_case_types,non_snake_case,non_upper_case_globals,dead_code)]',\n '--blacklist-item=OSSpinLock',\n '--',\n '-target', 'powerpc-none-eabi',\n '-march=powerpc',\n '-mfloat-abi=hard',\n '-nostdinc',\n '--sysroot', sysroot,\n '-isystem', os.path.join(sysroot, 'include'),\n '-isystem', os.path.join(gcc_include_path, 'include'),\n '-isystem', os.path.join(gcc_include_path, 'include-fixed'),\n '-I%s' % os.path.join(wut_root, 'include'),\n]\noutput = subprocess.check_output(cmd)\n\nf = open(os.path.join('src', 'sys.rs'), 'wb')\nf.write(output)\nf.close()\n","sub_path":"cafeos-sys/wut-bindgen.py","file_name":"wut-bindgen.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
+{"seq_id":"348780623","text":"N = int (input ())\nif N == 1 :\n print (1)\nelif N == 2 :\n print (2)\nelse :\n before = 1\n after = 2\n for i in range (3, N + 1) :\n temp = after\n after += before % 15746\n before = temp % 15746\n print (after % 15746)","sub_path":"Algorithm-Study/Backjoon/1904.py","file_name":"1904.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"547097736","text":"from django.shortcuts import render\n\nimport pandas as pd\nimport _pickle as cPickle\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import SGDClassifier\n# Create your views here.\nimport string\nfrom nltk.corpus import stopwords\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse,HttpResponse\n\ndef text_process(mess):\n\t\"\"\"\n\tTakes in a string of text, then performs the following:\n\t1. Remove all punctuation\n\t2. Remove all stopwords\n\t3. Returns a list of the cleaned text\n\t\"\"\"\n\t# Check characters to see if they are in punctuation\n\tnopunc = [char for char in mess if char not in string.punctuation]\n\n\t# Join the characters again to form the string.\n\tnopunc = ''.join(nopunc)\n\t\n\t# Now just remove any stopwords\n\treturn [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n@csrf_exempt\ndef predict(request):\n\tif request.method == 'POST':\n\t\tmessages1 = request.POST['parag']\n\t\twith open('website/models/emotion_detect_model.pkl', 'rb') as fin:\n\t\t bow_transformer, emotion_detect_model = cPickle.load(fin)\n\n\t\t# fid = open('', 'rb')\n\t\t# emotion_detect_model = cPickle.load(fid)\n\n\n\t\tmessages_bow_test = bow_transformer.transform((messages1.split('.')))\n\n\n\t\ttfidf_transformer = TfidfTransformer()\n\n\t\tmessages_tfidf_test = tfidf_transformer.fit_transform(messages_bow_test)\n\n\t\tall_predictions = emotion_detect_model.predict(messages_tfidf_test)\n\t\tprint(all_predictions)\n\t\treturn HttpResponse(all_predictions)\n\n\ndef train(request):\n\tprint(\"Working\")\n\tmessages = pd.read_csv('website/input/data.bak3.tsv', sep='\\t',\n\t\t\t\t\t\t names=[\"label\", \"message\"])\n\tbow_transformer = CountVectorizer(analyzer=text_process)\n\tX_train = bow_transformer.fit(messages['message'])\n\tmessages_bow = X_train.transform(messages['message'])\n\ttfidf_transformer = TfidfTransformer()\n\ttf = tfidf_transformer.fit(messages_bow)\n\tmessages_tfidf = tf.transform(messages_bow)\n\temotion_detect_model = SGDClassifier(alpha=0.001, random_state=5, max_iter=15, tol=None).fit(messages_tfidf, messages['label'])\n\twith open('website/models/emotion_detect_model.pkl', 'wb') as fid:\n\t\tcPickle.dump((bow_transformer,emotion_detect_model), fid) \n\tprint(\"Done\")","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"355131834","text":"import numpy as np\nimport time\n\nfrom . 
import logger\n\n\ndef rollout(env, policy, episode_length, render=False, speedup=None):\n Da = env.action_space.n\n Do = np.prod(env.observation_space.shape)\n\n observation = env.reset()\n\n observations = np.zeros((episode_length + 1, Do))\n actions = np.zeros((episode_length, Da))\n dones = np.zeros((episode_length, ))\n rewards = np.zeros((episode_length, ))\n agent_infos = []\n env_infos = []\n\n t = 0\n for t in range(episode_length):\n action, agent_info = policy.get_action(observation)\n next_obs, reward, done, env_info = env.step(action)\n\n agent_infos.append(agent_info)\n env_infos.append(env_info)\n\n actions[t] = action\n dones[t] = done\n rewards[t] = reward\n observations[t] = observation\n\n observation = next_obs\n\n if render:\n env.render()\n time_step = 0.05\n time.sleep(time_step / speedup)\n\n if done:\n break\n\n observations[t + 1] = observation\n\n episode = {\n 'observations': observations[:t + 1],\n 'actions': actions[:t + 1],\n 'rewards': rewards[:t + 1],\n 'dones': dones[:t + 1],\n 'next_observations': observations[1:t + 2],\n 'agent_infos': agent_infos,\n 'env_infos': env_infos\n }\n\n return episode\n\n\ndef rollouts(env, policy, episode_length, n_episodes):\n episodes = list()\n for i in range(n_episodes):\n episodes.append(rollout(env, policy, episode_length))\n\n return episodes\n\n\nclass Sampler(object):\n def __init__(self, max_episode_length, min_replay_buffer_size, batch_size):\n self._max_episode_length = max_episode_length\n self._min_replay_buffer_size = min_replay_buffer_size\n self._batch_size = batch_size\n\n self.env = None\n self.policy = None\n self.replay_buffer = None\n\n def initialize(self, env, policy, replay_buffer):\n self.env = env\n self.policy = policy\n self.replay_buffer = replay_buffer\n\n def sample(self):\n raise NotImplementedError\n\n def batch_ready(self):\n enough_samples = len(self.replay_buffer) >= self._min_replay_buffer_size\n return enough_samples\n\n def random_batch(self):\n return self.replay_buffer.random_batch(self._batch_size)\n\n def terminate(self):\n self.env.terminate()\n\n def log_diagnostics(self):\n logger.record_tabular('replay-buffer-size', len(self.replay_buffer))\n logger.record_tabular('replay-buffer-limit', self.replay_buffer.size)\n\n\nclass SimpleSampler(Sampler):\n\n def __init__(self, **kwargs):\n super(SimpleSampler, self).__init__(**kwargs)\n self._episode_length = 0\n self._episode_return = 0\n self._last_episode_return = 0\n self._max_episode_return = -np.inf\n self._n_episodes = 0\n self._current_observation = None\n self._total_samples = 0\n\n def sample(self):\n if self._current_observation is None:\n self._current_observation = self.env.reset()\n\n action, _ = self.policy.get_action(self._current_observation)\n next_observation, reward, done, info = self.env.step(action)\n self._episode_length += 1\n self._episode_return += reward\n self._total_samples += 1\n\n self.replay_buffer.add_sample(\n observation=self._current_observation,\n action=action,\n reward=reward,\n done=done,\n next_observation=next_observation)\n\n if done or self._episode_length >= self._max_episode_length:\n self._current_observation = self.env.reset()\n self._episode_length = 0\n self._max_episode_return = max(self._max_episode_return,\n self._episode_return)\n self._last_episode_return = self._episode_return\n\n self._episode_return = 0\n self._n_episodes += 1\n\n else:\n self._current_observation = next_observation\n\n def log_diagnostics(self):\n super(SimpleSampler, self).log_diagnostics()\n 
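# Per-episode return/count statistics, in addition to the replay-buffer diagnostics from the base class.\n        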
logger.record_tabular('max-episode-return', self._max_episode_return)\n logger.record_tabular('last-episode-return', self._last_episode_return)\n logger.record_tabular('episodes', self._n_episodes)\n logger.record_tabular('total-samples', self._total_samples)\n\n\nclass SkipSampler(SimpleSampler):\n\n def __init__(self, skip_rate=20, **kwargs):\n super(SimpleSampler, self).__init__(**kwargs)\n self.skip_rate = skip_rate\n self._episode_length = 0\n self._episode_return = 0\n self._last_episode_return = 0\n self._max_episode_return = -np.inf\n self._n_episodes = 0\n self._current_observation = None\n self._total_samples = 0\n\n def sample(self):\n if self._current_observation is None:\n self._current_observation = self.env.reset()\n\n # TODO:replace this hack for valkyrie env\n try:\n action, _ = self.policy.get_action(self._current_observation)\n self.env.reset_joint_interpolation(action)\n except AttributeError:\n pass\n\n reward = 0\n for _ in range(self.skip_rate):\n action, _ = self.policy.get_action(self._current_observation)\n next_observation, reward_now, done, info = self.env.step(action)\n reward += reward_now\n self._episode_length += 1\n self._episode_return += reward\n self._total_samples += 1\n\n self.replay_buffer.add_sample(\n observation=self._current_observation,\n action=action,\n reward=reward,\n done=done,\n next_observation=next_observation)\n\n if done or self._episode_length >= self._max_episode_length:\n self._current_observation = self.env.reset()\n self._episode_length = 0\n self._max_episode_return = max(self._max_episode_return,\n self._episode_return)\n self._last_episode_return = self._episode_return\n\n self._episode_return = 0\n self._n_episodes += 1\n\n else:\n self._current_observation = next_observation\n\n\nclass DummySampler(Sampler):\n def __init__(self, batch_size, max_episode_length):\n super(DummySampler, self).__init__(\n max_episode_length=max_episode_length,\n min_replay_buffer_size=0,\n batch_size=batch_size)\n\n def sample(self):\n pass\n","sub_path":"softqlearning/misc/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"267160935","text":"import pigpio\nimport time\n\nclass rotary:\n last_A = 1\n last_B = 1\n last_gpio = 0\n\n last_event_time = time.time()\n debounce_time = 0\n\n def __init__(self, A=None, B=None, G=None, callback=None):\n if not A or not B:\n raise BaseException(\"Encoder pins must be specified!\")\n self.pi = pigpio.pi()\n self.Enc_A = A\n self.Enc_B = B\n self.callback = callback\n\n self.pi.set_mode(self.Enc_A, pigpio.INPUT)\n self.pi.set_pull_up_down(self.Enc_A, pigpio.PUD_UP)\n self.pi.set_mode(self.Enc_B, pigpio.INPUT)\n self.pi.set_pull_up_down(self.Enc_B, pigpio.PUD_UP)\n\n self.pi.callback(self.Enc_A, pigpio.EITHER_EDGE, self.rotary_interrupt)\n self.pi.callback(self.Enc_B, pigpio.EITHER_EDGE, self.rotary_interrupt)\n\n def rotary_interrupt(self, gpio, level, tick):\n if gpio == self.Enc_A:\n self.last_A = level\n else:\n self.last_B = level;\n\n if gpio != self.last_gpio:\n self.last_gpio = gpio\n if gpio == self.Enc_A and level == 1:\n if self.last_B == 1:\n if time.time() - self.last_event_time > self.debounce_time:\n self.last_event_time = time.time()\n if self.callback is not None:\n self.callback(-1)\n elif gpio == self.Enc_B and level == 1:\n if self.last_A == 1:\n if time.time() - self.last_event_time > self.debounce_time:\n self.last_event_time = time.time()\n if self.callback is 
not None:\n self.callback(1)\n","sub_path":"sw/pyBackend/splitFlapClockRadioBackend/userInterface/rotary.py","file_name":"rotary.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"431780266","text":"from csv import Sniffer, DictReader\nfrom openpyxl import load_workbook as lw\n\n\nclass Reader:\n \"\"\"\n Base Reader class\n \"\"\"\n def check_template(self, snippet):\n \"\"\"\n Checks if snippet needs to be personalized for each recipient.\n Changes self.required to True if personalization is required.\n\n Returns True if personalization needed else False\n \"\"\"\n for header in self.headers:\n if f\"{{{{ {header} }}}}\" in snippet.content:\n self.required = True\n break\n else:\n return False\n\n return True\n \n def get_headers(self):\n \"\"\"\n Returns self.headers\n \"\"\"\n return self.headers\n\n def is_required(self):\n \"\"\"\n Returns self.required\n \"\"\"\n return self.required\n\n\nclass CSVReader(Reader):\n \"\"\"\n This class inherits from mailtorpedo.reader.Reader object and processes \n CSV/TSV or similar files. Returns a list of dict instances having column \n headers as key and value for each row.\n \"\"\"\n def __init__(self, filename, email_field, encoding=\"utf-8\"):\n \"\"\"\n filename: File path (instance of str class)\n Path of the file that contains delimeter separated values\n email_field: str\n Name of the column header that contains the email addresses of the recipient\n encoding (optional): str\n Encoding of the file mentioned as filename\n \"\"\"\n self.filename = filename\n self.encoding = encoding\n self.required = False\n self.email_field = email_field\n\n with open(self.filename, \"r\", encoding=self.encoding) as csvfile:\n topline = csvfile.readline().rstrip()\n self.delimiter = Sniffer().sniff(topline).delimiter\n\n headers = topline.split(self.delimiter)\n self.headers = headers\n\n def parsed_dict(self):\n \"\"\"\n Returns a list having dictionary associated for each row of delimiter seperated values.\n\n Each of the dictionaries represent a row in CSV file where the keys are column headers\n and the values are associated cells in each column for that row.\n \"\"\"\n if self.email_field not in self.headers:\n raise ValueError(f\"No column with header '{self.email_field}' is present\")\n\n with open(self.filename, \"r\", encoding=self.encoding) as csvfile:\n reader = list(DictReader(csvfile, delimiter=self.delimiter))\n return reader\n\n\nclass ExcelReader(Reader):\n \"\"\"\n This class inherits from mailtorpedo.reader.Reader class and processes \n Microsoft Excel files. 
Returns a list of dict instances having column \n headers as key and value for each row.\n \"\"\"\n def __init__(self, filename, email_field, sheet=0):\n \"\"\"\n filename: File path (instance of str class)\n Path of the file that contains delimeter separated values\n email_field: str\n Name of the column header that contains the email addresses of the recipient\n sheet (optional): int or str\n Name of the worksheet in excel file or its serial number (indexed from 0)\n By default it opens the first sheet of the excel file.\n \"\"\"\n self.filename = filename\n self.email_field = email_field\n self.sheet_index = sheet\n self.required = False\n self.load_workbook()\n\n def load_workbook(self):\n \"\"\"\n Loads the Excel file and processes it.\n \n Returns None\n \"\"\"\n self.workbook = lw(self.filename, read_only=True, data_only=True)\n if type(self.sheet_index) == int:\n self.workbook.active = self.sheet_index\n elif type(self.sheet_index) == str:\n if self.sheet_index in self.workbook.sheetnames:\n self.workbook.active = self.workbook.sheetnames.index(self.sheet_index)\n else:\n raise Exception(\"Sheet not found\")\n else:\n raise TypeError(\"Sheet value must be integer or string\")\n self.sheet = self.workbook.active\n self.headers = [i.value for i in self.workbook.active[1]]\n\n def change_sheet(self, sheet):\n self.sheet_index = sheet\n if type(self.sheet_index) == int:\n self.workbook.active = self.sheet_index\n elif type(self.sheet_index) == str:\n if self.sheet_index in self.workbook.sheetnames:\n self.workbook.active = self.workbook.sheetnames.index(self.sheet_index)\n else:\n raise Exception(\"Sheet not found\")\n else:\n raise TypeError(\"Sheet value must be integer or string\")\n self.sheet = self.workbook.active\n self.headers = [i.value for i in self.workbook.active[1]]\n\n def parsed_dict(self):\n \"\"\"\n Returns a list having dictionary associated for each row of delimiter seperated values.\n\n Each of the dictionaries represent a row in the Excel sheet where the keys are column \n headers and the values are associated cells in each column for that row.\n \"\"\"\n if self.email_field not in self.headers:\n raise ValueError(f\"No column with header '{self.email_field}' is present\")\n\n header_length = len(self.headers)\n reader = []\n rowtuples = self.sheet[2 : self.sheet.max_row]\n if type(rowtuples[0]).__name__ != \"tuple\":\n rowtuples = (rowtuples,)\n for i in rowtuples:\n temp_dict = dict()\n for k in range(0, header_length):\n if self.headers[k] == self.email_field and i[k].value is None:\n break\n temp_dict[self.headers[k]] = str(i[k].value)\n else:\n reader.append(temp_dict)\n \n self.workbook.close()\n\n return reader\n","sub_path":"torpedo-gui/modtorpedo/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"420806543","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport numpy as np\nimport uproot\n\ndef make_generator(paths, batch_size, format='xn', features=None, n_vert_max=256, y_dtype=np.int, y_features=None, dataset_name='events'):\n def get_event():\n for data in uproot.iterate(paths, dataset_name, ['n', 'x', 'y']):\n n = data['n']\n x = data['x']\n y = data['y']\n\n start = 0\n end = batch_size\n while True:\n if end > n.shape[0]:\n this_batch_size = n.shape[0] - start\n else:\n this_batch_size = batch_size\n\n nfeat = x.content.shape[1]\n\n v_x = np.zeros((this_batch_size, n_vert_max, 
nfeat), dtype=np.float)\n\n                batch_indices = np.repeat(np.arange(this_batch_size), n[start:end])\n\n                cluster_indices = np.r_[tuple(np.s_[:x] for x in n[start:end])]\n\n                v_x[batch_indices, cluster_indices] = x[start:end].flatten()\n\n                if features is not None:\n                    v_x = v_x[:, :, features]\n\n                yield [v_x, n[start:end]], y[start:end]\n\n                if end >= n.shape[0]:\n                    break\n\n                start = end\n                end += batch_size\n\n    return get_event\n\n\nif __name__ == '__main__':\n    generator = make_generator('/tmp/yiiyama/test_0.root', 2)()\n\n    print(next(generator))\n","sub_path":"keras/generators/uproot_jagged.py","file_name":"uproot_jagged.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"416818867","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 3 11:40:34 2019\n\n@author: cpatrizio\n\"\"\"\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 23 12:00:06 2018\n\n@author: cpatrizio\n\"\"\"\n\nimport sys\nsys.path.append(\"/Users/cpatrizio/repos/\")\nimport matplotlib\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nimport cartopy as cart\nfrom scipy import signal\nfrom cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\nimport xarray as xr\nimport xesmf as xe\nimport pandas as pd\nimport ocean_atmosphere.stats as st\nimport ocean_atmosphere.misc_fns as st2\nimport colorcet as cc\nimport importlib\nimport ocean_atmosphere.map\nimportlib.reload(ocean_atmosphere.map)\nfrom ocean_atmosphere.map import Mapper\nimport matplotlib.colors as colors\nimport matplotlib.patches as mpatches\nimport cmocean\nimport numpy.fft as fft\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter, ScalarFormatter,\n                               AutoMinorLocator, LogLocator)\nimport proplot as plot\n#import seaborn as sns\n#import proplot as plot\n\n\n#fin = '/Users/cpatrizio/data/MERRA2/'\n#fin = '/Users/cpatrizio/data/ECMWF/'\n#fin = '/Users/cpatrizio/data/OAFlux/'\n#fout = '/Volumes/GoogleDrive/My Drive/PhD/figures/ocean-atmosphere/localSST_global/'\n\nfin = '/Volumes/GoogleDrive/My Drive/data_drive/MERRA2/'\n\nfout = '/Users/cpatrizio/figures_arc/'\n\n\n#MERRA-2\nfsstM2 = xr.open_dataset(fin + 'MERRA2_SST_ocnthf_monthly1980to2017.nc')\nfta = xr.open_dataset(fin + 'MERRA2_t10m_monthly1980to2017.nc')\nfthf = xr.open_dataset(fin + 'MERRA2_thf_monthly1980to2017.nc')\n#fSLP = xr.open_dataset(fin + 'MERRA2_SLP_monthly1980to2017.nc')\nradfile = xr.open_dataset(fin + 'MERRA2_rad_monthly1980to2017.nc')\n#cffile = xr.open_dataset(fin + 'MERRA2_modis_cldfrac_monthly1980to2017.nc')\n#frad = cdms2.open(fin + 'MERRA2_tsps_monthly1980to2017.nc')\n#fuv = cdms2.open(fin + 'MERRA2_uv_monthly1980to2017.nc')\n#fRH = cdms2.open(fin + 'MERRA2_qv10m_monthly1980to2017.nc')\n#fcE = cdms2.open(fin + 'MERRA2_cE_monthly1980to2017.nc')\n#fcD = cdms2.open(fin + 'MERRA2_cD_monthly1980to2017.nc')\nftau = xr.open_dataset(fin + 'MERRA2_tau_monthly1980to2019.nc')\nfssh = xr.open_dataset(fin + 'ncep.ssh.198001-201912.nc')\n#fseaice = xr.open_dataset(fin + 'MERRA2_seaice_monthly1980to2019.nc')\n\n\n#dataname = 'ERAi'\n#dataname = 'MERRA2'\ndataname = 'OAFlux'\n#dataname = 'ERA5'\n#dataname = 'ECCO'\n\n#ECCO\nfin = '/Volumes/GoogleDrive/My Drive/data_drive/ECCO/'\n#ft= xr.open_dataset(fin + 'ECCO_theta_monthly1992to2015.nc')\nfh = xr.open_dataset(fin + 'ECCO_mxldepth_interp_1992to2015.nc')\n#fTmxlfrac = xr.open_dataset(fin + 'ECCO_Tmxlfrac.nc')\n\nfsst = xr.open_dataset(fin + 
'ecco_SST.nc')\nfTmxl = xr.open_dataset(fin + 'ecco_T_mxl.nc')\n\n#fsst = fsst.rename({'__xarray_dataarray_variable__':'Ts'})\n#fTmxl = fTmxl.rename({'__xarray_dataarray_variable__':'Tmxl'})\n\nsst_ECCO = fsst.Ts\nTmxl_ECCO = fTmxl.Tmxl\n\n#Tmxlfrac = Tmxl_ECCO/sst_ECCO\n\n#Tmxlfrac = fTmxlfrac.Tmxlfrac\n\nssh = fssh.sshg\n\n\n#OAFlux \nfin = '/Volumes/GoogleDrive/My Drive/data_drive/OAFlux/'\n#fta = xr.open_dataset(fin + 'oaflux_ta_1980to2017.nc')\nfsstoa = xr.open_dataset(fin + 'oaflux_ts_1980to2017.nc')\nfthf = xr.open_dataset(fin + 'oaflux_thf_1980to2017.nc')\n\n#ISCCP \n#fin_rad = '/Users/cpatrizio/data/ISCCP/'\n#lwfile = xr.open_dataset(fin_rad + 'ISCCP_lw_1983to2009.nc')\n#swfile = xr.open_dataset(fin_rad + 'ISCCP_sw_1983to2009.nc')\n\n#ERA5\n# fin = '/Volumes/GoogleDrive/My Drive/data_drive/ERA5/'\n# fsst = xr.open_dataset(fin + 'ERA5_sst_monthly1979to2019.nc')\n# fthf = xr.open_dataset(fin + 'ERA5_thf_monthly1979to2019.nc')\n# frad = xr.open_dataset(fin + 'ERA5_rad_monthly1979to2019.nc')\n\n#ERSST\n#fsst = xr.open_dataset('/Users/cpatrizio/data/ERSST/sst.mnmean.nc')\n\nh = fh.MXLDEPTH\n#theta = ft.THETA\n\ntime = h.tim\nlats = h.lat[:,0]\nlons = h.lon[0,:]\n#z = theta.dep\n#z = z.rename({'i2':'k'})\n\nh.i1.values = h.tim.values[:]\nh.i2.values = h.lat.values[:,0]\nh.i3.values = h.lon.values[0,:]\n\nh = h.drop('lat')\nh = h.drop('lon')\nh = h.drop('tim')\n\nh = h.rename({'i1':'time','i2': 'lat', 'i3':'lon'})\n\n\n#theta.i1.values = theta.tim.values[:]\n#theta.i2.values = theta.dep.values[:]\n#theta.i3.values = theta.lat.values[:,0]\n#theta.i4.values = theta.lon.values[0,:]\n#\n#theta = theta.drop('lat')\n#theta = theta.drop('lon')\n#theta = theta.drop('tim')\n#theta = theta.drop('dep')\n#\n#theta = theta.rename({'i1':'time','i2': 'k', 'i3':'lat', 'i4':'lon'})\n#\n#delz = z.diff(dim='k')\n#\n#mxldepth_clim = h.mean(dim='time')\n#\n#mxlpoints = theta.k < mxldepth_clim\n#\n#delz_sum = delz.where(mxlpoints).sum(dim='k')\n#\n#weights = delz/delz_sum\n#\n##sst = theta.isel(k=0)\n#\n#theta_mxl = (weights*theta).where(mxlpoints).sum(dim='k')\n\n#\"\n#h = np.ma.array(h, mask=np.isnan(h))\n#\n#hlats = h.getLatitude()[:,0]\n#hlons = h.getLongitude()[0,:]\n#hgrid = cdms2.createGenericGrid(hlats.getValue(),hlons.getValue())\n#h.setGrid(hgrid)\n#\n#months = range(h.shape[0])\n#ta = cdms2.createAxis(months)\n#ta.id = 'time'\n#ta.units = 'months since 1992-01-01'\n#h.setAxis(0,ta)\n\nhmean = h.mean(dim='time')\nhbar=hmean\n#h = fh.dbss_obml\n#h_anom, h_clim = st.anom(h)\n#h = h.subRegion(latitude=(minlat, maxlat), longitude=(minlon, maxlon))\n#hbar = h_clim.max(dim='month')\n#hbar = h.mean(dim='time')\n#ERA-interim\n#fsst = cdms2.open(fin + 'sstslp.197901-201712.nc')\n#fthf = cdms2.open(fin + 'thf.197901-201712.nc')\n\n#plot.rc.update({'mathtext.fontset': 'cm'})\n\n\nmatplotlib.rcParams.update({'font.size': 24})\nmatplotlib.rcParams.update({'axes.titlesize': 30})\nmatplotlib.rcParams.update({'figure.figsize': (10,8)})\nmatplotlib.rcParams.update({'lines.linewidth': 2})\nmatplotlib.rcParams.update({'legend.fontsize': 24})\nmatplotlib.rcParams.update({'mathtext.fontset': 'cm'})\nmatplotlib.rcParams.update({'ytick.major.size': 3})\n#matplotlib.rcParams.update({'axes.labelsize': 22})\nmatplotlib.rcParams.update({'ytick.labelsize': 22})\nmatplotlib.rcParams.update({'xtick.labelsize': 22})\n\nplot.rc.update({'mathtext.fontset': 'cm'})\nplot.rc.update({'mathtext.default': 'it'})\n#plot.rc.update({'font.size': 18})\n#plot.rc.update({'axes.titlesize': 
22})\nplot.rc.update({'small':12})\nplot.rc.update({'large':14})\n\n\n#EDIT THIS FOR BOUNDS\nlonbounds = [0.5,359.5]\nlatbounds = [-89.5,89.5]\n\n#longitude bounds are in degrees East\n#lonbounds = [140,180]\n#latbounds = [30,50]\n\n\n#lonbounds = [120,180]\n#latbounds = [20,60]\n\n#latw=5\n#slats = np.array([35])\n\n#KO extension \n#lonbounds = [153,168]\n#latbounds = [34,43]\n\n#lonbounds = [142,165]\n#latbounds = [35,42]\n\n#KO extension (long)\n#lonbounds=[145,205]\n#latbounds=[30,45]\n\n#KOE (west)\n#lonbounds=[145,175]\n#latbounds=[30,45]\n\n#KOE (east)\n#lonbounds=[175,205]\n#latbounds=[30,45]\n\n#ENA\n#lonbounds = [315,345]\n#latbounds = [30,50]\n\n\n#ENP\n#lonbounds = [210,230]\n#latbounds = [38,53]\n\n#SP\n#latbounds=[-52,-38]\n#lonbounds=[190,250]\n\n#SubEP\n#latbounds=[12,25]\n#lonbounds=[215,235]\n\n#North Atlantic\n# latbounds=[0,65]\n# lonbounds=[270,360]\n\n##O'Reilly region (AMOmid)\n#lonbounds=[300,340]\n#latbounds=[40,60]\n\n#Extratropical North Atlantic\n#lonbounds=[315,345]\n#latbounds=[47,60]\n\n#Extropical East North Pacific\n#lonbounds=[210,245]\n#latbounds=[30,60]\n\n#Equatorial Pacific\n#lonbounds=[175,285]\n#latbounds=[-8,5]\n\n#Southern Oceans\n#lonbounds=[0,360]\n#latbounds=[-60,-18]\n\n#NP\n#lonbounds = [120,290]\n#latbounds = [15,60]\n\n#NP\n# lonbounds = [120.5,290.5]\n# latbounds = [-10,60]\n\n#NH\n#lonbounds = [0,360]\n#latbounds = [0,65]\n\n\n#NEW REGIONS\n#Southern Indian Ocean\n#latbounds = [-40,-28]\n#lonbounds = [60,115]\n\n#Eastern South Pacific\n#latbounds = [-50,-34]\n#lonbounds = [220,280]\n\n\n#Subtropical North Pacific\n#latbounds=[12,25]\n#lonbounds=[215,235]\n\n#Subtropical South Atlantic\n#latbounds=[-25,-10]\n#lonbounds=[340,355]\n\n#Subtropical North Atlantic\n#latbounds = [10,30]\n#lonbounds = [300,360]\n\n#Subtropical South Pacific\n#latbounds=[-22,-8]\n#lonbounds=[260,280]\n\n##Equatorial West Pacific\n#latbounds=[-10,10]\n#lonbounds=[165,205]\n\n#Equatorial East Pacific\n#latbounds=[-7,7]\n#lonbounds=[230,270]\n\n#Tropical Atlantic\n#latbounds=[5,25]\n#lonbounds=[300,360]\n\n#Midlatitude Pacific\n#latbounds = [30,45]\n#lonbounds = [160,220]\n\nminlon=lonbounds[0]\nmaxlon=lonbounds[1]\nminlat = latbounds[0]\nmaxlat= latbounds[1]\n\n#ps = fSLP.SLP\n# ps = ps/1e2\n# ps = ps.subRegion(latitude=(minlat, maxlat), longitude=(minlon, maxlon))\n# nt_ps = ps.shape[0]\n# ps = ps[tskip:,:]\n\n# #ps = fsst('msl')\n# ps = ps/1e2\n# ps = ps.assign_coords(lon=(ps.lon % 360)).roll(lon=((ps.shape[2] // 2)-1))\n# ps = ps.sel(lat=slice(minlat,maxlat),lon=slice(minlon,maxlon))\n\n\ntskip = 12\n#cf = cffile('MDSCLDFRCLO')\n#cf = cffile('MDSCLDFRCHI')\n#ps = fSLP('SLP')\n#ps = ps/1e2\n##ps = ps.subRegion(latitude=(minlat, maxlat), longitude=(minlon, maxlon))\n#nt_ps = cf.shape[0]\n\n#ps = fsst('msl')\n#ps = ps/1e2\n#ps = ps.subRegion(latitude=(minlat, maxlat), longitude=(minlon, maxlon))\n#nt_ps = ps.shape[0]\n#ps = ps[tskip:,:]\n\n#ERA5\n# sst = fsst.sst\n# lhf = fthf.mslhf\n# shf = fthf.msshf\n# LW_net_surf = frad.msnlwrf\n# SW_net_surf = frad.msnswrf\n\n#MERRA-2\n#sst = fsstM2.TSKINWTR\n#ta = fta.T10M\n#ta.load()\n# lhf = fthf.EFLUX\n# shf = fthf.HFLUX\n# LW_net_surf = radfile.LWGNT\n# SW_net_surf = radfile.SWGNT\n\n\n#OAFlux\nsst = fsstoa.tmpsf\nta = fta.T10M\nta.load()\n#ta = fta.tmp2m \n#ta = ta.where(~(ta==327.66))\nlhf = fthf.lhtfl\nshf = fthf.shtfl\nLW_net_surf = radfile.LWGNT\nSW_net_surf = radfile.SWGNT\n\nsst = sst+273.15\n#ta = ta+273.15\n\n#ta.load()\n\n#ECCO\n#sst = fsst.Ts\n#sst = fTmxl.Tmxl\n#sst_interp = sst\n#sst_interp= 
theta.isel(k=0)\n#sst = theta_mxl\n\n#ERSST\n#sst = fsst.sst\n\n\nthf = lhf + shf\nQ_net_surf = LW_net_surf + SW_net_surf\n\nif dataname == 'ERA5':\n sst = sst.rename({'latitude':'lat', 'longitude':'lon'})\n sst = sst.sortby('lat',ascending=True)\n\n\nlats = sst.lat\nlons = sst.lon\n\n#lons_interp = sst_interp.lon\n#lats_interp = sst_interp.lat\nif lons.max() <= 180:\n sst = sst.assign_coords(lon=(sst.lon % 360)).roll(lon=((len(lons) // 2)-1))\n \nif ta.lon.max() <= 180:\n ta = ta.assign_coords(lon=(ta.lon % 360)).roll(lon=((ta.shape[2] // 2)-1))\n \nsstbar = sst.mean(dim='time')\ntabar = ta.mean(dim='time')\n\nta= ta.sel(lat=slice(minlat,maxlat),lon=slice(minlon,maxlon))\nta = ta.transpose('time', 'lat', 'lon')\n \n#if lons_interp.max() <= 180:\n# sst_interp = sst_interp.assign_coords(lon=(sst_interp.lon % 360)).roll(lon=((len(lons_interp) // 2)))\n \nsst = sst.sel(lat=slice(minlat,maxlat),lon=slice(minlon,maxlon))\n\nsst = sst.transpose('time', 'lat', 'lon')\n\n#sst_interp = sst_interp.sel(lat=slice(minlat,maxlat),lon=slice(minlon,maxlon))\n\n#sst_interp = sst_interp.transpose('time', 'lat', 'lon')\n#f_seaice = fseaice.FRSEAICE\n#f_seaice = f_seaice.rename({'TIME':'time', 'XDim': 'lon', 'YDim': 'lat'})\n#f_seaice = f_seaice.assign_coords(lon=(f_seaice.lon % 360)).roll(lon=((f_seaice.shape[2] // 2)-1))\n#f_ocean = 1 - f_seaice\n#\n#f_oceanbar = f_ocean.mean(dim='time')\n\n#thf = f_oceanbar*thf\n#lhf = f_oceanbar*lhf\n#shf = f_oceanbar*shf\n\nif dataname == 'ERA5':\n lhf = lhf.rename({'latitude':'lat', 'longitude':'lon'})\n thf=thf.rename({'latitude':'lat', 'longitude':'lon'})\n shf = shf.rename({'latitude':'lat', 'longitude':'lon'})\n thf = thf.sortby('lat',ascending=True)\n lhf = lhf.sortby('lat',ascending=True)\n shf = shf.sortby('lat',ascending=True)\n \n\n#In ERA5 the turbulent heat fluxes are defined positive down\nif dataname == 'ERA5':\n thf = -thf\n lhf = -lhf\n shf = -shf\nif thf.lon.max() <= 180:\n thf = thf.assign_coords(lon=(thf.lon % 360)).roll(lon=((thf.shape[2] // 2)-1))\n \nthf = thf.sel(lat=slice(minlat,maxlat),lon=slice(minlon,maxlon))\n \n# taux = ftau.TAUXWTR\n# tauy = ftau.TAUYWTR\n# taux = taux.assign_coords(lon=(taux.lon % 360)).roll(lon=((taux.shape[2] // 2)-1))\n# tauy = tauy.assign_coords(lon=(tauy.lon % 360)).roll(lon=((tauy.shape[2] // 2)-1))\n# taux = taux.sel(lat=slice(minlat,maxlat),lon=slice(minlon,maxlon))\n# tauy = tauy.sel(lat=slice(minlat,maxlat),lon=slice(minlon,maxlon))\n\n#thf = -thf\n#thf is positive down in ERAi, convert to positive up\n#thf = thf\n#lhf = -lhf\n#shf = -shf\n\n#ps = fSLP('SLP')\n#ps = ps/1e2\n\n#ps = ps.subRegion(latitude=(minlat, maxlat), longitude=(minlon, maxlon))\n#ps = ps[tskip:,:]/1e2\n\n#u = fuv('U10M')\n#u = fuv('u10')\n#u = u.subRegion(latitude=(minlat, maxlat), longitude=(minlon, maxlon))\n#u = u[tskip:,:]\n\n#v = fuv('V10M')\n#v = fuv('v10')\n#v = v.subRegion(latitude=(minlat, maxlat), longitude=(minlon, maxlon))\n#v = v[tskip:,:]\n\n#umag = np.sqrt(np.square(v) + np.square(u))\n\n\n\nif dataname == 'ERA5':\n Q_net_surf=Q_net_surf.rename({'latitude':'lat', 'longitude':'lon'})\n Q_net_surf=Q_net_surf.sortby('lat',ascending=True)\n\n\nif Q_net_surf.lon.max() <=180:\n Q_net_surf = Q_net_surf.assign_coords(lon=(Q_net_surf.lon % 360)).roll(lon=((Q_net_surf.shape[2] // 2)-1))\n #W_net_surf = LW_net_surf.assign_coords(lon=(LW_net_surf.lon % 360)).roll(lon=((LW_net_surf.shape[2] // 2)-1))\n #SW_net_surf = SW_net_surf.assign_coords(lon=(SW_net_surf.lon % 360)).roll(lon=((SW_net_surf.shape[2] // 2)-1))\n\n#Q_net_surf_cs = 
LW_net_surf_cs + SW_net_surf_cs\n#\n\n\n\n\nQ_net_surf = Q_net_surf.sel(lat=slice(minlat,maxlat),lon=slice(minlon,maxlon))\n\nif not(dataname == 'ECCO'):\n sst = sst[:Q_net_surf.shape[0],:,:]\n ta = ta[:Q_net_surf.shape[0],:,:]\n #sst_interp = sst_interp[:Q_net_surf.shape[0],:,:]\nthf = thf[:Q_net_surf.shape[0],:,:]\nssh = ssh[:Q_net_surf.shape[0],:,:]\n# taux = taux[:Q_net_surf.shape[0],:,:]\n# tauy = tauy[:Q_net_surf.shape[0],:,:]\n#ps = ps[:Q_net_surf.shape[0],:,:]\n#SW_net_surf = SW_net_surf[:Q_net_surf.shape[0],:,:]\n\n\nmonths_sst = np.arange(sst.shape[0])\n#months_sst_interp = np.arange(sst_interp.shape[0])\nmonths = np.arange(thf.shape[0])\ntyears = 1980 + months/12.\nif dataname == 'ERA5':\n dates = pd.date_range('1979-01-01', periods=len(months), freq='MS')\nelse:\n dates = pd.date_range('1980-01-01', periods=len(months), freq='MS')\n \nif dataname == 'ECCO':\n dates_sst = pd.date_range('1992-01-01', periods=len(months_sst), freq='MS')\n# dates_sst_interp = pd.date_range('1992-01-01', periods=len(months_sst_interp), freq='MS')\nelse:\n dates_sst = dates\n\nsst.time.values = dates_sst\nta.time.values = dates_sst\n#sst_interp.time.values = dates_sst_interp\n\nthf.time.values = dates\n#h.time.values = dates\nQ_net_surf.time.values = dates\n#SW_net_surf.time.values = dates\n#ssh.time.values = dates\n# taux.time.values = dates\n# tauy.time.values = dates\nif dataname == 'ECCO':\n# sst_interp = sst_interp.sel(time=slice('1992-02-01','2015-12-01'))\n sst = sst.sel(time=slice('1992-02-01','2015-12-01'))\n thf = thf.sel(time=slice('1992-02-01','2015-12-01'))\n Q_net_surf = Q_net_surf.sel(time=slice('1992-02-01','2015-12-01'))\n # taux = taux.sel(time=slice('1992-02-01','2015-12-01'))\n # tauy = tauy.sel(time=slice('1992-02-01','2015-12-01'))\n #h = h.sel(time=slice('1992-02-01','2015-12-01'))\n\n# sst = sst.sel(time=slice('1992-01-01','2015-01-01'))\n# thf = thf.sel(time=slice('1992-01-01','2015-01-01'))\n# Q_net_surf = Q_net_surf.sel(time=slice('1992-01-01','2015-01-01'))\n# h = h.sel(time=slice('1992-01-01','2015-01-01'))\n# taux = taux.sel(time=slice('1992-01-01','2015-01-01'))\n# tauy = tauy.sel(time=slice('1992-01-01','2015-01-01'))\n\nii=-1\n\n#if dataname == 'MERRA2':\n\n#sst = sst[tskip:ii,:,:]\n##h = h[tskip:ii,:,:]\n#ssh = ssh[tskip:ii,:,:]\n#Q_net_surf = Q_net_surf[tskip:ii,:,:]\n#SW_net_surf = SW_net_surf[tskip:ii,:,:]\n#thf = thf[tskip:ii,:,:]\n#taux = taux[tskip:ii,:,:]\n#tauy = tauy[tskip:ii,:,:]\n#ps = ps[tskip:ii,:,:]\n\n \n#True for low-pass filtering \nlowpass = False\nhighpass = False\nanom_flag = True\ntimetend=False\ndetr=True\nrENSO=True\ncorr=False\nlterm=True\ndrawmaps=False\ndrawbox=False\nQekplot = True\n\n#interpolate to same grid\ncstep_lat=1.0\ncstep_lon=1.0\nif dataname == 'ERA5':\n cstep_lat=0.5\n cstep_lon=0.5\nif dataname == 'MERRA2':\n cstep_lat = 0.5\n cstep_lon= 0.625\nif dataname == 'OAFlux':\n cstep_lat=1.0\n cstep_lon=1.0\n\n#cstep_lat = 2.0\n#cstep_lon = 2.0\nlats = np.arange(minlat,maxlat+cstep_lat,cstep_lat)\nlons = np.arange(minlon,maxlon+cstep_lon,cstep_lon)\n\nthf = thf.transpose('time','lat','lon')\nlhf = lhf.transpose('time', 'lat','lon')\nshf = shf.transpose('time', 'lat','lon')\nQ_net_surf = Q_net_surf.transpose('time', 'lat','lon')\n\n\nds_out = xr.Dataset({'lat': (['lat'], lats),\n 'lon': (['lon'], lons)})\n\nregridder = xe.Regridder(sst, ds_out, 'bilinear', reuse_weights=True)\nsst = regridder(sst) # print basic regridder information.\n\nregridder = xe.Regridder(ta, ds_out, 'bilinear', reuse_weights=True)\nta = regridder(ta) # print 
basic regridder information.\n\nregridder = xe.Regridder(h, ds_out, 'bilinear', reuse_weights=True)\nh = regridder(h)\n\n#regridder = xe.Regridder(Tmxlfrac, ds_out, 'bilinear', reuse_weights=True)\n#Tmxlfrac = regridder(Tmxlfrac)\n\nregridder = xe.Regridder(thf, ds_out, 'bilinear', reuse_weights=True)\nthf = regridder(thf)\n#\nregridder = xe.Regridder(hbar, ds_out, 'bilinear', reuse_weights=True)\nhbar = regridder(hbar)\n\nregridder = xe.Regridder(sstbar, ds_out, 'bilinear', reuse_weights=True)\nsstbar = regridder(sstbar)\n\nregridder = xe.Regridder(tabar, ds_out, 'bilinear', reuse_weights=True)\ntabar = regridder(tabar)\n\nregridder = xe.Regridder(hmean, ds_out, 'bilinear', reuse_weights=True)\nhmean= regridder(hmean)\n\nregridder = xe.Regridder(Q_net_surf, ds_out, 'bilinear', reuse_weights=True)\nQ_net_surf = regridder(Q_net_surf)\n\n# regridder = xe.Regridder(taux, ds_out, 'bilinear', reuse_weights=True)\n# taux = regridder(taux)\n# #\n# regridder = xe.Regridder(tauy, ds_out, 'bilinear', reuse_weights=True)\n# tauy = regridder(tauy)\n\nregridder = xe.Regridder(sst_ECCO, ds_out, 'bilinear', reuse_weights=True)\nsst_ECCO = regridder(sst_ECCO)\n#\nregridder = xe.Regridder(Tmxl_ECCO, ds_out, 'bilinear', reuse_weights=True)\nTmxl_ECCO = regridder(Tmxl_ECCO)\n\n\n##cstep_lat = 2.0\n##cstep_lon = 2.0\n#lats_in = sst.lat.values\n#lons_in = sst.lon.values\n#\n#lats_in = lats\n#lons_in = lons\n#\n#dlat = np.diff(lats)[0]\n#dlon = np.diff(lons)[0]\n#\n#lats_in_b = 0.5*(lats_in[1:] + lats_in[:-1])\n#\n#lats_in_b = np.insert(lats_in_b,0,lats_in_b[0]-dlat)\n#lats_in_b = np.append(lats_in_b,lats_in_b[-1]+dlat)\n#\n#lons_in_b = 0.5*(lons_in[1:] + lons_in[:-1])\n#\n#lons_in_b = np.insert(lons_in_b,0,lons_in_b[0]-dlat)\n#lons_in_b = np.append(lons_in_b,lons_in_b[-1]+dlat)\n# \n#lats_out_b = np.arange(minlat,maxlat+cstep_lat,cstep_lat)\n#lons_out_b = np.arange(minlon,maxlon+cstep_lon,cstep_lon)\n#\n#lons_out = 0.5*(lons_out_b[1:]+lons_out_b[:-1]) \n#lats_out = 0.5*(lats_out_b[1:]+lats_out_b[:-1])\n#\n#grid_in = {'lon': lons_in, 'lat': lats_in, 'lon_b': lons_in_b, 'lat_b': lats_in_b}\n#grid_out = {'lon': lons_out, 'lat': lats_out, 'lon_b': lons_out_b, 'lat_b': lats_out_b}\n#\n#\n#thf = thf.transpose('time','lat','lon')\n#lhf = lhf.transpose('time', 'lat','lon')\n#shf = shf.transpose('time', 'lat','lon')\n#Q_net_surf = Q_net_surf.transpose('time', 'lat','lon')\n#\n#\n#ds_out = xr.Dataset({'lat': (['lat'], lats),\n# 'lon': (['lon'], lons)})\n#\n#\n#\n#regridder = xe.Regridder(grid_in, grid_out, 'conservative')\n#regridder.clean_weight_file()\n\n# taux_clim = taux.mean(dim='time')\n# tauy_clim = tauy.mean(dim='time')\n\nsst_mean = sst.mean(dim='time')\n\nlats = sst.lat\nlons = sst.lon\n\n\n# Scale the SST anomalies by the ratio of the mixed layer temperature variance to the SST in ECCO \n# This is to account for lower variability when averaged over the entire mixed layer\n#sst = sst*Tmxlfrac\n\n\nrho = 1000\nc_p = 3850\ndt = 30*3600*24\n#C = rho*c_p*h\n\n#C_anom, C_clim = st.anom(C)\n\n#Cbar = C_clim.mean(dim='month')\n\nCbar = rho*c_p*hbar\n\n#Cbar = C.mean(dim='time')\n\n#if dataname == 'ECCO':\n# Cbar = rho*c_p*delz_sum\n\n\n\nh_anom, h_clim = st.anom(h)\n\nh_clim_std = h_clim.std(dim='month')\nh_bar = h_clim.mean(dim='month')\n\n\n# Compute monthly anomaly\nif anom_flag:\n Q_net_surf,Q_net_surf_clim = st.anom(Q_net_surf)\n thf,thf_clim = st.anom(thf)\n sst,sst_clim = st.anom(sst)\n ta,ta_clim = st.anom(ta)\n #Q_ek,Q_ek_clim= st.anom(Q_ek)\n sst_ECCO,sst_ECCO_clim= st.anom(sst_ECCO)\n 
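# ECCO mixed-layer temperature anomalies; their variance relative to the SST variance (Tmxlfrac below) is used to rescale the SST anomalies.\n    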
Tmxl_ECCO,Tmxl_ECCO_clim= st.anom(Tmxl_ECCO)\n\n\n\n# Remove linear trend\nif detr: \n    sst = sst.fillna(0.) \n    sst = xr.DataArray(signal.detrend(sst, axis=0), dims=sst.dims, coords=sst.coords) \n    \n    ta = ta.fillna(0.) \n    ta = xr.DataArray(signal.detrend(ta, axis=0), dims=ta.dims, coords=ta.coords) \n\n#    h = h.fillna(0.) \n#    h = xr.DataArray(signal.detrend(h, axis=0), dims=h.dims, coords=h.coords) \n    \n    thf = thf.fillna(0.) \n    thf = xr.DataArray(signal.detrend(thf, axis=0), dims=thf.dims, coords=thf.coords) \n    \n    Q_net_surf = Q_net_surf.fillna(0.) \n    Q_net_surf = xr.DataArray(signal.detrend(Q_net_surf, axis=0), dims=Q_net_surf.dims, coords=Q_net_surf.coords) \n    \n\n\n# Mask zero or NaN values (continents) \nocean_points1 = ~(sst==0)\nocean_points2 = ~(xr.ufuncs.isnan(sst))\nocean_points = xr.ufuncs.logical_and(ocean_points1, ocean_points2)\nsst = sst.where(ocean_points)\nta = ta.where(ocean_points)\nthf = thf.where(ocean_points)\n# Q_net_surf = Q_net_surf.where(ocean_points)\n# Q_ek = Q_ek.where(ocean_points)\n#Q_ek = Q_ek.where(~(Q_ek==0))\n#Q_ek = Q_ek.where(np.abs(lats) > 0)\n# Compute SST tendency\n\n\nTmxl_var_ECCO = Tmxl_ECCO.var(dim='time')\nsst_var_ECCO = sst_ECCO.var(dim='time')\n\n# Scale the SST anomalies by the ratio of MXL temperature variance to SST variance from ECCO (as an estimate for a more physical result)\nTmxlfrac = Tmxl_var_ECCO/sst_var_ECCO\nsst = sst*np.sqrt(Tmxlfrac)\n\n\n \ntendsst = (sst.shift(time=-2)-sst)[:-2]\n\ntendsst = tendsst/(2*dt)\n#tendsst = tendsst/dt\n\nnt = sst.shape[0]\n\nthf = thf.isel(time=slice(1,nt-1))\nQ_net_surf = Q_net_surf.isel(time=slice(1,nt-1))\nsst = sst.isel(time=slice(1,nt-1))\nta = ta.isel(time=slice(1,nt-1))\n\nnt = sst.shape[0]\n\n# Make sure sst tendency times match up with other fields\ntendsst.time.values = thf.time.values\n\n#Qr = Cbar*tendsst - (-thf + Q_net_surf) - Q_ek\n\nQ_r = Cbar*tendsst - (-thf + Q_net_surf)\nQ_r = Q_r.transpose('time','lat','lon')\n\nQ_r_mean = Q_r.mean(dim='time')\n\n\n#Q_s = -thf + Q_net_surf + Q_ek\n\nQ_s = -thf + Q_net_surf \n\nQ_tot = Q_r + Q_s\n\nnt = sst.shape[0]\n#timeslice = slice(0,nt)\n#timeslice = slice(int(Tn),nt-int(Tn))\n\n# Q_s = Q_s.isel(time=timeslice)\n# Q_r = Q_r.isel(time=timeslice)\n# tendsst = tendsst.isel(time=timeslice)\n# sst = sst.isel(time=timeslice)\n\norder = 5\nfs = 1 # sample rate, (cycles per month)\nTn = 4.*12.\ncutoff = 1/Tn # desired cutoff frequency of the filter (cycles per month)\n\n\nif lowpass:\n    sst_lp = st.butter_lowpass_filter_xr(sst, cutoff, fs, order)\n    r2corrs = st2.cor(sst_lp,sst_lp,lagx=2)\nelse:\n    r2corrs = st2.cor(sst,sst,lagx=2)\n    \n#Niño 3.4 (5N-5S, 170W-120W)\n\norder = 5\nfs = 1 # sample rate, (cycles per month)\nTn_enso = 6.\ncutoff_enso = 1/Tn_enso # desired cutoff frequency of the filter (cycles per month)\nenso = st2.spatial_ave_xr(sst.sel(lon=slice(190,240)), lats=lats.sel(lat=slice(-5,5)))\n\nenso = st.butter_lowpass_filter_xr(enso,cutoff_enso,fs,order) \n\nif not(lowpass):\n    Tn = 0.\n    \n\nif rENSO:\n    sste = st2.regressout_x(enso, sst)\n    Q_se = st2.regressout_x(enso, Q_s)\n    Q_re = st2.regressout_x(enso, Q_r)\n    Q_tote = st2.regressout_x(enso, Q_tot)\n    \n\n\n# Scaling factor (to convert from units of W*K/(s*m^2) to K^2)\n#G = (2*dt**2/(Cbar**2*(1-r2corrs)))\n    \n#lambdaQ_tot = st.cov(sste, Q_tote, lagx=-1)/st.cov(sste,sste, lagx=-1)\n\nlambdaQ_totlag0 = st.cov(sste, Q_tote, lagx=0)/st.cov(sste,sste, lagx=0)\nlambdaQ_totlag1 = st.cov(sste, Q_tote, lagx=-1)/st.cov(sste,sste, lagx=-1)\nlambdaQ_totlag2 = st.cov(sste, Q_tote, lagx=-2)/st.cov(sste,sste, 
lagx=-2)\nlambdaQ_totlag3 = st.cov(sste, Q_tote, lagx=-3)/st.cov(sste,sste, lagx=-3)\n\n#lambdaQ_tot = (lambdaQ_totlag1 + lambdaQ_totlag2 + lambdaQ_totlag3)/3.\nlambdaQ_tot = lambdaQ_totlag1\n\nlambdaQ_slag0 = st.cov(sste, Q_se, lagx=0)/st.cov(sste,sste, lagx=0)\nlambdaQ_slag1 = st.cov(sste, Q_se, lagx=-1)/st.cov(sste,sste, lagx=-1)\nlambdaQ_slag2 = st.cov(sste, Q_se, lagx=-2)/st.cov(sste,sste, lagx=-2)\nlambdaQ_slag3 = st.cov(sste, Q_se, lagx=-3)/st.cov(sste,sste, lagx=-3)\n\nlambdaQ_rlag0 = st.cov(sste, Q_re, lagx=0)/st.cov(sste,sste, lagx=0)\nlambdaQ_rlag1 = st.cov(sste, Q_re, lagx=-1)/st.cov(sste,sste, lagx=-1)\nlambdaQ_rlag2 = st.cov(sste, Q_re, lagx=-2)/st.cov(sste,sste, lagx=-2)\nlambdaQ_rlag3 = st.cov(sste, Q_re, lagx=-3)/st.cov(sste,sste, lagx=-3)\n\n#lambdaQ_s = (lambdaQ_slag1 + lambdaQ_slag2 + lambdaQ_slag3)/3.\n#lambdaQ_r = (lambdaQ_rlag1 + lambdaQ_rlag2 + lambdaQ_rlag3)/3.\n\nlambdaQ_s = lambdaQ_slag1\nlambdaQ_r = lambdaQ_rlag1\n\nlambdaQ_totT = lambdaQ_tot*sst\nlambdaQ_sT = lambdaQ_s*sst\nlambdaQ_rT = lambdaQ_r*sst\n\n#Q_totstar = Q_tot - lambdaQ_totT\nQ_sstar = Q_s - lambdaQ_sT\nQ_rstar = Q_r - lambdaQ_rT\n\nQ_totstar = Q_tot - lambdaQ_totT\n\nfreqs = fft.fftfreq(nt)\nfreqs = freqs[:int(nt/2)]\n\nT_A = fft.fft(sst,axis=0, norm='ortho')\nQtot_A = fft.fft(Q_tot,axis=0, norm='ortho')\nQtotstar_A = fft.fft(Q_totstar,axis=0, norm='ortho')\nQsstar_A = fft.fft(Q_sstar,axis=0,norm='ortho')\nQrstar_A = fft.fft(Q_rstar,axis=0,norm='ortho')\nQs_A = fft.fft(Q_s,axis=0,norm='ortho')\nQr_A = fft.fft(Q_r,axis=0,norm='ortho')\n\nTpower_temp = np.abs(T_A)**2\nQtotpower_temp = np.abs(Qtot_A)**2\nQtotstarpower_temp = np.abs(Qtotstar_A)**2\nQsstarpower_temp = np.abs(Qsstar_A)**2\nQrstarpower_temp = np.abs(Qrstar_A)**2\nQspower_temp = np.abs(Qs_A)**2\nQrpower_temp = np.abs(Qr_A)**2\n\nTpower_temp = 2*Tpower_temp[:int(nt/2)]\nQtotpower_temp = 2*Qtotpower_temp[:int(nt/2)]\nQtotstarpower_temp = 2*Qtotstarpower_temp[:int(nt/2)]\nQsstarpower_temp = 2*Qsstarpower_temp[:int(nt/2)]\nQrstarpower_temp = 2*Qrstarpower_temp[:int(nt/2)]\nQspower_temp = 2*Qspower_temp[:int(nt/2)]\nQrpower_temp = 2*Qrpower_temp[:int(nt/2)]\n\nTpower_temp = Tpower_temp/nt\nQtotpower_temp = Qtotpower_temp/nt\nQtotstarpower_temp = Qtotstarpower_temp/nt\nQsstarpower_temp = Qsstarpower_temp/nt\nQrstarpower_temp = Qrstarpower_temp/nt\nQspower_temp = Qspower_temp/nt\nQrpower_temp = Qrpower_temp/nt\n\nnfreq = len(freqs)\nlats = sst.lat\nlons = sst.lon\nnlat = len(lats)\nnlon = len(lons)\n\nf = xr.DataArray(np.nan*np.zeros([nfreq]), coords={'freq': freqs},dims=['freq'])\nf.values = freqs\n\nTpower = xr.DataArray(np.nan*np.zeros([nfreq,nlat,nlon]),\n                 coords={'freq': f, 'lat': lats, 'lon':lons},dims=['freq', 'lat','lon'])\n\nQtotpower = xr.DataArray(np.nan*np.zeros([nfreq,nlat,nlon]),\n                 coords={'freq': f, 'lat': lats, 'lon':lons},dims=['freq', 'lat','lon'])\n\nQtotstarpower = xr.DataArray(np.nan*np.zeros([nfreq,nlat,nlon]),\n                 coords={'freq': f, 'lat': lats, 'lon':lons},dims=['freq', 'lat','lon'])\n\n\nQsstarpower = xr.DataArray(np.nan*np.zeros([nfreq,nlat,nlon]),\n                 coords={'freq': f, 'lat': lats, 'lon':lons},dims=['freq', 'lat','lon'])\n\nQrstarpower = xr.DataArray(np.nan*np.zeros([nfreq,nlat,nlon]),\n                 coords={'freq': f, 'lat': lats, 'lon':lons},dims=['freq', 'lat','lon'])\n\nQspower = xr.DataArray(np.nan*np.zeros([nfreq,nlat,nlon]),\n                 coords={'freq': f, 'lat': lats, 'lon':lons},dims=['freq', 'lat','lon'])\n\nQrpower = xr.DataArray(np.nan*np.zeros([nfreq,nlat,nlon]),\n                 coords={'freq': f, 'lat': lats, 
'lon':lons},dims=['freq', 'lat','lon'])\n\nTpower.values = Tpower_temp\nQtotpower.values = Qtotpower_temp\nQtotstarpower.values = Qtotstarpower_temp\nQsstarpower.values = Qsstarpower_temp\nQrstarpower.values = Qrstarpower_temp\nQspower.values = Qspower_temp\nQrpower.values = Qrpower_temp\n\nlowmaxi=3\nf = f[lowmaxi:]\nTpower = Tpower[lowmaxi:]\nQtotpower = Qtotpower[lowmaxi:]\nQtotstarpower = Qtotstarpower[lowmaxi:]\nQsstarpower = Qsstarpower[lowmaxi:]\nQrstarpower = Qrstarpower[lowmaxi:]\nQspower = Qspower[lowmaxi:]\nQrpower = Qrpower[lowmaxi:]\n\nF_o = Q_rstar\nF_a = Q_sstar\n\nT_var = sst.var(dim='time')\n\nF_o_var = F_o.var(dim='time')\nF_a_var = F_a.var(dim='time')\n\nQ_s_var = Q_s.var(dim='time')\nQ_o_var = Q_r.var(dim='time')\n\nQ_totstar_var = (Q_totstar).var(dim='time')\ncovQsQrstar = 0.5*(Q_totstar_var - F_a_var - F_o_var)\n\nQ_tot_var = Q_tot.var(dim='time')\ncovQsQr = 0.5*(Q_tot_var - Q_s_var - Q_o_var)\n\nQsQrpower = 0.5*(Qtotpower - Qspower - Qrpower)\nQsQrstarpower = 0.5*(Qtotstarpower - Qsstarpower - Qrstarpower)\n\n\n# Spatial averaging\n# latbounds_ave = [30,45]\n# lonbounds_ave = [160,220]\n\n#North pacific\n# lonbounds_ave = [120,270]\n# latbounds_ave = [20,60]\n\n#North Atlantic\n# latbounds_ave=[20,60]\n# lonbounds_ave=[270,360]\n\n#Global\n# lonbounds_ave = [0.5,359.5]\n# latbounds_ave = [-64.5,64.5]\n\n#Extratropical NH\nlonbounds_ave = [0.5,359.5]\nlatbounds_ave = [30,60]\n\nTpower_ave = st2.spatial_ave_xr(Tpower.sel(lon=slice(lonbounds_ave[0],lonbounds_ave[1])), lats=lats.sel(lat=slice(latbounds_ave[0],latbounds_ave[1])))\nF_o_var_ave = st2.spatial_ave_xr(F_o_var.sel(lon=slice(lonbounds_ave[0],lonbounds_ave[1])), lats=lats.sel(lat=slice(latbounds_ave[0],latbounds_ave[1])))\nF_a_var_ave = st2.spatial_ave_xr(F_a_var.sel(lon=slice(lonbounds_ave[0],lonbounds_ave[1])), lats=lats.sel(lat=slice(latbounds_ave[0],latbounds_ave[1])))\nF_o_power_ave = st2.spatial_ave_xr(Qrstarpower.sel(lon=slice(lonbounds_ave[0],lonbounds_ave[1])), lats=lats.sel(lat=slice(latbounds_ave[0],latbounds_ave[1])))\nF_a_power_ave = st2.spatial_ave_xr(Qsstarpower.sel(lon=slice(lonbounds_ave[0],lonbounds_ave[1])), lats=lats.sel(lat=slice(latbounds_ave[0],latbounds_ave[1])))\nCbar_ave = st2.spatial_ave_xr(Cbar.sel(lon=slice(lonbounds_ave[0],lonbounds_ave[1])), lats=lats.sel(lat=slice(latbounds_ave[0],latbounds_ave[1])))\n\nFaFo_power_ave = st2.spatial_ave_xr(QsQrstarpower.sel(lon=slice(lonbounds_ave[0],lonbounds_ave[1])), lats=lats.sel(lat=slice(latbounds_ave[0],latbounds_ave[1])))\n\n\n# Compute power spectra for T_o and T_a based on solutions of the BB98 model\nmonthtosec=3600*24*30\nfreq_sec = f/monthtosec\n\nlambda_tot = lambdaQ_tot\n\n#lambda_tot = lambdaQ_s\n\ngamma_o = Cbar_ave\n\ncoef = lambda_tot**2 + (2*np.pi*gamma_o*freq_sec)**2\n# White-noise assumption\nF_a_pow_an = F_a_var_ave/len(f)\nF_o_pow_an = F_o_var_ave/len(f)\n\n#F_a_pow_an = F_a_power_ave\n#F_o_pow_an = F_o_power_ave\n#F_aF_o_pow_an = FaFo_power_ave\n\nT_o_Fa = (F_a_pow_an)/coef\nT_o_Fo = (F_o_pow_an)/coef\n\n#T_o_Fo[:] = 0\n\n#T_o_Fo = 0\n#T_o_FaFo = (F_aF_o_pow_an)/coef\n\n#T_o_Fa = T_o_Fa + T_o_FaFo\n#T_o_Fo = T_o_Fo + T_o_FaFo\n\nT_o_power_an = T_o_Fa + T_o_Fo \n\nT_o_power_an_ave = st2.spatial_ave_xr(T_o_power_an.sel(lon=slice(lonbounds_ave[0],lonbounds_ave[1])), lats=lats.sel(lat=slice(latbounds_ave[0],latbounds_ave[1])))\nT_o_Fa_ave = st2.spatial_ave_xr(T_o_Fa.sel(lon=slice(lonbounds_ave[0],lonbounds_ave[1])), lats=lats.sel(lat=slice(latbounds_ave[0],latbounds_ave[1])))\nT_o_Fo_ave = 
st2.spatial_ave_xr(T_o_Fo.sel(lon=slice(lonbounds_ave[0],lonbounds_ave[1])), lats=lats.sel(lat=slice(latbounds_ave[0],latbounds_ave[1])))\n\nT_o_var_sum = T_o_power_an.sum('freq')\nT_o_var_Fa = T_o_Fa.sum('freq')\nT_o_var_Fo = T_o_Fo.sum('freq')\n\n# Plotting\n# latbounds_plot = [15,60]\n# lonbounds_plot = [120,260]\n\nlonbounds_plot = [0,360]\nlatbounds_plot = [-65.,65.]\n\nbnds = [np.round(lonbounds_plot[0]-359), np.round(lonbounds_plot[1]-361), latbounds_plot[0], latbounds_plot[1]]\n\ncent = (bnds[0]+bnds[1])/2.\nprj = cart.crs.PlateCarree(central_longitude=cent)\n\nbnds[0] = bnds[0] + 1\nbnds[2] = bnds[2] + 2\n\npardiff = 30.\nmerdiff = 60.\nif lonbounds[1] - lonbounds[0] <= 180:\n merdiff = 15.\nif lonbounds[1] - lonbounds[0] <= 60:\n merdiff = 5.\nif np.abs(latbounds[1]-latbounds[0]) <= 30:\n pardiff=5.\npar = np.arange(-90.,91.,pardiff)\nmer = np.arange(-180.,180.+merdiff,merdiff)\nx, y = np.meshgrid(lons, lats)\n\n\norient = 'horizontal'\nif np.abs(latbounds[1] - latbounds[0]) > np.abs(lonbounds[1] - lonbounds[0]):\n orient = 'vertical'\n \n \nsstvmax = 5.0\nvmin=-1.0\nvmax=1.0\nvmin_pow = -1.0\nvmax_pow = 1.0\nif lowpass:\n sstvmax = 1.0\n vmin=-1.0\n vmax=1.0\n \n#sstcmap = cmocean.cm.plasma\n#sstcmap = plt.cm.cubehelix_r\n#fieldcmap = cmocean.cm.balance\nfieldcmap = plt.cm.RdBu_r\n\nsstcmap = cc.cm.CET_L17\n#fieldcmap = cc.cm.CET_D1A\n#fieldcmap = cc.cm.coolwarm\n\nx1 = lonbounds_ave[0]\nx2 = lonbounds_ave[1]\ny1 = latbounds_ave[0]\ny2 = latbounds_ave[1]\n\nvarmin=10**2\nvarmax=10**4\nlambdamax=60\n\n\ncbfrac=0.10\n\n#period in months\n#ftop = 1/f\n#period in years\n#ftop = ftop/12.\n\n# fmin = (1/25)/12.\n# fmax = 0.5\n\n# locmin = matplotlib.ticker.LogLocator(base=10.0,subs=(0.2,0.4,0.6,0.8),numticks=12)\n\n# plt.figure(figsize=(10,12))\n# plt.plot(f, T_o_power_an_ave, color='k', label='solution')\n# plt.plot(f, Tpower_ave, color='grey', label='observed')\n# plt.legend(loc='best')\n# ax=plt.gca()\n# ax.set_xscale('log')\n# ax.set_yscale('log')\n# ax.set_xlim(fmin,fmax)\n# ax.set_ylim(10**(-5),10**(0))\n# ax.set_xlabel('Frequency (month$^{-1}$)')\n# ax.set_ylabel('Variance (K$^{2}$')\n# ax2 = ax.twiny()\n# ax2.set_xlim((1/fmin)/12, (1/fmax)/12)\n# ax2.set_xscale('log')\n# ax2.xaxis.set_major_formatter(ScalarFormatter())\n# ax2.set_xticks([0.1,1.0,2.5,5.0,10,25])\n# ax2.set_xticklabels([0.1,1.0,2.5,5.0,10,25])\n# ax2.set_xlabel('Period (years)')\n# ax.tick_params(which='major', length=10)\n# ax.tick_params(which='minor', length=4)\n# ax2.tick_params(which='major', length=10)\n# ax2.tick_params(which='minor', length=0)\n# ax.xaxis.set_minor_locator(locmin)\n# ax.xaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n# ax.yaxis.set_minor_locator(locmin)\n# ax.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n# plt.savefig(fout + '{:s}_To_powerspectra_analytical_{:2.0f}Nto{:2.0f}N_detr{:s}.pdf'.format(dataname, latbounds_ave[0], latbounds_ave[1], str(detr)[0]))\n\nT_o_sum = T_o_power_an_ave.cumsum('freq')\nT_o_Fo_var = T_o_Fo_ave.cumsum('freq')\nT_o_Fa_var = T_o_Fa_ave.cumsum('freq')\n\nT_o_sum = T_o_sum[::-1]\nT_o_Fo_var = T_o_Fo_var[::-1]\nT_o_Fa_var = T_o_Fa_var[::-1]\n\n#T_o_sum = T_o_Fo_var\n\nfilter_lengths = (1/f[::-1])/12.\n\nfilter_lengths = filter_lengths - filter_lengths[0]\n\n#T_o_save = T_o_sum\n\n#T_o_save2 = T_o_save\n\nplt.figure(1)\nfig, axs = plot.subplots(ncols=1, nrows=2, aspect=1.2, tight=True, share=False, hratios=(3,2))\n#plot.subplots(ncols=2, nrows=3)\nh1=axs[0].plot(filter_lengths, T_o_Fa_var, color='C2', label=r'$F_a$', linewidth=2, 
zorder=5)\n#plt.plot(Tns/12., ave_T_var_thf, color='C3', label=r'$THF$')\n#plt.plot(Tns/12., ave_T_var_Rnet, color='C4', label=r'$R_{net}$')\nh2=axs[0].plot(filter_lengths, T_o_Fo_var, color='C0', label=r'$F_o$', linewidth=2, zorder=5)\n#plt.plot(Tns/12., ave_T_var_Qek, color='C1', label=r'$Q_{ek}$')\nh3=axs[0].plot(filter_lengths, T_o_sum, color='k', label=r'$\\sigma_T^2$', linewidth=2, zorder=5)\n#h4=axs[0].plot(filter_lengths, T_o_save2, color='grey', label=r'$\\sigma_{T^{2}_{nodyn}}$', linewidth=2, zorder=5)\n#h4=axs[0].plot(Tns/12., ave_T_var, color='C5', label=r'$\\sigma_T^2$')\naxs[0].axhline(0, color='k', linewidth=1)\n\n#hs=[h1,h2,h3,h4]\n#Global/tropics\naxs[0].set_ylim(-0.1,0.38)\n#axs[0].set_ylim(-0.1,0.6)\n#WBC\n#plt.ylim(-0.22,0.7)\n#NH\n# yticklbls = np.round(np.arange(-0.3,0.6,0.1),1)\n# axs[0].set_yticks(yticklbls)\n# axs[0].set_ylim(-0.35,0.55)\n#SH\n#ax1.set_ylim(-0.04,0.3)\n#NA\n#axs[0].set_ylim(-0.1,0.8)\n#plt.ylim(-0.02,0.6)\naxs[0].set_xlim(0,6)\naxs[0].set_ylabel('Contribution to $\\sigma_T^2$ (K$^{2}$)')\n\n#leg=fig.legend(hs, loc='r')\nfrac_T_var_Qs = T_o_Fa_var/T_o_sum\n#frac_T_var_thf = ave_T_var_thf/ave_T_var_sum\n#frac_T_var_Rnet = ave_T_var_Rnet/ave_T_var_sum\nfrac_T_var_Qr = T_o_Fo_var/T_o_sum\n#frac_T_var_Qnet = ave_T_var_Rnet/ave_T_var_sum\n#frac_T_var_Qek = ave_T_var_Qek/ave_T_var_sum\n\ny0=frac_T_var_Qr\ny1=y0+frac_T_var_Qs\n#y2=y1+frac_T_var_Qek\n\nyticklbls = np.array([0,0.2,0.4,0.6,0.8,1.0])\n\n#leg_texts = [leg[1].get_texts()[0], leg[2].get_texts()[0], leg[3].get_texts()[0], leg[4].get_texts()[0]]\n\n#maxs = ax1.panel('b', space=0.5, share=False)\naxs[1].fill_between(filter_lengths, y0, color='C0', label=r'$Q_o$', alpha=0.8, linewidth=0)\naxs[1].fill_between(filter_lengths, y1, y0, color='C2', label=r'$Q_s$', alpha=0.8, linewidth=0)\naxs[1].set_xlabel('Filter Length (years)')\naxs[1].set_ylim(0,1.0)\naxs[1].set_xlim(0,6)\naxs[1].set_yticks(yticklbls)\naxs[1].set_ylabel('Fractional Contribution')\n#plt.legend(loc='best')\n# shift = max([t.get_window_extent().width for t in leg_texts])\n# for t in leg_texts:\n# t.set_ha('right') # ha is alias for horizontalalignment\n# t.set_position((shift,0))\nfig.savefig(fout + '{:s}_MODELsstvarbudget_varytimefilter_{:2.0f}Nto{:2.0f}N.png'.format(dataname, latbounds[0], latbounds[1]))\nplt.close(fig)\n\n# #fig,ax=plt.subplots(nrows=5,ncols=1,sharex=True)\n# plt.figure(figsize=(10,12))\n# plt.plot(f,Tpower_ave,label='$T_o$',color='k')\n# plt.plot(f,F_a_power_ave,label='$F_a$',color='g')\n# plt.plot(f,F_o_power_ave,label='$F_o$',color='b')\n# plt.legend(loc='best')\n# ax=plt.gca()\n# ax.set_xscale('log')\n# ax.set_yscale('log')\n# ax.set_xlim(fmin,fmax)\n# ax.set_ylim(10**(-5),10**(2))\n# ax.set_xlabel('Frequency (month$^{-1}$)')\n# ax.set_ylabel('Variance')\n# ax2 = ax.twiny()\n# ax2.set_xlim((1/fmin)/12, (1/fmax)/12)\n# ax2.set_xscale('log')\n# ax2.xaxis.set_major_formatter(ScalarFormatter())\n# ax2.set_xticks([0.1,1.0,2.5,5.0,10,25])\n# ax2.set_xticklabels([0.1,1.0,2.5,5.0,10,25])\n# ax2.set_xlabel('Period (years)')\n# ax.tick_params(which='major', length=10)\n# ax.tick_params(which='minor', length=4)\n# ax2.tick_params(which='major', length=10)\n# ax2.tick_params(which='minor', length=0)\n# ax.xaxis.set_minor_locator(locmin)\n# ax.xaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n# ax.yaxis.set_minor_locator(locmin)\n# ax.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n# plt.savefig(fout + '{:s}_forcing_powerspectra_{:2.0f}Nto{:2.0f}N_detr{:s}.pdf'.format(dataname, latbounds_ave[0], 
latbounds_ave[1], str(detr)[0]))\n\n# #fig,ax=plt.subplots(nrows=5,ncols=1,sharex=True)\n# plt.figure(figsize=(10,12))\n# plt.plot(f,Tpower_ave,label='$T_o$',color='k')\n# plt.plot(f,Qspower_ave,label='$Q_s$',color='g')\n# plt.plot(f,Qopower_ave,label='$Q_o$',color='b')\n# plt.legend(loc='lower left')\n# ax=plt.gca()\n# ax.set_xscale('log')\n# ax.set_yscale('log')\n# ax.set_xlim(fmin,fmax)\n# ax.set_ylim(10**(-5),10**(2))\n# ax.set_xlabel('Frequency (month$^{-1}$)')\n# ax.set_ylabel('Variance')\n# ax2 = ax.twiny()\n# ax2.set_xlim((1/fmin)/12, (1/fmax)/12)\n# ax2.set_xscale('log')\n# ax2.xaxis.set_major_formatter(ScalarFormatter())\n# ax2.set_xticks([0.1,1.0,2.5,5.0,10,25])\n# ax2.set_xticklabels([0.1,1.0,2.5,5.0,10,25])\n# ax2.set_xlabel('Period (years)')\n# ax.tick_params(which='major', length=10)\n# ax.tick_params(which='minor', length=4)\n# ax2.tick_params(which='major', length=10)\n# ax2.tick_params(which='minor', length=0)\n# ax.xaxis.set_minor_locator(locmin)\n# ax.xaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n# ax.yaxis.set_minor_locator(locmin)\n# ax.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n# plt.savefig(fout + '{:s}_QsQo_powerspectra_{:2.0f}Nto{:2.0f}N_detr{:s}.pdf'.format(dataname, latbounds_ave[0], latbounds_ave[1], str(detr)[0]))\n\n# #fig,ax=plt.subplots(nrows=5,ncols=1,sharex=True)\n# plt.figure(figsize=(8,10))\n# #plt.plot(f,QsQopower_ave,label='$|Q_sQ_o|$',color='k')\n# plt.plot(f,FaFopower_ave,label='$|F_aF_o|$',color='k')\n# plt.legend(loc='best')\n# ax=plt.gca()\n# ax.set_xscale('log')\n# ax.set_ylim(-10**(2),10**(2))\n# ax.set_yscale('symlog')\n# ax.set_xlim(fmin,fmax)\n# ax.set_xlabel('Frequency (month$^{-1}$)')\n# ax.set_ylabel('Variance')\n# ax2 = ax.twiny()\n# ax.axhline(0, color='k')\n# ax2.set_xlim((1/fmin)/12, (1/fmax)/12)\n# ax2.set_xscale('log')\n# ax2.xaxis.set_major_formatter(ScalarFormatter())\n# ax2.set_xticks([0.1,1.0,2.5,5.0,10,25])\n# ax2.set_xticklabels([0.1,1.0,2.5,5.0,10,25])\n# ax2.set_xlabel('Period (years)')\n# ax.tick_params(which='major', length=10)\n# ax.tick_params(which='minor', length=4)\n# ax2.tick_params(which='major', length=10)\n# ax2.tick_params(which='minor', length=0)\n# ax.xaxis.set_minor_locator(locmin)\n# ax.xaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n# ax.yaxis.set_minor_locator(locmin)\n# ax.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n# plt.savefig(fout + '{:s}_FaFo_cospectrum_{:2.0f}Nto{:2.0f}N_detr{:s}.pdf'.format(dataname, latbounds_ave[0], latbounds_ave[1], str(detr)[0]))\n\n# mapper = Mapper()\n# m,ax=mapper(T_var, log=True, bnds=bnds, title='$\\sigma^2_T$', units=r'K$^{2}$', cbfrac=cbfrac, cmap=sstcmap, vmin=0.01, vmax=sstvmax)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1,linestyle='dashed',transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_Tvar_obs_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# lognorm=colors.SymLogNorm(linthresh=0.03, linscale=0.03, vmin=vmin, vmax=vmax)\n\n# mapper = Mapper()\n# m,ax=mapper(T_o_var_sum, log=True, bnds=bnds, title='sum', units=r'K$^{2}$', cbfrac=cbfrac, cmap=sstcmap, vmin=0.01, vmax=sstvmax)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1,linestyle='dashed',transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + 
'{:s}_Tvar_sum_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# m,ax=mapper(T_o_var_Fo, norm=lognorm, bnds=bnds, title='$\\widetilde{F}_o$', units=r'K$^{2}$', cbfrac=cbfrac, cmap=fieldcmap, vmin=vmin, vmax=vmax)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1,linestyle='dashed',transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_Tvar_Fo_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# m,ax=mapper(T_o_var_Fa, norm=lognorm, bnds=bnds, title='$\\widetilde{F}_a$', units=r'K$^{2}$', cbfrac=cbfrac, cmap=fieldcmap, vmin=vmin, vmax=vmax)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1,linestyle='dashed',transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_Tvar_Fa_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n\n# m,ax=mapper(sstbar, bnds=bnds, logscale=False, log=False, title=r'$\\overline{T}_o$', units=r'K', cbfrac=cbfrac, cmap=sstcmap, vmin=280, vmax=300)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1, linestyle='dashed', transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_sstbar_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# mapper = Mapper()\n# m,ax=mapper(tabar, bnds=bnds, logscale=False, log=False, title=r'$\\overline{T}_a$', units=r'K', cbfrac=cbfrac, cmap=sstcmap, vmin=280, vmax=300)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1, linestyle='dashed', transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_tabar_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# mapper = Mapper()\n# m,ax=mapper(F_o_var, bnds=bnds, logscale=False, log=True, title=r'$\\overline{F^{\\,\\prime 2}_o}$', units=r'W$^2$ m$^{-4}$', cbfrac=cbfrac, cmap=sstcmap, vmin=varmin, vmax=varmax)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1, linestyle='dashed', transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_Fovar_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# mapper = Mapper()\n# m,ax=mapper(F_a_var, bnds=bnds, logscale=False, log=True, title=r'$\\overline{F^{\\,\\prime 2}_a}$', units=r'W$^2$ m$^{-4}$', cbfrac=cbfrac, cmap=sstcmap, vmin=varmin, vmax=varmax)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1, linestyle='dashed', transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_Favar_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# mapper = Mapper()\n# m,ax=mapper(lambda_o, logscale=False, bnds=bnds, title=r'$\\lambda_{o}$', units=r'W m$^{-2}$ K$^{-1}$', cbfrac=cbfrac, cmap=fieldcmap, vmin=-lambdamax, vmax=lambdamax)\n# if drawbox:\n# 
ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1, linestyle='dashed', transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_lambda_o_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# mapper = Mapper()\n# m,ax=mapper(lambda_odyn, logscale=False, bnds=bnds, title=r'$\\lambda_{o,dyn}$', units=r'W m$^{-2}$ K$^{-1}$', cbfrac=cbfrac, cmap=fieldcmap, vmin=-lambdamax, vmax=lambdamax)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1, linestyle='dashed', transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_lambda_odyn_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# mapper = Mapper()\n# m,ax=mapper(lambda_a, logscale=False, bnds=bnds, title=r'$\\lambda_{a}$', units=r'W m$^{-2}$ K$^{-1}$', cbfrac=cbfrac, cmap=fieldcmap, vmin=-lambdamax, vmax=lambdamax)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1, linestyle='dashed', transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_lambda_a_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# mapper = Mapper()\n# m,ax=mapper(lambda_sa, logscale=False, bnds=bnds, title=r'$\\lambda_{s,a}$', units=r'W m$^{-2}$ K$^{-1}$', cbfrac=cbfrac, cmap=fieldcmap, vmin=-lambdamax, vmax=lambdamax)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1, linestyle='dashed', transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_lambda_sa_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n# mapper = Mapper()\n# m,ax=mapper(lambda_so, logscale=False, bnds=bnds, title=r'$\\lambda_{s,o}$', units=r'W m$^{-2}$ K$^{-1}$', cbfrac=cbfrac, cmap=fieldcmap, vmin=-lambdamax, vmax=lambdamax)\n# if drawbox:\n# ax.add_patch(mpatches.Rectangle(xy=[x1,y1], width=(x2-x1), height=(y2-y1), facecolor='none', edgecolor='black',linewidth=1, linestyle='dashed', transform=cart.crs.PlateCarree()))\n# plt.savefig(fout + '{:s}_lambda_so_{:1.0f}LP_{:2.0f}Nto{:2.0f}N_detr{:s}.png'.format(dataname, Tn/12., latbounds[0], latbounds[1], str(detr)[0]))\n# plt.close()\n\n\n\n\n","sub_path":"BB98/FH77_varyfilter.py","file_name":"FH77_varyfilter.py","file_ext":"py","file_size_in_byte":46842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"119905005","text":"import chainer\nimport chainer.functions as F\nimport argparse\n\nfrom pathlib import Path\nfrom chainer import serializers\nfrom model import Generator, Discriminator, VGG\nfrom dataset import DatasetLoader\nfrom utils import set_optimizer\nfrom visualize import Visualizer\n\n\nclass ESRGANLossFunction:\n def __init__(self):\n pass\n\n @staticmethod\n def content_loss(y, t):\n return F.mean_absolute_error(y, t)\n\n @staticmethod\n def perceptual_loss(vgg, y, t):\n y_feat = vgg(y)\n t_feat = vgg(t)\n sum_loss = 0\n\n for yf, tf in zip(y_feat, t_feat):\n _, ch, h, w = yf.shape\n sum_loss += F.mean_squared_error(yf, tf) / (ch * h * w)\n\n return sum_loss\n\n @staticmethod\n def dis_hinge_loss(discrminator, y, t):\n fake = 
discrminator(y)\n real = discrminator(t)\n\n return 0.005 * F.mean(F.relu(1. - real)) + F.mean(F.relu(1. + fake))\n\n @staticmethod\n def gen_hinge_loss(discrminator, y):\n fake = discrminator(y)\n\n return - 0.005 * F.mean(fake)\n\n def __str__(self):\n return f\"func1: {self.content_loss.__name__}\"\n\n\ndef train(epochs,\n iterations,\n batchsize,\n validsize,\n outdir,\n modeldir,\n extension,\n crop_size,\n learning_rate,\n beta1,\n beta2,\n pretrained_epoch,\n content_weight,\n data_path):\n\n # Dataset Definition\n dataloader = DatasetLoader(data_path, extension, crop_size)\n print(dataloader)\n t_valid, x_valid = dataloader(validsize, mode=\"valid\")\n\n # Model & Optimizer Definition\n model = Generator()\n model.to_gpu()\n optimizer = set_optimizer(model, learning_rate, beta1, beta2)\n\n discriminator = Discriminator()\n discriminator.to_gpu()\n dis_opt = set_optimizer(discriminator, learning_rate, beta1, beta2)\n\n vgg = VGG()\n vgg.to_gpu()\n vgg_opt = set_optimizer(vgg, learning_rate, beta1, beta2)\n vgg.base.disable_update()\n\n # Loss Function Definition\n lossfunc = ESRGANLossFunction()\n print(lossfunc)\n\n # Evaluation Definition\n visualizer = Visualizer()\n\n for epoch in range(epochs):\n sum_dis_loss = 0\n sum_gen_loss = 0\n for batch in range(0, iterations, batchsize):\n\n if epoch > pretrained_epoch:\n t_train, x_train = dataloader(batchsize, mode=\"train\")\n\n y_train = model(x_train)\n y_train.unchain_backward()\n dis_loss = lossfunc.dis_hinge_loss(discriminator, y_train, t_train)\n\n discriminator.cleargrads()\n dis_loss.backward()\n dis_opt.update()\n dis_loss.unchain_backward()\n\n sum_dis_loss += dis_loss.data\n\n y_train = model(x_train)\n gen_loss = lossfunc.gen_hinge_loss(discriminator, y_train)\n gen_loss += content_weight * lossfunc.content_loss(y_train, t_train)\n gen_loss += lossfunc.perceptual_loss(vgg, y_train, t_train)\n\n model.cleargrads()\n vgg.cleargrads()\n gen_loss.backward()\n optimizer.update()\n vgg_opt.update()\n gen_loss.unchain_backward()\n\n sum_gen_loss += gen_loss.data\n\n else:\n t_train, x_train = dataloader(batchsize, mode=\"train\")\n y_train = model(x_train)\n\n gen_loss = lossfunc.content_loss(y_train, t_train)\n\n model.cleargrads()\n gen_loss.backward()\n optimizer.update()\n\n sum_gen_loss += gen_loss.data\n\n if batch == 0:\n serializers.save_npz(f\"{modeldir}/model_{epoch}.model\", model)\n\n with chainer.using_config('train', False):\n y_valid = model(x_valid)\n\n x = x_valid.data.get()\n y = y_valid.data.get()\n t = t_valid.data.get()\n\n visualizer(x, y, t, epoch, outdir)\n\n print(f\"epoch: {epoch}\")\n print(f\"dis loss: {sum_dis_loss / iterations} gen loss: {sum_gen_loss / iterations}\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"RAM\")\n parser.add_argument('--e', type=int, default=1000, help=\"the number of epochs\")\n parser.add_argument('--i', type=int, default=2000, help=\"the number of iterations\")\n parser.add_argument('--b', type=int, default=16, help=\"batch size\")\n parser.add_argument('--v', type=int, default=12, help=\"valid size\")\n parser.add_argument('--outdir', type=Path, default=\"outdir\", help=\"output directory\")\n parser.add_argument('--modeldir', type=Path, default=\"modeldir\", help=\"model output directory\")\n parser.add_argument('--ext', type=str, default=\".jpg\", help=\"output directory\")\n parser.add_argument('--size', type=int, default=256, help=\"crop size of training images\")\n parser.add_argument('--lr', type=float, default=0.0001, 
help=\"learning rate of Adam\")\n parser.add_argument('--b1', type=float, default=0.5, help=\"beta1 of Adam\")\n parser.add_argument('--b2', type=float, default=0.999, help=\"beta2 of Adam\")\n parser.add_argument('--pe', type=int, default=1, help=\"switch point of train and pretrain\")\n parser.add_argument('--cw', type=float, default=0.01, help=\"weight of content loss\")\n parser.add_argument('--path', type=Path, help=\"path which contais training images\")\n args = parser.parse_args()\n\n outdir = args.outdir\n outdir.mkdir(exist_ok=True)\n\n modeldir = args.modeldir\n modeldir.mkdir(exist_ok=True)\n\n train(args.e, args.i, args.b, args.v, outdir, modeldir, args.ext, args.size,\n args.lr, args.b1, args.b2, args.pe, args.cw, args.path)\n","sub_path":"ESRGAN/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"54143601","text":"# coding: utf-8\n\n\"\"\"\n Talon.One API\n\n Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you access the Campaign Manager at `https://yourbaseurl.talon.one/`, the URL for the [updateCustomerSessionV2](https://docs.talon.one/integration-api#operation/updateCustomerSessionV2) endpoint is `https://yourbaseurl.talon.one/v2/customer_sessions/{Id}` # noqa: E501\n\n The version of the OpenAPI document: \n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom talon_one.configuration import Configuration\n\n\nclass Attribute(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'id': 'int',\n 'created': 'datetime',\n 'account_id': 'int',\n 'entity': 'str',\n 'event_type': 'str',\n 'name': 'str',\n 'title': 'str',\n 'type': 'str',\n 'description': 'str',\n 'suggestions': 'list[str]',\n 'has_allowed_list': 'bool',\n 'restricted_by_suggestions': 'bool',\n 'editable': 'bool',\n 'subscribed_applications_ids': 'list[int]',\n 'subscribed_catalogs_ids': 'list[int]',\n 'allowed_subscriptions': 'list[str]',\n 'event_type_id': 'int'\n }\n\n attribute_map = {\n 'id': 'id',\n 'created': 'created',\n 'account_id': 'accountId',\n 'entity': 'entity',\n 'event_type': 'eventType',\n 'name': 'name',\n 'title': 'title',\n 'type': 'type',\n 'description': 'description',\n 'suggestions': 'suggestions',\n 'has_allowed_list': 'hasAllowedList',\n 'restricted_by_suggestions': 'restrictedBySuggestions',\n 'editable': 'editable',\n 'subscribed_applications_ids': 'subscribedApplicationsIds',\n 'subscribed_catalogs_ids': 'subscribedCatalogsIds',\n 'allowed_subscriptions': 'allowedSubscriptions',\n 'event_type_id': 'eventTypeId'\n }\n\n def __init__(self, id=None, created=None, account_id=None, entity=None, event_type=None, name=None, title=None, type=None, description=None, 
suggestions=None, has_allowed_list=False, restricted_by_suggestions=False, editable=None, subscribed_applications_ids=None, subscribed_catalogs_ids=None, allowed_subscriptions=None, event_type_id=None, local_vars_configuration=None): # noqa: E501\n \"\"\"Attribute - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._id = None\n self._created = None\n self._account_id = None\n self._entity = None\n self._event_type = None\n self._name = None\n self._title = None\n self._type = None\n self._description = None\n self._suggestions = None\n self._has_allowed_list = None\n self._restricted_by_suggestions = None\n self._editable = None\n self._subscribed_applications_ids = None\n self._subscribed_catalogs_ids = None\n self._allowed_subscriptions = None\n self._event_type_id = None\n self.discriminator = None\n\n self.id = id\n self.created = created\n self.account_id = account_id\n self.entity = entity\n if event_type is not None:\n self.event_type = event_type\n self.name = name\n self.title = title\n self.type = type\n self.description = description\n self.suggestions = suggestions\n if has_allowed_list is not None:\n self.has_allowed_list = has_allowed_list\n if restricted_by_suggestions is not None:\n self.restricted_by_suggestions = restricted_by_suggestions\n self.editable = editable\n if subscribed_applications_ids is not None:\n self.subscribed_applications_ids = subscribed_applications_ids\n if subscribed_catalogs_ids is not None:\n self.subscribed_catalogs_ids = subscribed_catalogs_ids\n if allowed_subscriptions is not None:\n self.allowed_subscriptions = allowed_subscriptions\n if event_type_id is not None:\n self.event_type_id = event_type_id\n\n @property\n def id(self):\n \"\"\"Gets the id of this Attribute. # noqa: E501\n\n Internal ID of this entity. # noqa: E501\n\n :return: The id of this Attribute. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this Attribute.\n\n Internal ID of this entity. # noqa: E501\n\n :param id: The id of this Attribute. # noqa: E501\n :type: int\n \"\"\"\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id\n\n @property\n def created(self):\n \"\"\"Gets the created of this Attribute. # noqa: E501\n\n The time this entity was created. # noqa: E501\n\n :return: The created of this Attribute. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._created\n\n @created.setter\n def created(self, created):\n \"\"\"Sets the created of this Attribute.\n\n The time this entity was created. # noqa: E501\n\n :param created: The created of this Attribute. # noqa: E501\n :type: datetime\n \"\"\"\n if self.local_vars_configuration.client_side_validation and created is None: # noqa: E501\n raise ValueError(\"Invalid value for `created`, must not be `None`\") # noqa: E501\n\n self._created = created\n\n @property\n def account_id(self):\n \"\"\"Gets the account_id of this Attribute. # noqa: E501\n\n The ID of the account that owns this entity. # noqa: E501\n\n :return: The account_id of this Attribute. # noqa: E501\n :rtype: int\n \"\"\"\n return self._account_id\n\n @account_id.setter\n def account_id(self, account_id):\n \"\"\"Sets the account_id of this Attribute.\n\n The ID of the account that owns this entity. 
# noqa: E501\n\n :param account_id: The account_id of this Attribute. # noqa: E501\n :type: int\n \"\"\"\n if self.local_vars_configuration.client_side_validation and account_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `account_id`, must not be `None`\") # noqa: E501\n\n self._account_id = account_id\n\n @property\n def entity(self):\n \"\"\"Gets the entity of this Attribute. # noqa: E501\n\n The name of the entity that can have this attribute. When creating or updating the entities of a given type, you can include an `attributes` object with keys corresponding to the `name` of the custom attributes for that type. # noqa: E501\n\n :return: The entity of this Attribute. # noqa: E501\n :rtype: str\n \"\"\"\n return self._entity\n\n @entity.setter\n def entity(self, entity):\n \"\"\"Sets the entity of this Attribute.\n\n The name of the entity that can have this attribute. When creating or updating the entities of a given type, you can include an `attributes` object with keys corresponding to the `name` of the custom attributes for that type. # noqa: E501\n\n :param entity: The entity of this Attribute. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and entity is None: # noqa: E501\n raise ValueError(\"Invalid value for `entity`, must not be `None`\") # noqa: E501\n allowed_values = [\"Account\", \"Application\", \"Campaign\", \"CustomerProfile\", \"CustomerSession\", \"CartItem\", \"Coupon\", \"Event\", \"Giveaway\", \"Referral\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and entity not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `entity` ({0}), must be one of {1}\" # noqa: E501\n .format(entity, allowed_values)\n )\n\n self._entity = entity\n\n @property\n def event_type(self):\n \"\"\"Gets the event_type of this Attribute. # noqa: E501\n\n\n :return: The event_type of this Attribute. # noqa: E501\n :rtype: str\n \"\"\"\n return self._event_type\n\n @event_type.setter\n def event_type(self, event_type):\n \"\"\"Sets the event_type of this Attribute.\n\n\n :param event_type: The event_type of this Attribute. # noqa: E501\n :type: str\n \"\"\"\n\n self._event_type = event_type\n\n @property\n def name(self):\n \"\"\"Gets the name of this Attribute. # noqa: E501\n\n The attribute name that will be used in API requests and Talang. E.g. if `name == \\\"region\\\"` then you would set the region attribute by including an `attributes.region` property in your request payload. # noqa: E501\n\n :return: The name of this Attribute. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this Attribute.\n\n The attribute name that will be used in API requests and Talang. E.g. if `name == \\\"region\\\"` then you would set the region attribute by including an `attributes.region` property in your request payload. # noqa: E501\n\n :param name: The name of this Attribute. 
# noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501\n raise ValueError(\"Invalid value for `name`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n name is not None and not re.search(r'^[A-Za-z]\\w*$', name)): # noqa: E501\n raise ValueError(r\"Invalid value for `name`, must be a follow pattern or equal to `/^[A-Za-z]\\w*$/`\") # noqa: E501\n\n self._name = name\n\n @property\n def title(self):\n \"\"\"Gets the title of this Attribute. # noqa: E501\n\n The human-readable name for the attribute that will be shown in the Campaign Manager. Like `name`, the combination of entity and title must also be unique. # noqa: E501\n\n :return: The title of this Attribute. # noqa: E501\n :rtype: str\n \"\"\"\n return self._title\n\n @title.setter\n def title(self, title):\n \"\"\"Sets the title of this Attribute.\n\n The human-readable name for the attribute that will be shown in the Campaign Manager. Like `name`, the combination of entity and title must also be unique. # noqa: E501\n\n :param title: The title of this Attribute. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and title is None: # noqa: E501\n raise ValueError(\"Invalid value for `title`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n title is not None and not re.search(r'^[A-Za-z][A-Za-z0-9_.!~*\\'() -]*$', title)): # noqa: E501\n raise ValueError(r\"Invalid value for `title`, must be a follow pattern or equal to `/^[A-Za-z][A-Za-z0-9_.!~*'() -]*$/`\") # noqa: E501\n\n self._title = title\n\n @property\n def type(self):\n \"\"\"Gets the type of this Attribute. # noqa: E501\n\n The data type of the attribute, a `time` attribute must be sent as a string that conforms to the [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) timestamp format. # noqa: E501\n\n :return: The type of this Attribute. # noqa: E501\n :rtype: str\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n \"\"\"Sets the type of this Attribute.\n\n The data type of the attribute, a `time` attribute must be sent as a string that conforms to the [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) timestamp format. # noqa: E501\n\n :param type: The type of this Attribute. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n allowed_values = [\"string\", \"number\", \"boolean\", \"time\", \"(list string)\", \"(list number)\", \"(list time)\", \"location\", \"(list location)\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `type` ({0}), must be one of {1}\" # noqa: E501\n .format(type, allowed_values)\n )\n\n self._type = type\n\n @property\n def description(self):\n \"\"\"Gets the description of this Attribute. # noqa: E501\n\n A description of this attribute. # noqa: E501\n\n :return: The description of this Attribute. # noqa: E501\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"Sets the description of this Attribute.\n\n A description of this attribute. # noqa: E501\n\n :param description: The description of this Attribute. 
# noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and description is None: # noqa: E501\n raise ValueError(\"Invalid value for `description`, must not be `None`\") # noqa: E501\n\n self._description = description\n\n @property\n def suggestions(self):\n \"\"\"Gets the suggestions of this Attribute. # noqa: E501\n\n A list of suggestions for the attribute. # noqa: E501\n\n :return: The suggestions of this Attribute. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._suggestions\n\n @suggestions.setter\n def suggestions(self, suggestions):\n \"\"\"Sets the suggestions of this Attribute.\n\n A list of suggestions for the attribute. # noqa: E501\n\n :param suggestions: The suggestions of this Attribute. # noqa: E501\n :type: list[str]\n \"\"\"\n if self.local_vars_configuration.client_side_validation and suggestions is None: # noqa: E501\n raise ValueError(\"Invalid value for `suggestions`, must not be `None`\") # noqa: E501\n\n self._suggestions = suggestions\n\n @property\n def has_allowed_list(self):\n \"\"\"Gets the has_allowed_list of this Attribute. # noqa: E501\n\n Whether or not this attribute has an allowed list of values associated with it. # noqa: E501\n\n :return: The has_allowed_list of this Attribute. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._has_allowed_list\n\n @has_allowed_list.setter\n def has_allowed_list(self, has_allowed_list):\n \"\"\"Sets the has_allowed_list of this Attribute.\n\n Whether or not this attribute has an allowed list of values associated with it. # noqa: E501\n\n :param has_allowed_list: The has_allowed_list of this Attribute. # noqa: E501\n :type: bool\n \"\"\"\n\n self._has_allowed_list = has_allowed_list\n\n @property\n def restricted_by_suggestions(self):\n \"\"\"Gets the restricted_by_suggestions of this Attribute. # noqa: E501\n\n Whether or not this attribute's value is restricted by suggestions (`suggestions` property) or by an allowed list of value (`hasAllowedList` property). # noqa: E501\n\n :return: The restricted_by_suggestions of this Attribute. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._restricted_by_suggestions\n\n @restricted_by_suggestions.setter\n def restricted_by_suggestions(self, restricted_by_suggestions):\n \"\"\"Sets the restricted_by_suggestions of this Attribute.\n\n Whether or not this attribute's value is restricted by suggestions (`suggestions` property) or by an allowed list of value (`hasAllowedList` property). # noqa: E501\n\n :param restricted_by_suggestions: The restricted_by_suggestions of this Attribute. # noqa: E501\n :type: bool\n \"\"\"\n\n self._restricted_by_suggestions = restricted_by_suggestions\n\n @property\n def editable(self):\n \"\"\"Gets the editable of this Attribute. # noqa: E501\n\n Whether or not this attribute can be edited. # noqa: E501\n\n :return: The editable of this Attribute. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._editable\n\n @editable.setter\n def editable(self, editable):\n \"\"\"Sets the editable of this Attribute.\n\n Whether or not this attribute can be edited. # noqa: E501\n\n :param editable: The editable of this Attribute. # noqa: E501\n :type: bool\n \"\"\"\n if self.local_vars_configuration.client_side_validation and editable is None: # noqa: E501\n raise ValueError(\"Invalid value for `editable`, must not be `None`\") # noqa: E501\n\n self._editable = editable\n\n @property\n def subscribed_applications_ids(self):\n \"\"\"Gets the subscribed_applications_ids of this Attribute. 
# noqa: E501\n\n A list of the IDs of the applications where this attribute is available. # noqa: E501\n\n :return: The subscribed_applications_ids of this Attribute. # noqa: E501\n :rtype: list[int]\n \"\"\"\n return self._subscribed_applications_ids\n\n @subscribed_applications_ids.setter\n def subscribed_applications_ids(self, subscribed_applications_ids):\n \"\"\"Sets the subscribed_applications_ids of this Attribute.\n\n A list of the IDs of the applications where this attribute is available. # noqa: E501\n\n :param subscribed_applications_ids: The subscribed_applications_ids of this Attribute. # noqa: E501\n :type: list[int]\n \"\"\"\n\n self._subscribed_applications_ids = subscribed_applications_ids\n\n @property\n def subscribed_catalogs_ids(self):\n \"\"\"Gets the subscribed_catalogs_ids of this Attribute. # noqa: E501\n\n A list of the IDs of the catalogs where this attribute is available. # noqa: E501\n\n :return: The subscribed_catalogs_ids of this Attribute. # noqa: E501\n :rtype: list[int]\n \"\"\"\n return self._subscribed_catalogs_ids\n\n @subscribed_catalogs_ids.setter\n def subscribed_catalogs_ids(self, subscribed_catalogs_ids):\n \"\"\"Sets the subscribed_catalogs_ids of this Attribute.\n\n A list of the IDs of the catalogs where this attribute is available. # noqa: E501\n\n :param subscribed_catalogs_ids: The subscribed_catalogs_ids of this Attribute. # noqa: E501\n :type: list[int]\n \"\"\"\n\n self._subscribed_catalogs_ids = subscribed_catalogs_ids\n\n @property\n def allowed_subscriptions(self):\n \"\"\"Gets the allowed_subscriptions of this Attribute. # noqa: E501\n\n A list of allowed subscription types for this attribute. **Note:** This only applies to attributes associated with the `CartItem` entity. # noqa: E501\n\n :return: The allowed_subscriptions of this Attribute. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._allowed_subscriptions\n\n @allowed_subscriptions.setter\n def allowed_subscriptions(self, allowed_subscriptions):\n \"\"\"Sets the allowed_subscriptions of this Attribute.\n\n A list of allowed subscription types for this attribute. **Note:** This only applies to attributes associated with the `CartItem` entity. # noqa: E501\n\n :param allowed_subscriptions: The allowed_subscriptions of this Attribute. # noqa: E501\n :type: list[str]\n \"\"\"\n allowed_values = [\"application\", \"catalog\"] # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n not set(allowed_subscriptions).issubset(set(allowed_values))): # noqa: E501\n raise ValueError(\n \"Invalid values for `allowed_subscriptions` [{0}], must be a subset of [{1}]\" # noqa: E501\n .format(\", \".join(map(str, set(allowed_subscriptions) - set(allowed_values))), # noqa: E501\n \", \".join(map(str, allowed_values)))\n )\n\n self._allowed_subscriptions = allowed_subscriptions\n\n @property\n def event_type_id(self):\n \"\"\"Gets the event_type_id of this Attribute. # noqa: E501\n\n\n :return: The event_type_id of this Attribute. # noqa: E501\n :rtype: int\n \"\"\"\n return self._event_type_id\n\n @event_type_id.setter\n def event_type_id(self, event_type_id):\n \"\"\"Sets the event_type_id of this Attribute.\n\n\n :param event_type_id: The event_type_id of this Attribute. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._event_type_id = event_type_id\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Attribute):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, Attribute):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"talon_one/models/attribute.py","file_name":"attribute.py","file_ext":"py","file_size_in_byte":22940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"648473006","text":"from bokeh.embed import components\nfrom bokeh.plotting import figure, curdoc, ColumnDataSource\nfrom bokeh.resources import INLINE\nfrom bokeh.util.string import encode_utf8\nfrom bokeh.models import CustomJS, LabelSet, Slider\nfrom bokeh.models.widgets import Slider\nfrom bokeh.models.layouts import WidgetBox, Row\nfrom bokeh.layouts import row, widgetbox\nfrom werkzeug import secure_filename\nfrom flask import Flask, render_template, flash, request, redirect, url_for, session\nfrom wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField\nimport os\nfrom forms import *\nimport pandas as pd\nimport plotly\nimport plotly.plotly as py\nimport json\nimport numpy as np\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\n\nDEBUG = True\napp = Flask(__name__)\t#initialising flask\napp.config.from_object(__name__)\t#configuring flask\napp.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'\n\nCURRENT_YEAR = '2015'\n\n\ndata = pd.read_csv(\"test_data.csv\")\ndf = pd.DataFrame(data)\nexternal = df['Normalised_x'].tolist()\ninternal = df['Normalised_y'].tolist()\nnames = df['Country'].tolist()\n\ndata_mod = pd.read_excel('final.xlsx', sheet_name=CURRENT_YEAR)\ndf1 = pd.DataFrame(data_mod)\next = df1['External'].tolist()\nint = df1['Internal'].tolist()\nname = df1['Country'].tolist()\ncode = df1['Code'].tolist()\n\nmanip_name = [\"India\", \"Belgium\", \"England\"]\nmanip_y = [0.3,0.5,0.5]\n\nsource = ColumnDataSource(data=dict(external=ext, internal=int, names=name, c1=manip_y, c2=manip_name))\n\nUPLOAD_FOLDER = '/static/internal/'\nALLOWED_EXTENSIONS = set(['csv'])\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\nplotly.tools.set_credentials_file(username='rahulkumaran', api_key='04p6710F0Pcs8tmwLuSf')\n\n'''def callback(attr, old, new):\n data = source.data\n val = cb_obj.year.value\n x, y, names = ext, int, name\n for i in range(0,len(name)):\n if(name[i] in manip_name):\n y[i] = manip_y[manip_name.index(name[i])]\n\n print(\"here\")\n source.change.emit()'''\n\napp = Flask(__name__)\n\ndef 
allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/', methods = ['GET', 'POST'])\ndef index():\n\tif request.method == 'POST':\n\t\t# check if the post request has the file part\n\t\tif 'file' not in request.files:\n\t\t\tflash('No file part')\n\t\t\treturn redirect(request.url)\n\t\tfile = request.files['file']\n\t\t# if user does not select file, browser also\n\t\t# submit an empty part without filename\n\t\tif file.filename == '':\n\t\t\tflash('No selected file')\n\t\t\treturn redirect(request.url)\n\t\tif file and allowed_file(file.filename):\n\t\t\tfilename = secure_filename(file.filename)\n\t\t\tfile.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\t\t\treturn redirect(url_for('visualize'))\n\n\treturn render_template('upload.html')\n\n@app.route(\"/relative-market-attractive-index-heatmap\", methods=['GET', 'POST'])\ndef rmai():\n    data = [ dict(\n        type = 'choropleth',\n        locations = code,\n        z = ext,\n        text = name,\n        colorscale = [[0,\"rgb(5, 10, 172)\"],[0.35,\"rgb(40, 60, 190)\"],[0.5,\"rgb(70, 100, 245)\"],\\\n            [0.6,\"rgb(90, 120, 245)\"],[0.7,\"rgb(106, 137, 247)\"],[1,\"rgb(220, 220, 220)\"]],\n        autocolorscale = False,\n        reversescale = True,\n        marker = dict(\n            line = dict (\n                color = 'rgb(180,180,180)',\n                width = 0.5\n            ) ),\n        colorbar = dict(\n            autotick = False,\n            tickprefix = 'Relative',\n            title = 'Market
 Attractive Index'),\n      ) ]\n\n    layout = dict(\n        title = 'Relative Market Attractive Index Heatmap',\n        geo = dict(\n            showframe = False,\n            showcoastlines = False,\n            projection = dict(\n                type = 'Mercator'\n            )\n        )\n    )\n\n    return render_template('heatmap.html')\n\n@app.route(\"/visualize\",methods=['GET','POST'])\ndef visualize():\n    js_resources = INLINE.render_js()\n    css_resources = INLINE.render_css()\n    callback = CustomJS(args=dict(source=source), code=\"\"\"\n        var data = source.data;\n\t\tvar val = year.value;\n        var get_name = data['c2'];\n        var get_y = data['c1'];\n\t\tvar x = data['external'];\n\t\tvar y = data['internal'];\n        var name = data['names'];\n\t\tfor (var i = 0; i < x.length; i++) {\n            if(get_name.includes(name[i])){\n                y[i] = y[i] + get_y[(get_name.indexOf(name[i]))];\n            }\n\t\t}\n\t\tsource.change.emit();\n\t\"\"\")\n\n    fig = figure(plot_width=1000, plot_height=600)\n    fig.scatter('external', 'internal', source=source, marker=\"circle\", size=5,line_color=\"navy\", fill_color=\"green\", alpha=0.6)\n    fig.xaxis[0].axis_label = 'External Index'\n    fig.yaxis[0].axis_label = 'Internal Index'\n    labels = LabelSet(x='external', y='internal', text='names', level='glyph', x_offset=5, y_offset=5, source=source, render_mode='canvas')\n    fig.add_layout(labels)\n    year = Slider(title=\"Year \", value=2000, start=1992, end=2030, step=1, width=250, callback=callback)\n    callback.args[\"year\"] = year\n\n\n    layout = row(\n        fig,\n        widgetbox(year),\n    )\n\n    script, div = components(layout)\n    html = render_template(\n    \t'index.html',\n    \tlayout_script=script,\n    \tlayout_div=div,\n    \tjs_resources=js_resources,\n    \tcss_resources=css_resources,\n    )\n\n    return encode_utf8(html)\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"557190980","text":"import unittest\nfrom selenium import webdriver\nfrom pageObjects.loginpage import LoginPage\nfrom pageObjects.notifications_page import Notific\n\n\n\n\nclass SendNotify(unittest.TestCase):\n\n    def setUp(self):\n        self.driver = webdriver.Chrome()\n        self.driver.maximize_window()\n        self.driver.get(\"http://brnd.stage.devlabs.me/notifications/\")\n\n    def test_product_custom(self):\n        ent = LoginPage(self.driver)\n        ent.enter_the_site()\n        note = Notific(self.driver)\n        note.send_notif()\n        note.is_notif_send()\n\n\n    def tearDown(self):\n\n        self.driver.quit()\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"tests/Test_9_send_notif.py","file_name":"Test_9_send_notif.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"442478210","text":"class Solution:\n    def search(self, nums: List[int], target: int) -> int:\n\n        # Brute Force\n        # for i in range(len(nums)):\n        #     if nums[i] == target:\n        #         return i\n        # return -1\n\n        # Binary Search\n        size = len(nums)\n        if size == 0:\n            return -1\n\n        left = 0\n        right = size - 1\n        while left < right:\n            # Note: the left midpoint is chosen here\n            mid = left + (right-left)//2\n            # Left half is sorted\n            if nums[left] < nums[mid]:\n                if nums[left] <= target <= nums[mid]:\n                    right = mid\n                else:\n                    left = mid + 1\n            # Right half is sorted\n            else:\n                # Use nums[mid+1] so that left and right are updated the same way as in the branch above\n                if nums[mid+1] <= target <= nums[right]:\n                    left = mid + 1 \n                else:\n                    right = mid\n        # Post-processing\n        return left if nums[left] == target else 
-1","sub_path":"Week_04/Algo/search-in-rotated-sorted-array.py","file_name":"search-in-rotated-sorted-array.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"97970147","text":"#game_of_eights() function goes here\ndef game_of_eights(a_list):\n for x, value in enumerate(a_list):\n if x+1 < len(a_list):\n if int(a_list[x]) == 8 and int(a_list[x+1]) == 8:\n return True\n return False\n\ndef main():\n a_list = input(\"Enter elements of list separated by commas: \").split(',')\n # remainder of main() goes here\n letterCheck = [letter for letter in a_list if letter.isalpha()]\n if len(letterCheck) > 0:\n print(\"Error. Please enter only integers.\")\n else:\n print(game_of_eights(a_list))\nmain()","sub_path":"Assignment12/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"316924482","text":"class Solution(object):\n def nthUglyNumber(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n res = [1 for i in range(n)]\n a, b, c = 0, 0, 0\n for i in range(1, n):\n temp = min(res[a]*2, res[b]*3, res[c] * 5)\n res[i] = temp\n if temp == res[a]*2:\n a += 1\n if temp == res[b]*3:\n b += 1\n if temp == res[c]*5:\n c += 1\n \n return res[-1]\n\n\nx = Solution()\nprint(x.nthUglyNumber(100))","sub_path":"DP/264.py","file_name":"264.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"524576682","text":"# The MIT License (MIT)\n\n# Copyright (c) 2015 Joel Robichaud\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nif __name__ == \"__main__\":\n import sys\n import convert\n\n converter = convert.AudioToSheetMusicConverter()\n\n if len(sys.argv) < 2:\n import wx\n import gui\n\n # Launch the graphic user interface if no command-line arguments are supplied\n app = wx.App(False)\n frame = gui.MainFrame(converter)\n app.MainLoop()\n else:\n import os\n import argparse\n\n # Parse command-line arguments\n parser = argparse.ArgumentParser(description=\"convert polyphonic multi-track audio to sheet music\")\n parser.add_argument(\"input\", metavar=\"INPUT\", type=str, nargs=\"+\", help=\"input file(s) path(s)\")\n parser.add_argument(\"--output\", type=str, nargs=1, help=\"output file path (without extension)\")\n\n args = parser.parse_args(sys.argv[1:])\n\n # Expand path arguments into absolute paths\n input = [os.path.abspath(filename) for filename in args.input if os.path.exists(filename)]\n output = args.output[0] if args.output else \"output\"\n output = os.path.abspath(output)\n\n # Convert input files and output the result\n for progress in converter.convert(input, output): continue\n os.remove(output)\n","sub_path":"polyscribe.py","file_name":"polyscribe.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"143975870","text":"import json\nimport bokeh\nimport rioxarray\nimport numpy as np\nimport panel as pn\nimport xarray as xr\nimport geoviews as gv\nimport geopandas as gpd\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport deafrica_tools.app.widgetconstructors as deawidgets\n\nfrom io import BytesIO\nfrom traitlets import Unicode\nfrom IPython.display import display\nfrom deafrica_tools.spatial import xr_rasterize\nfrom deafrica_tools.spatial import reverse_geocode\nfrom deafrica_tools.dask import create_local_dask_cluster\nfrom ipywidgets import GridspecLayout, Button, Layout, HBox, VBox, HTML, Output\nfrom ipyleaflet import (\n WMSLayer,\n basemaps,\n basemap_to_tiles,\n Map,\n DrawControl,\n WidgetControl,\n LayerGroup,\n LayersControl,\n GeoData,\n)\n\n# Load the bokeh extension.\ngv.extension(\"bokeh\", logo=False)\n\n# Turn off all warnings.\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\nwarnings.simplefilter(\"ignore\")\n\n\ndef make_box_layout():\n \"\"\"\n Defines a number of CSS properties that impact how a widget is laid out.\n \"\"\"\n return Layout( # border='solid 1px black',\n margin=\"0px 10px 10px 0px\",\n padding=\"5px 5px 5px 5px\",\n width=\"100%\",\n height=\"100%\",\n )\n\n\ndef create_expanded_button(description, button_style):\n \"\"\"\n Defines a number of CSS properties to create a button to handle mouse clicks.\n \"\"\"\n return Button(\n description=description,\n button_style=button_style,\n layout=Layout(width=\"auto\", height=\"auto\"),\n )\n\n\ndef update_map_layers(self):\n \"\"\"\n Updates map widget to add new basemap when selected\n using menu options.\n \"\"\"\n # Clear data load parameters to trigger data reload.\n self.gfclayer_ds = None\n\n # Remove all layers from the map_layers Layers Group.\n self.map_layers.clear_layers()\n # Add the selected basemap to the layer Group.\n self.map_layers.add_layer(self.basemap)\n\n\ndef load_gfclayer(self):\n \"\"\"\n 
Loads the selected Global Forest Change layer for the\n area drawn on the map widget.\n \"\"\"\n # Configure local dask cluster.\n client = create_local_dask_cluster(return_client=True, display_client=True)\n\n # Get the coordinates of the top-left corner for each Global Forest Change tile,\n # covering the area of interest.\n min_lat, max_lat = (\n self.gdf_drawn.bounds.miny.item(),\n self.gdf_drawn.bounds.maxy.item(),\n )\n min_lon, max_lon = (\n self.gdf_drawn.bounds.minx.item(),\n self.gdf_drawn.bounds.maxx.item(),\n )\n\n lats = np.arange(\n np.floor(min_lat / 10) * 10, np.ceil(max_lat / 10) * 10, 10\n ).astype(int)\n lons = np.arange(\n np.floor(min_lon / 10) * 10, np.ceil(max_lon / 10) * 10, 10\n ).astype(int)\n\n coord_list = []\n for lat in lats:\n lat = lat + 10\n if lat >= 0:\n lat_str = f\"{lat:02d}N\"\n else:\n lat_str = f\"{abs(lat):02d}S\"\n for lon in lons:\n if lon >= 0:\n lon_str = f\"{lon:03d}E\"\n else:\n lon_str = f\"{abs(lon):03d}W\"\n coord_str = f\"{lat_str}_{lon_str}\"\n coord_list.append(coord_str)\n\n # Load each Global Forest Change tile covering the area of interest.\n base_url = f\"https://storage.googleapis.com/earthenginepartners-hansen/GFC-2021-v1.9/Hansen_GFC-2021-v1.9_{self.gfclayer}_\"\n dask_chunks = dict(x=2048, y=2048)\n\n tile_list = []\n for coord in coord_list:\n tile_url = f\"{base_url}{coord}.tif\"\n # Load the tile as an xarray.DataArray.\n tile = rioxarray.open_rasterio(tile_url, chunks=dask_chunks).squeeze()\n tile_list.append(tile)\n\n # Merge the tiles into a single xarray.DataArray.\n ds = xr.combine_by_coords(tile_list)\n # Clip the dataset using the bounds of the area of interest.\n ds = ds.rio.clip_box(\n minx=min_lon - 0.00025,\n miny=min_lat - 0.00025,\n maxx=max_lon + 0.00025,\n maxy=max_lat + 0.00025,\n )\n # Rename the y and x variables for DEA convention on xarray.DataArrays where crs=\"EPSG:4326\".\n ds = ds.rename({\"y\": \"latitude\", \"x\": \"longitude\"})\n\n # Mask pixels representing no loss (encoded as 0) in the \"lossyear\" layer.\n if self.gfclayer == \"lossyear\":\n ds = ds.where(ds != 0)\n # Mask pixels representing no gain (encoded as 0) in the \"gain\" layer. \n elif self.gfclayer == \"gain\":\n ds = ds.where(ds != 0)\n\n # Create a mask from the area of interest GeoDataFrame.\n mask = xr_rasterize(self.gdf_drawn, ds)\n # Mask the dataset.\n ds = ds.where(mask)\n # Convert the xarray.DataArray to a dataset.\n ds = ds.to_dataset(name=self.gfclayer)\n # Compute.\n ds = ds.compute()\n # Assign the \"EPSG:4326\" CRS to the dataset.\n ds.rio.write_crs(4326, inplace=True)\n ds = ds.transpose(\"latitude\", \"longitude\")\n # Close down the dask client.\n client.close()\n return ds\n\n\ndef get_basemap(basemap_url):\n \"\"\"\n Gets the geoviews tile to use as a basemap based on a url\n \"\"\"\n if basemap_url == \"https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png\":\n basemap = gv.tile_sources.OSM\n elif (\n basemap_url\n == \"http://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}\"\n ):\n basemap = gv.tile_sources.EsriUSATopo\n\n return basemap\n\n\ndef plot_gfclayer(self):\n \n # Create the basemap.\n plot_basemap = get_basemap(self.basemap.url).opts(height=500, width=700)\n \n # Pass the ds xarray.Dataset to hv.Dataset\n # to create and object called \"dataset.\"\n dataset = gv.Dataset(\n data=self.gfclayer_ds,\n kdims=list(self.gfclayer_ds.dims),\n vdims=self.gfclayer,\n )\n\n if self.gfclayer == \"gain\":\n # Color map to use to plot. 
\n            cmap = \"Greens\"\n            # Ticks to be displayed on the colorbar.\n            ticks = [1]\n            ticker = bokeh.models.FixedTicker(ticks=ticks)\n            # Ticklabels for the displayed ticks on the colorbar.\n            ticklabels = [\"gain\"]\n            major_label_overrides = dict(zip(ticks, ticklabels))\n            \n            # Pass the dataset to gv.image to create an object called \"image\" which is\n            # an image element.\n            # Elements are the simplest viewable components in HoloViews/GeoViews.\n            image = dataset.to(gv.Image).opts(\n                colorbar=True,\n                cmap=cmap,\n                title=f\"Forest cover gain during the period 2000–2012\",\n                clabel=\"Global forest cover gain 2000–2012\",\n                colorbar_opts={\n                    \"ticker\": ticker,\n                    \"major_label_overrides\": major_label_overrides,\n                },\n                height=500,\n                width=700,\n            )\n        \n        if self.gfclayer == \"treecover2000\":\n            # Color map to use to plot.\n            cmap = \"Greens\"\n            image = dataset.to(gv.Image).opts(\n                colorbar=True,\n                cmap=cmap,\n                title=f\"Tree cover in the year 2000\",\n                clabel=\"Percentage tree canopy cover for the year 2000\",\n                height=500,\n                width=700,\n            )\n        \n        if self.gfclayer == \"lossyear\":\n            # Number of years from 2000 represented in the GFC lossyear.\n            no_years = 21\n            # Color map to use to plot.\n            cmap = plt.get_cmap(name=\"gist_rainbow_r\", lut=no_years)\n            color_list = [mcolors.rgb2hex(cmap(i)[:3]) for i in range(cmap.N)]\n            # Location of transition from one color to the next.\n            color_levels = list(np.arange(1 - 0.5, no_years + 1, 1))\n            # Ticks to be displayed on the colorbar.\n            ticks = list(range(1, 1 + no_years, 1))\n            ticker = bokeh.models.FixedTicker(ticks=ticks)\n            # Ticklabels for the displayed ticks on the colorbar.\n            ticklabels = [str(i) for i in range(2001, 2001 + no_years, 1)]\n            major_label_overrides = dict(zip(ticks, ticklabels))\n\n            # Pass the dataset to gv.image to create an object called \"image\" which is\n            # an image element.\n            # Elements are the simplest viewable components in HoloViews/GeoViews.\n            image = dataset.to(gv.Image).opts(\n                colorbar=True,\n                cmap=color_list,\n                color_levels=color_levels,\n                title=f\"Forest loss during the period 2000–{2000 + no_years}\",\n                clabel=\"Year of gross forest cover loss event\",\n                colorbar_opts={\n                    \"ticker\": ticker,\n                    \"major_label_overrides\": major_label_overrides,\n                },\n                height=500,\n                width=700,\n            )\n        \n        # Overlays are a collection of HoloViews objects to be displayed overlaid\n        # on one another with the same axes.\n        # Overlays are containers created by using the * operator on elements.\n        overlay = plot_basemap * image\n        # Convert the geoviews object to a displayable pane.\n        map_pane = pn.panel(overlay)\n        # Convert the pane to an ipywidget.\n        map_widget = pn.ipywidget(map_pane)\n        map_widget.layout = make_box_layout()\n        return map_widget\n\n\nclass forest_monitoring_app(HBox):\n    def __init__(self):\n        super().__init__()\n\n        ##################\n        # HEADER FOR APP #\n        ##################\n\n        # Create the header widget.\n        header_title_text = \"
Digital Earth Africa Forest Change\"\n        instruction_text = \"\"\"Select the desired Global Forest Change layer, then zoom in and draw a polygon to\n        select an area for which to plot the selected Global Forest Change layer.
\"\"\"\n self.header = deawidgets.create_html(\n value=f\"{header_title_text}{instruction_text}\"\n )\n self.header.layout = make_box_layout()\n\n ############################\n # WIDGETS FOR APP CONTROLS #\n ############################\n\n ## Selection widget for selecting the basemap to use for the map widget.\n ## and when plotting the Global Forest Change Layer.\n # Basemaps available for selection for the map widget.\n self.basemap_list = [\n (\"Open Street Map\", basemap_to_tiles(basemaps.OpenStreetMap.Mapnik)),\n (\"ESRI World Imagery\", basemap_to_tiles(basemaps.Esri.WorldImagery)),\n ]\n # Set the default basemap to be used for the map widget / initial value for the widget.\n self.basemap = self.basemap_list[0][1]\n # Dropdown selection widget.\n dropdown_basemap = deawidgets.create_dropdown(\n options=self.basemap_list, value=self.basemap\n )\n # Register the update function to run when a new value is selected\n # on the dropdown_basemap widget.\n dropdown_basemap.observe(self.update_basemap, \"value\")\n # Text to accompany the dropdown selection widget.\n basemap_selection_html = deawidgets.create_html(\n value=f\"
Map overlay:\"\n )\n # Combine the basemap_selection_html text and the dropdown_basemap widget in a single container.\n basemap_selection = VBox([basemap_selection_html, dropdown_basemap])\n\n ## Selection widget for selecting the Global Forest change layer to plot.\n # Global Forest Change layers available plotting.\n self.gfclayers_list = [\n (\"Year of gross forest cover loss event\", \"lossyear\"),\n (\"Global forest cover gain 2000–2012\", \"gain\"),\n (\"Tree canopy cover for the year 2000\", \"treecover2000\")\n ]\n # Set the default GFC layer to be plotted / initial value for the widget.\n self.gfclayer = self.gfclayers_list[0][1]\n # Set the initial parameter for the GFC layer dataset.\n self.gfclayer_ds = None\n # Dropdown selection widget.\n dropdown_gfclayer = deawidgets.create_dropdown(\n options=self.gfclayers_list, value=self.gfclayer\n )\n # Register the update function to run when a new value is selected\n # on the dropdown_gfclayer widget.\n dropdown_gfclayer.observe(self.update_gfclayer, \"value\")\n # Text to accompany the dropdown selection widget.\n gfclayer_selection_html = deawidgets.create_html(\n value=f\"
Global Forest Change Layer:\"\n )\n # Combine the gfclayer_selection_html text and the dropdown_gfclayer widget in a single container.\n gfclayer_selection = VBox([gfclayer_selection_html, dropdown_gfclayer])\n\n ## Add a checkbox for whether to overide the limit to the size of polygon drawn on the\n ## map widget.\n # Initial value of the widget.\n self.max_size = False\n # CheckBox widget.\n checkbox_max_size = deawidgets.create_checkbox(\n value=self.max_size, description=\"Enable\", layout={\"width\": \"95%\"}\n )\n # Text to accompany the CheckBox widget.\n checkbox_max_size_html = deawidgets.create_html(\n value=f\"\"\"
Override maximum size limit: \n (use with caution; may cause memory issues/crashes)\"\"\"\n )\n # Register the update function to run when the checkbox is ticked.\n # on the checkbox_max_size CheckBox\n checkbox_max_size.observe(self.update_checkbox_max_size, \"value\")\n # # Combine the checkbox_max_size_html text and the checkbox_max_size widget in a single container.\n enable_max_size = VBox([checkbox_max_size_html, checkbox_max_size])\n\n ## Put the app controls widgets into a single container.\n parameter_selection = VBox(\n [basemap_selection, gfclayer_selection, enable_max_size]\n )\n parameter_selection.layout = make_box_layout()\n\n ## Button to click to run the app.\n run_button = create_expanded_button(\n description=\"Generate plot\", button_style=\"info\"\n )\n # Register the update function to be called when the run_button button\n # is clicked.\n run_button.on_click(self.run_app)\n\n ###########################\n # WIDGETS FOR APP OUTPUTS #\n ###########################\n\n self.status_info = Output(layout=make_box_layout())\n self.output_plot = Output(layout=make_box_layout())\n\n #################################\n # MAP WIDGET WITH DRAWING TOOLS #\n #################################\n\n # Create the map widget.\n self.m = deawidgets.create_map(\n map_center=(-18.45, 28.93),\n zoom_level=11,\n )\n self.m.layout = make_box_layout()\n\n # Create an empty Layer Group.\n self.map_layers = LayerGroup(layers=())\n # Name of the Layer Group layer.\n self.map_layers.name = \"Map Overlays\"\n # Add the empty Layer Group as a single layer to the map widget.\n self.m.add_layer(self.map_layers)\n\n # Create the desired drawing tools.\n desired_drawtools = [\"rectangle\", \"polygon\"]\n draw_control = deawidgets.create_drawcontrol(desired_drawtools)\n # Add drawing tools to the map widget.\n self.m.add_control(draw_control)\n # Set the initial parameters for the drawing tools.\n self.target = None\n self.action = None\n self.gdf_drawn = None\n\n #####################################\n # HANDLER FUNCTION FOR DRAW CONTROL #\n #####################################\n\n def handle_draw(target, action, geo_json):\n\n \"\"\"\n Defines the action to take once something is drawn on the\n map widget.\n \"\"\"\n\n self.target = target\n self.action = action\n\n # Clear data load parameters to trigger data reload.\n self.gfclayer_ds = None\n\n # Convert the drawn polygon geojson to a GeoDataFrame.\n json_data = json.dumps(geo_json)\n binary_data = json_data.encode()\n io = BytesIO(binary_data)\n io.seek(0)\n gdf = gpd.read_file(io)\n gdf.crs = \"EPSG:4326\"\n\n # Convert the GeoDataFrame to WGS 84 / NSIDC EASE-Grid 2.0 Global and compute the area.\n gdf_drawn_nsidc = gdf.copy().to_crs(\"EPSG:6933\")\n m2_per_ha = 10000\n area = gdf_drawn_nsidc.area.values[0] / m2_per_ha\n\n polyarea_label = (\n f\"Total area of Global Forest Change {self.gfclayer} layer to load\"\n )\n polyarea_text = f\"{polyarea_label}: {area:.2f} ha\"\n\n # Test the size of the polygon drawn.\n if self.max_size:\n confirmation_text = \"\"\" \n (Overriding maximum size limit; use with caution as may lead to memory issues)\"\"\"\n self.header.value = (\n header_title_text\n + instruction_text\n + polyarea_text\n + confirmation_text\n )\n self.gdf_drawn = gdf\n elif area <= 50000:\n confirmation_text = \"\"\"\n (Area to extract falls within\n recommended 50000 ha limit)\"\"\"\n self.header.value = (\n header_title_text\n + instruction_text\n + polyarea_text\n + confirmation_text\n )\n self.gdf_drawn = gdf\n else:\n warning_text = 
\"\"\"\n (Area to extract is too large,\n please select an area less than 50000 )\"\"\"\n self.header.value = (\n header_title_text + instruction_text + polyarea_text + warning_text\n )\n self.gdf_drawn = None\n\n # Register the handler for draw events.\n draw_control.on_draw(handle_draw)\n\n ###############################\n # SPECIFICATION OF APP LAYOUT #\n ###############################\n\n # Create the app layout.\n grid_rows = 12\n grid_columns = 10\n grid_height = \"1500px\"\n grid_width = \"auto\"\n grid = GridspecLayout(\n grid_rows, grid_columns, height=grid_height, width=grid_width\n )\n\n # Place app widgets and components in app layout.\n # [rows, columns]\n grid[0, :] = self.header\n grid[1:4, 0:3] = parameter_selection\n grid[4, 0:3] = run_button\n grid[5:, 0:3] = self.status_info\n grid[4:, 3:] = self.output_plot\n grid[1:4, 3:] = self.m\n # Display using HBox children attribute\n self.children = [grid]\n\n ######################################\n # DEFINITION OF ALL UPDATE FUNCTIONS #\n ######################################\n\n def update_basemap(self, change):\n \"\"\"\n Updates the basemap on the map widget based on the\n selected value of the dropdown_basemap widget.\n \"\"\"\n self.basemap = change.new\n self.output_plot_basemap = get_basemap(self.basemap.url)\n update_map_layers(self)\n\n def update_gfclayer(self, change):\n \"\"\"\n Updates the Global Forest Change layer to be plotted\n based on the selected value of the dropdown_gfclayer widget.\n \"\"\"\n self.gfclayer = change.new\n\n def update_checkbox_max_size(self, change):\n \"\"\"\n Sets the value of self.max_size to True when the\n checkbox_max_size CheckBox is checked.\n \"\"\"\n self.max_size = change.new\n\n def run_app(self, change):\n\n # Clear progress bar and output areas before running.\n self.status_info.clear_output()\n self.output_plot.clear_output()\n\n # Verify that the polygon was drawn.\n if self.gdf_drawn is not None:\n # Load the seleced Global Forest Change layer\n # and add it to the self.gfclayer_ds attribute.\n with self.status_info:\n if self.gfclayer_ds is None:\n self.gfclayer_ds = load_gfclayer(self)\n else:\n print(\"Using previously loaded data\")\n\n # Plot the selected Global Forest Change layer.\n if self.gfclayer_ds is not None:\n with self.output_plot:\n map_widget = plot_gfclayer(self)\n print(f\"Plotting Global Forest Change {self.gfclayer} layer:\")\n display(map_widget)\n else:\n with self.status_info:\n print(\n f\"\"\"No Global Forest Change {self.gfclayer} layer \n data found in the selected area. 
Please select \n                        a new polygon over an area with data.\"\"\"\n                    )\n\n        # If no valid polygon was drawn.\n        else:\n            with self.status_info:\n                print(\n                    'Please draw a valid polygon on the map, then click on \"Generate plot\"'\n                )","sub_path":"Tools/deafrica_tools/app/forestmonitoring.py","file_name":"forestmonitoring.py","file_ext":"py","file_size_in_byte":21058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"348065687","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\nimport numpy as np\n\nimport akg\nfrom akg import tvm\nfrom akg import composite\nfrom akg.utils import CUDA\nfrom tests.common.base import get_rtol_atol\nfrom tests.common.gen_random import random_gaussian\nfrom tests.common.tensorio import compare_tensor\nfrom akg.utils import kernel_exec as utils\nfrom akg.utils.result_analysis import target_profiling\nfrom akg.utils.format_transform import to_tvm_nd_array\n\ndef csr2coo(indptr, nnz, target=CUDA):\n    return composite.csr2coo((indptr,), {\"nnz\": nnz})\n\ndef gen_data(shape, nnz, dtype):\n    indptr_choice = np.arange(1, nnz, dtype=dtype)\n    indptr = np.sort(np.random.choice(indptr_choice, shape[0] - 2, replace=True))\n    indptr = np.concatenate((np.array([0], dtype=dtype), indptr, np.array([nnz], dtype=dtype)))\n    expect = np.zeros(nnz, dtype=dtype)\n    for i in range(shape[0] - 1):\n        row_start = indptr[i]\n        row_end = indptr[i + 1]\n        expect[row_start : row_end] = i\n    return indptr, expect\n\ndef csr2coo_run(shape, nnz, dtype, poly_sch=True, attrs=None):\n    if not attrs:\n        attrs = {\"target\": \"cuda\"}\n    # gen data\n    op_attrs = [nnz]\n    indptr, expect = gen_data(shape, nnz, dtype)\n    output_shape = expect.shape\n    attrs[\"is_csr\"] = True\n\n    mod = utils.op_build_test(csr2coo, [shape], [dtype], op_attrs=op_attrs, polyhedral=poly_sch,\n                              attrs=attrs, kernel_name=\"csr2coo\")\n\n    if len(expect.shape) == 0:\n        output_shape = (1, )\n    output = np.zeros(output_shape, expect.dtype)\n    output = utils.mod_launch(mod, (indptr, output), expect=expect)\n    atol, rtol = get_rtol_atol(\"csr2coo\", dtype)\n    res = compare_tensor(output, expect, rtol=rtol, atol=atol)\n    print(\"Test {}\".format(\"Pass\" if res else \"Failed\"))\n    target_name = attrs[\"target\"].split()[0]\n    if not res:\n        mod_source = mod\n        if target_name != \"llvm\":\n            mod_source = mod.imported_modules[0]\n        print(\"Error {}:========================\".format(target_name))\n        print(mod_source.get_source())\n        raise AssertionError(\"Test fail\")\n    # Use .get() so the default attrs (which carry no \"profiling\" key) do not raise a KeyError.\n    if attrs.get(\"profiling\"):\n        args_list = to_tvm_nd_array(\n            [indptr, output, expect], akg.tvm.context(target_name, 0))\n        target_profiling(mod, *args_list, target=target_name, repeat_time=attrs[\"repeat_time\"])\n    return (indptr,), output, expect, res\n","sub_path":"tests/common/test_run/csr2coo_run.py","file_name":"csr2coo_run.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"37921682","text":"from 
collections import OrderedDict\nimport sys\n\nimport pandas as pd\n\n\ndef process_file(file, object_type):\n\tif object_type == 'csv':\n\t\tdata = pd.read_csv(file)\n\telif object_type == 'json':\n\t\tdata = pd.read_json(file, orient='columns')\n\t\tdata = pd.read_json((data['refunds']).to_json(),orient='index')\n\telse:\n\t\tprint('Sorry, this data type is not supported yet.')\n\treturn data\n\ndef max_lengths(input_file, object_type):\n\tdata = process_file(input_file, object_type)\n\tcolumns = data.columns\n\toutput = OrderedDict()\n\tfor header in columns:\n\t\theader_length = len(str(header))+1\n\t\tvalue_max_length = data[header].map(lambda x: len(str(x))).max()\n\t\tmax_value = pd.Series([header_length,value_max_length]).max()\n\t\tprint(max_value)\n\t\toutput[header] = max_value\n\treturn output\n\ndef values(input_file, object_type):\n\tdata = process_file(input_file, object_type)\n\toutput = OrderedDict()\n\tcolumns = data.columns\n\tfor n in columns:\n\t\toutput[n] = data[n].tolist()\n\treturn output\n","sub_path":"app/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"607681315","text":"\"\"\" helper function\n\nauthor baiyu\n\"\"\"\n\nimport sys\n\nimport numpy as np\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\n\n\n# from dataset import CIFAR100Train, CIFAR100Test\n\ndef get_network(args, use_gpu=True, num_train=0):\n \"\"\" return given network\n \"\"\"\n if args.dataset == 'cifar-10':\n num_classes = 10\n elif args.dataset == 'cifar-100':\n num_classes = 100\n else:\n num_classes = 0\n\n if args.ignoring:\n if args.net == 'resnet18':\n from models.resnet_ign import resnet18_ign\n criterion = nn.CrossEntropyLoss(reduction='none')\n net = resnet18_ign(criterion, num_classes=num_classes, num_train=num_train,softmax=args.softmax,isalpha=args.isalpha)\n\n else:\n if args.net == 'vgg16':\n from models.vgg import vgg16_bn\n net = vgg16_bn()\n elif args.net == 'vgg13':\n from models.vgg import vgg13_bn\n net = vgg13_bn()\n elif args.net == 'vgg11':\n from models.vgg import vgg11_bn\n net = vgg11_bn()\n elif args.net == 'vgg19':\n from models.vgg import vgg19_bn\n net = vgg19_bn()\n elif args.net == 'densenet121':\n from models.densenet import densenet121\n net = densenet121()\n elif args.net == 'densenet161':\n from models.densenet import densenet161\n net = densenet161()\n elif args.net == 'densenet169':\n from models.densenet import densenet169\n net = densenet169()\n elif args.net == 'densenet201':\n from models.densenet import densenet201\n net = densenet201()\n elif args.net == 'googlenet':\n from models.googlenet import googlenet\n net = googlenet()\n elif args.net == 'inceptionv3':\n from models.inceptionv3 import inceptionv3\n net = inceptionv3()\n elif args.net == 'inceptionv4':\n from models.inceptionv4 import inceptionv4\n net = inceptionv4()\n elif args.net == 'inceptionresnetv2':\n from models.inceptionv4 import inception_resnet_v2\n net = inception_resnet_v2()\n elif args.net == 'xception':\n from models.xception import xception\n net = xception()\n elif args.net == 'resnet18':\n from models.resnet import resnet18\n net = resnet18(num_classes=num_classes)\n elif args.net == 'resnet34':\n from models.resnet import resnet34\n net = resnet34(num_classes=num_classes)\n elif args.net == 
'resnet50':\n from models.resnet import resnet50\n net = resnet50(num_classes=num_classes)\n elif args.net == 'resnet101':\n from models.resnet import resnet101\n net = resnet101(num_classes=num_classes)\n elif args.net == 'resnet152':\n from models.resnet import resnet152\n net = resnet152(num_classes=num_classes)\n elif args.net == 'preactresnet18':\n from models.preactresnet import preactresnet18\n net = preactresnet18()\n elif args.net == 'preactresnet34':\n from models.preactresnet import preactresnet34\n net = preactresnet34()\n elif args.net == 'preactresnet50':\n from models.preactresnet import preactresnet50\n net = preactresnet50()\n elif args.net == 'preactresnet101':\n from models.preactresnet import preactresnet101\n net = preactresnet101()\n elif args.net == 'preactresnet152':\n from models.preactresnet import preactresnet152\n net = preactresnet152()\n elif args.net == 'resnext50':\n from models.resnext import resnext50\n net = resnext50()\n elif args.net == 'resnext101':\n from models.resnext import resnext101\n net = resnext101()\n elif args.net == 'resnext152':\n from models.resnext import resnext152\n net = resnext152()\n elif args.net == 'shufflenet':\n from models.shufflenet import shufflenet\n net = shufflenet()\n elif args.net == 'shufflenetv2':\n from models.shufflenetv2 import shufflenetv2\n net = shufflenetv2()\n elif args.net == 'squeezenet':\n from models.squeezenet import squeezenet\n net = squeezenet()\n elif args.net == 'mobilenet':\n from models.mobilenet import mobilenet\n net = mobilenet()\n elif args.net == 'mobilenetv2':\n from models.mobilenetv2 import mobilenetv2\n net = mobilenetv2()\n elif args.net == 'nasnet':\n from models.nasnet import nasnet\n net = nasnet()\n elif args.net == 'attention56':\n from models.attention import attention56\n net = attention56()\n elif args.net == 'attention92':\n from models.attention import attention92\n net = attention92()\n elif args.net == 'seresnet18':\n from models.senet import seresnet18\n net = seresnet18()\n elif args.net == 'seresnet34':\n from models.senet import seresnet34\n net = seresnet34()\n elif args.net == 'seresnet50':\n from models.senet import seresnet50\n net = seresnet50()\n elif args.net == 'seresnet101':\n from models.senet import seresnet101\n net = seresnet101()\n elif args.net == 'seresnet152':\n from models.senet import seresnet152\n net = seresnet152()\n\n else:\n print('the network name you have entered is not supported yet')\n sys.exit()\n\n if use_gpu:\n net = net.cuda()\n\n return net\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nclass AvgrageMeter(object):\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n\n\ndef get_training_dataloader(mean, std, batch_size=16, num_workers=2, shuffle=True):\n \"\"\" return training dataloader\n Args:\n mean: mean of cifar100 training dataset\n std: std of cifar100 training dataset\n path: path to cifar100 training python dataset\n batch_size: dataloader batchsize\n num_workers: dataloader num_works\n shuffle: whether to shuffle \n Returns: train_data_loader:torch dataloader object\n 
\"\"\"\n\n transform_train = transforms.Compose([\n # transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n # cifar100_training = CIFAR100Train(path, transform=transform_train)\n cifar100_training = torchvision.datasets.CIFAR100(root='./data', train=True, download=True,\n transform=transform_train)\n cifar100_training_loader = DataLoader(\n cifar100_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n\n return cifar100_training_loader, len(cifar100_training)\n\n\ndef get_test_dataloader(mean, std, batch_size=16, num_workers=2, shuffle=True):\n \"\"\" return training dataloader\n Args:\n mean: mean of cifar100 test dataset\n std: std of cifar100 test dataset\n path: path to cifar100 test python dataset\n batch_size: dataloader batchsize\n num_workers: dataloader num_works\n shuffle: whether to shuffle \n Returns: cifar100_test_loader:torch dataloader object\n \"\"\"\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n # cifar100_test = CIFAR100Test(path, transform=transform_test)\n cifar100_test = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)\n cifar100_test_loader = DataLoader(\n cifar100_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n\n return cifar100_test_loader\n\n\nclass Cutout(object):\n def __init__(self, length):\n self.length = length\n\n def __call__(self, img):\n h, w = img.size(1), img.size(2)\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1: y2, x1: x2] = 0.\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n return img\n\n\ndef _data_transforms_cifar10(args):\n CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]\n CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]\n\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length))\n\n valid_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n return train_transform, valid_transform\n\ndef _data_transforms_cifar100(args):\n CIFAR_MEAN = [0.5070751592371323, 0.48654887331495095, 0.4409178433670343]\n CIFAR_STD = [0.2673342858792401, 0.2564384629170883, 0.27615047132568404]\n\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length))\n\n valid_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n return train_transform, valid_transform\n\n\ndef compute_mean_std(cifar100_dataset):\n \"\"\"compute the mean and std of cifar100 dataset\n Args:\n cifar100_training_dataset or cifar100_test_dataset\n witch derived from class torch.utils.data\n \n Returns:\n a tuple contains mean, std value of entire dataset\n \"\"\"\n\n data_r = 
np.dstack([cifar100_dataset[i][1][:, :, 0] for i in range(len(cifar100_dataset))])\n data_g = np.dstack([cifar100_dataset[i][1][:, :, 1] for i in range(len(cifar100_dataset))])\n data_b = np.dstack([cifar100_dataset[i][1][:, :, 2] for i in range(len(cifar100_dataset))])\n mean = np.mean(data_r), np.mean(data_g), np.mean(data_b)\n std = np.std(data_r), np.std(data_g), np.std(data_b)\n\n return mean, std\n\n\nclass WarmUpLR(_LRScheduler):\n \"\"\"warmup_training learning rate scheduler\n Args:\n optimizer: optimzier(e.g. SGD)\n total_iters: totoal_iters of warmup phase\n \"\"\"\n\n def __init__(self, optimizer, total_iters, last_epoch=-1):\n self.total_iters = total_iters\n super().__init__(optimizer, last_epoch)\n\n def get_lr(self):\n \"\"\"we will use the first m batches, and set the learning\n rate to base_lr * m / total_iters\n \"\"\"\n return [base_lr * self.last_epoch / (self.total_iters + 1e-8) for base_lr in self.base_lrs]\n\n\ndef copy_state_dict(net, checkpoint, parallel=False, prefix=''):\n pre_state_dict = checkpoint['state_dict']\n\n if parallel:\n cur_state_dict = net.module.state_dict()\n else:\n cur_state_dict = net.state_dict()\n\n for k in cur_state_dict.keys():\n v = _get_params(k, pre_state_dict, prefix=prefix)\n try:\n if v is None:\n print('parameter {} not found'.format(k))\n continue\n cur_state_dict[k].copy_(v)\n except:\n print('copy param {} failed'.format(k))\n continue\n\n net.alphas = checkpoint['alpha_state_dict']\n net.alphas_parameters=[net.alphas]\n\n\ndef copy_optimizer_state_dict(cur_state_dict, pre_state_dict, prefix=''):\n def _get_params(key):\n key = prefix + key\n if key in pre_state_dict:\n return pre_state_dict[key]\n return None\n\n for k in cur_state_dict.keys():\n v = _get_params(k)\n try:\n if v is None:\n print('parameter {} not found'.format(k))\n continue\n cur_state_dict[k].copy_(v)\n except:\n print('copy param {} failed'.format(k))\n continue\n\n\ndef _get_params(key, pre_state_dict, prefix=''):\n key = prefix + key\n if key in pre_state_dict:\n return pre_state_dict[key]\n return None\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"409407545","text":"__author__ = 'p02user'\n\n# a mus to avoid stupid things on reimport\nfrom PyQt4.Qt import Qt as BASE\n\n# Toggling TT plot position selecting modes\nSHORTCUT_TOGGLEPOSITION = BASE.Key_1\nSHORTCUT_TOGGLEPOINT = BASE.Key_2\nSHORTCUT_TOGGLEZOOM = BASE.Key_3\n\n# Autoscale button\nSHORTCUT_AUTOSCALE = (BASE.Key_Escape, BASE.Key_A)\n\n# moving markers in TT plot window\nSHORTCUT_CLEARMARKER = BASE.Key_Delete\n\nSHORTCUT_MOVELEFT = (BASE.Key_Left)\nSHORTCUT_MOVERIGHT = (BASE.Key_Right)\n\n# one could also use a string 'Ctrl+4'\nSHORTCUT_JUMPLEFT = (BASE.ALT+BASE.Key_Left)\nSHORTCUT_JUMPRIGHT = (BASE.ALT+BASE.Key_Right)\n\nSHORTCUT_YMAX = BASE.Key_Home\nSHORTCUT_YMIN = BASE.Key_End\n\nSHORTCUT_XMAX = (BASE.Key_PageUp, BASE.Key_7)\nSHORTCUT_XMIN = (BASE.Key_PageDown, BASE.Key_9)\n\nSHORTCUT_XLEFTEDGE = BASE.Key_BracketLeft\nSHORTCUT_XRIGHTEDGE = BASE.Key_BracketRight\n\nSHORTCUT_CENTERDERIVATIVE = BASE.Key_5\nSHORTCUT_CENTERDERIVATIVENOISY = BASE.ALT+BASE.Key_5\nSHORTCUT_CENTERCMS = BASE.Key_8\nSHORTCUT_CENTERNEGATIVECMS = BASE.ALT+BASE.Key_8\n\nSHORTCUT_CENTERBETWEENCLICKS = 
(BASE.Key_C)\n\n","sub_path":"ScanTracker/app/config/shortcuts.py","file_name":"shortcuts.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"598389610","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport scraper\nimport nltk\nimport string\n\nnltk.download('stopwords')\n\nfrom nltk.corpus import stopwords\n\nstop_words = set(stopwords.words('english'))\n\n\"\"\"\nreference of code online: \nhttps://stackoverflow.com/questions/328356/extracting-text-from-html-file-using-python\n\"\"\"\n\ndef page_token(url):\n    # Default so the final return cannot hit an unbound name when the\n    # response is not a 2xx success.\n    strippedtext = []\n    try:\n        response = urllib.request.urlopen(url)\n        if response.getcode() >= 200 and response.getcode() <= 299:\n            html_content = response.read()\n            soup = BeautifulSoup(html_content)\n            for script in soup([\"script\", \"style\"]):\n                script.extract()\n\n            text = soup.get_text()\n            text = text.strip().lower().split()\n            table = str.maketrans('', '', string.punctuation)\n            strippedtext = [w.translate(table) for w in text]\n\n            # Drop stop words and short tokens; rebuilding the list avoids\n            # skipping elements while mutating it during iteration.\n            strippedtext = [i for i in strippedtext if i not in stop_words and len(i) >= 3]\n\n            if len(strippedtext) < 200:\n                return len(strippedtext)\n\n            for i in strippedtext:\n                if i not in scraper.words:\n                    scraper.words[i] = 1\n                else:\n                    scraper.words[i] += 1\n\n            if scraper.longest_page:\n                if len(strippedtext) > scraper.longest_page[1]:\n                    scraper.longest_page = (url, len(strippedtext))\n            else:\n                scraper.longest_page = (url, len(strippedtext))\n    except:\n        return 200\n\n    return len(strippedtext)\n","sub_path":"tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"437002209","text":"import sys\nimport os \n\nr1=sys.argv[1]\no1=sys.argv[2]\nr2=sys.argv[3]\no2=sys.argv[4]\n\ndef pre(a,b):\n    if os.path.exists(b):\n        #os.system(\"unlink %s\" % b)\n        #os.system(\"ln -s %s %s\" %(a,b))\n        print(\"Warning: rawdata link already exists\")\n    else:\n        os.system(\"ln -s %s %s\" %(a,b))\n    \npre(r1,o1)\npre(r2,o2)\n\n","sub_path":"scripts/pre_data.py","file_name":"pre_data.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"391273681","text":"from PIL import Image\nimport numpy as np\nimport os \nimport sys\nimport random\nfrom ssa_clustering import *\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score\nfrom skimage import exposure\n\ndef build_dataset(neg, pos, trans=True, usessa=False, argue=False):\n    nlist = [ neg + x for x in os.listdir(neg) \n            if os.path.isfile(neg + x) and x.endswith(\"jpg\")]\n    plist = [ pos + x for x in os.listdir(pos)\n            if os.path.isfile(pos + x) and x.endswith(\"jpg\")]\n\n    nims = [ np.asarray(Image.open(im)).flatten() for im in nlist]\n    pims = [ np.asarray(Image.open(im)).flatten() for im in plist]\n    if argue:\n        seed = 42\n        random.seed(seed)\n        dataset = [(x, 0) for x in nlist] + [(x, 1) for x in plist]\n        rate = float(len(nlist)) / len(plist)\n        random.shuffle(dataset)\n        T = int(0.7 * len(dataset))\n        train_set = []\n        for im, label in dataset[:T]:\n            img = Image.open(im)\n#            img.rotate(90).show()\n            train_set.append((np.asarray(img).flatten(), label))\n            train_set.append((np.asarray(img.rotate(90)).flatten(), label))\n            
train_set.append((np.asarray(img.rotate(180)).flatten(), label))\n train_set.append((np.asarray(img.rotate(270)).flatten(), label))\n test_set = []\n for im, label in dataset[T:]:\n img = Image.open(im)\n test_set.append((np.asarray(img).flatten(), label))\n return train_set, test_set, rate\n if usessa:\n nims = []\n for im in nlist:\n r, pr, r_max = ssa_clustering(im, 40, 2, 1, 0.2, False, 42)\n nims.append(exposure.equalize_hist(np.asarray(r_max)).flatten())\n\n if len(nims) % 10 == 0:\n print(\"Progress: {0}/{1}...\".format(len(nims), len(nlist)))\n print(\"...Negative samples processed!\")\n pims = []\n for im in plist:\n r, pr, r_max = ssa_clustering(im, 40, 2, 1, 0.2, False, 42)\n pims.append(exposure.equalize_hist(np.asarray(r_max)).flatten())\n if len(pims) % 10 == 0:\n print(\"Progress: {0}/{1}...\".format(len(pims), len(plist)))\n print(\"...Positive samples processed!\")\n rate = float(len(nims)) / len(pims)\n dataset = [(x, 0) for x in nims] + [(x, 1) for x in pims]\n return dataset, rate\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Decompse a image and reconsturct it with 2d-ssa than do the classfication ')\n parser.add_argument('--NEGATIVE_DIR', required=True, help=\"The path to neg_img\")\n parser.add_argument('--POSITIVE_DIR', required=True, help=\"The path to posi_img\")\n parser.add_argument('--usessa', action='store_true', help=\"Ust the ssa\")\n parser.add_argument('--argue', action='store_true', help=\"arguement of dataset\")\n parser.add_argument('--linear', action='store_true')\n parser.add_argument('--grid_search', action='store_true')\n opt = parser.parse_args()\n\n NEGATIVE_DIR = opt.NEGATIVE_DIR\n POSITIVE_DIR = opt.POSITIVE_DIR\n\n if opt.argue:\n train_set, test_set, rate = build_dataset(NEGATIVE_DIR, POSITIVE_DIR, argue=opt.argue)\n l = len(train_set)\n print(\"...Dataset building complepted.\")\n print(\"\"\"\n -------------------------------\n Dataset Size: {0}\n Negative/Positvie: {1:4.2f}\n -------------------------------\n \"\"\".format(l, rate))\n scaler = StandardScaler()\n X = scaler.fit_transform([x[0] for x in train_set])\n y = np.asarray([x[1] for x in train_set])\n X_test = scaler.transform([x[0] for x in test_set])\n y_test = np.asarray([x[1] for x in test_set])\n else: \n dataset, rate = build_dataset(NEGATIVE_DIR, POSITIVE_DIR, usessa=opt.usessa)\n l = len(dataset)\n print(\"...Dataset building complepted.\")\n print(\"\"\"\n -------------------------------\n Dataset Size: {0}\n Negative/Positvie: {1:4.2f}\n -------------------------------\n \"\"\".format(l, rate))\n seed = 42\n random.seed(seed)\n random.shuffle(dataset)\n scaler = StandardScaler()\n T = int(0.7 * l)\n X = scaler.fit_transform([x[0] for x in dataset[:T]])\n y = np.asarray([x[1] for x in dataset[:T]])\n X_test = scaler.transform([x[0] for x in dataset[T:]])\n y_test = np.asarray([x[1] for x in dataset[T:]])\n if opt.linear:\n lsvc = LinearSVC(C=10,class_weight='balanced').fit(X,y)\n else:\n lsvc = SVC(C=10, gamma=0.0001,kernel='rbf', class_weight='balanced').fit(X, y)\n acc = accuracy_score(lsvc.predict(X_test), y_test)\n print(acc)\n cross_validation=opt.grid_search\n if cross_validation:\n X = scaler.fit_transform(np.asarray([x[0] for x in dataset]))\n y = np.asarray([x[1] for x in dataset])\n C_range = np.logspace(-2, 10, 13)\n gamma_range = np.logspace(-9, 3, 13)\n param_grid = dict(gamma=gamma_range, C=C_range)\n cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)\n grid = GridSearchCV(SVC(class_weight='balanced'), 
param_grid=param_grid, cv=cv)\n grid.fit(X, y)\n \n print(\"The best parameters are %s with a score of %0.2f\"\n % (grid.best_params_, grid.best_score_))\n","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"458455589","text":"import numpy as np\nimport copy\n\n\nnp.random.seed(1)\n\n\n############################################################\n## Part 1: Prapare dataset and the sigmoid function ########\n############################################################\n\n# prepare the dataset\n# max dim of binary numbers we use (also the sequence length)\nmax_dim = 8\n# store binary numbers in a look-up table X\nX = np.unpackbits(np.array([range(2**max_dim)], dtype=np.uint8).T, axis=1)\n\n\ndef get_sample():\n # generate a sample\n x1_int = np.random.randint(2**(max_dim-1))\n x2_int = np.random.randint(2**(max_dim-1))\n y_int = x1_int + x2_int\n # look up the binary numbers\n x1 = X[x1_int]\n x2 = X[x2_int]\n y = X[y_int]\n\n return x1, x2, y, x1_int, x2_int, y_int\n\n\n# let's see some examples\nprint('shape of X', X.shape)\nx1, x2, y, x1_int, x2_int, y_int = get_sample()\nprint('')\nprint('x1: '+str(x1)+' '+str(x1_int))\nprint('x2: '+str(x2)+' '+str(x2_int))\nprint('y : '+str(y)+' '+str(y_int))\nprint('-'*20)\n\n# sigmoid function\n# output has the same dim with x\n\n\ndef sigmoid(x):\n output = 1 / (1 + np.exp(-x))\n return output\n\n\n# test your sigmoid\nprint('sigmoid(2) = %f, the result should equal 0.880797' % (sigmoid(2.0)))\n\n# grad of input\n\n\ndef sigmoid_grad_input(output):\n input_grad = output * (1 - output)\n return input_grad\n\n\n# test your sigmoid grad\nprint('sigmoid_grad_input(0.5) = %f, the result should equal 0.25' %\n (sigmoid_grad_input(0.5)))\nprint('-'*20)\n\n\n############################################################\n## Part 2: Initialize the rnn ########\n############################################################\n\n# learning rate\nlr = 0.05\ninput_dim = 2\noutput_dim = 1\nhidden_dim = 128\n\n# initialize neural network weights\nW1 = 2*np.random.random((input_dim, hidden_dim)) - 1\nW2 = 2*np.random.random((hidden_dim, output_dim)) - 1\nWh = 2*np.random.random((hidden_dim, hidden_dim)) - 1\n# grad for W\nW1_grad = np.zeros_like(W1)\nW2_grad = np.zeros_like(W2)\nWh_grad = np.zeros_like(Wh)\n\n\n############################################################\n## Part 3: Training the rnn ########\n############################################################\n# in each iter we first\n# 1) forward the max_dim steps, then\n# 2) backward the grad for each W and update W\nfor i in range(5000):\n\n # get a sample\n x1, x2, y, x1_int, x2_int, y_int = get_sample()\n\n # value of the hidden layer\n hiddens = list()\n hiddens.append(np.zeros(hidden_dim))\n # value of the output\n outs = list()\n\n # prediction of the rnn\n loss = 0\n pred = np.zeros_like(y)\n\n # forward\n for j in range(max_dim):\n\n # get input and output\n position = max_dim-j-1\n x_t = np.array([[x1[position], x2[position]]])\n y_t = np.array([[y[position]]]).T\n hidden_t = sigmoid(np.dot(x_t, W1) + np.dot(hiddens[j], Wh))\n # store hidden_t\n hiddens.append(copy.deepcopy(hidden_t))\n out_t = sigmoid(np.dot(hidden_t, W2))\n # store hidden_t\n outs.append(copy.deepcopy(out_t))\n # sum up the loss\n loss += -(y_t*np.log(out_t) + (1-y_t)*np.log(1-out_t))[0][0]\n # make the prediction, according to out_t\n if out_t > 0.5:\n pred[position] = 1\n else:\n pred[position] = 
0\n\n # store the grad of t+1 hidden layer\n hidden_next_grad = np.zeros(hidden_dim)\n # backward\n for j in range(max_dim):\n\n # get input and output\n x_t = np.array([[x1[j], x2[j]]])\n y_t = np.array([[y[j]]]).T\n out_t = outs[-j-1]\n hidden_t = hiddens[-j-1]\n # t-1 hidden layer\n hidden_t_grad = np.transpose(W2) * (out_t - y_t) + np.dot(hidden_next_grad, np.dot(\n np.diag(sigmoid_grad_input(hiddens[-j]).reshape(-1)), np.transpose(Wh)))\n W2_grad += (out_t - y_t) * np.transpose(hidden_t)\n Wh_grad += np.dot(np.dot(hiddens[-j-2].reshape(-1, 1), hidden_t_grad),\n np.diag(sigmoid_grad_input(hidden_t).reshape(-1)))\n W1_grad += np.dot(np.dot(np.transpose(x_t), hidden_t_grad),\n np.diag(sigmoid_grad_input(hidden_t).reshape(-1)))\n\n # store the grad of t hidden layer\n hidden_next_grad = hidden_t_grad\n\n # update W, lr is the learning rate\n W1 -= W1_grad * lr\n W2 -= W2_grad * lr\n Wh -= Wh_grad * lr\n\n # reset the grad of W\n W1_grad *= 0\n W2_grad *= 0\n Wh_grad *= 0\n\n # print out progress\n if(i % 100 == 0):\n print('i:'+str(i))\n print(\"loss:\" + str(loss))\n print('x1: '+str(x1)+' '+str(x1_int))\n print('x2: '+str(x2)+' '+str(x2_int))\n print('y : '+str(y)+' '+str(y_int))\n pred_int = 0\n for index, x in enumerate(reversed(pred)):\n pred_int += x*(2**index)\n print('pred: '+str(pred)+' '+str(pred_int))\n print('------------')\n","sub_path":"python/Media_and_Cognition/chap7_hw/code_RNN/rnn_problem.py","file_name":"rnn_problem.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"643184775","text":"print(\"I will Give you 3 hints,guess what animal I am\")\nclass Animal:\n def __init__(self,name):\n self.name = name\n \n def guess_who_am_i(self):\n if self.name == \"elephant\":\n questions = ['I have exceptional memory','I am the largest land-Living mammal in the world','I have the big trunk']\n elif self.name == \"tiger\":\n questions = ['I am the biggest cat','I come in black and white or orange and black','I am the symbol of strength and courage']\n elif self.name == \"bat\":\n questions = ['I use Echo-location','I can Fly','I see well in dark']\n false_count = 0\n for quote in questions:\n print(quote)\n input_name = input(\"Who am I?\")\n if (input_name == self.name):\n print(\"You got it! I am \",self.name)\n break\n else:\n false_count = false_count + 1\n print(\"Try Again!!\")\n #break\n if false_count == 3:\n print(\"I am out of Hints! 
The answer is: \",self.name)\n false_count = 0\n \ne= Animal(\"elephant\")\nt= Animal(\"tiger\")\nb= Animal(\"bat\")\ne.guess_who_am_i()\nt.guess_who_am_i()\nb.guess_who_am_i()\n","sub_path":"AnimalFacts.py","file_name":"AnimalFacts.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"165688697","text":"\"\"\"\nURLConf for Django calendering..\n\nRecommended usage is a call to ``include()`` in your project's root\nURLConf to include this URLConf for any URL beginning with\n``/calendars/``.\n\"\"\"\n\nfrom django.conf.urls.defaults import *\nfrom calloway import views\n\nurlpatterns = patterns('',\n (r'^/$', views.calendar_month),\n #(r'^(?P[^/]+)/(?P[^/]+)/event/create/$', views.create_event ),\n #(r'^(?P[^/]+)/(?P[^/]+)/$', views.calendar_month, { 'year': 2008, 'month': 'sep'} ),\n (r'^event/(?P[^/]+)/$', views.event_detail ),\n #(r'^(?P[^/]+)/(?P[^/]+)/(?P\\d{4})/(?P[a-z]{3})/(?P\\w{1,2})/$', views.calendar_day ),\n (r'^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/week/(?P\\d{4})/(?P[a-z]{3})/(?P\\w{1,2})/$', views.calendar_week ),\n #(r'^(?P[^/]+)/(?P[^/]+)/(?P\\d{4})/(?P[a-z]{3})/$', views.calendar_month ),\n #(r'^(?P[^/]+)/(?P[^/]+)/(?P\\d{4})/$', views.calendar_year ),\n (r'^list/group/(?P[^/]+)/', views.list_users_in_group_calendars ),\n (r'^list/(?P[^/]+)/(?P[^/]+)/', views.list_calendars ),\n )","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"382213290","text":"from BaseModels import BaseNeuralNet\nfrom trades import trades_loss\nimport torch\nfrom torchvision import datasets\nimport numpy as np\ncuda = torch.device('cuda:0')\nfrom utils import (cifar10_std_tup, cifar10_mu_tup, cifar10_std, cifar10_mu, cifar10_upper_limit, cifar10_lower_limit)\nfrom utils import (cifar100_std_tup, cifar100_mu_tup, cifar100_std, cifar100_mu, cifar100_upper_limit, cifar100_lower_limit)\nfrom utils import SnapshotEnsembleScheduler\n\ndef adjust_learning_rate(optimizer, epoch, config):\n \"\"\"decrease the learning rate\"\"\"\n lr = config[\"lr_wl\"]\n if epoch >= 75:\n lr = config[\"lr_wl\"] * 0.1\n if epoch >= 90:\n lr = config[\"lr_wl\"] * 0.01\n if epoch >= 100:\n lr = config[\"lr_wl\"] * 0.001\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\nclass TradesBasedTrainingCIFAR10(BaseNeuralNet):\n def __init__(self, attack_eps, model_base):\n super().__init__(model_base, attack_eps)\n \n def fit(self, train_loader, test_loader, config):\n np.random.seed(config[\"seed_wl\"])\n torch.manual_seed(config[\"seed_wl\"])\n torch.cuda.manual_seed(config[\"seed_wl\"])\n \n if config[\"dataset_name\"] == \"cifar10\":\n (std_tup, mu_tup, std, mu, upper_limit, lower_limit) = (cifar10_std_tup, cifar10_mu_tup, cifar10_std, cifar10_mu, cifar10_upper_limit, cifar10_lower_limit)\n elif config[\"dataset_name\"] == \"cifar100\":\n (std_tup, mu_tup, std, mu, upper_limit, lower_limit) = (cifar100_std_tup, cifar100_mu_tup, cifar100_std, cifar100_mu, cifar100_upper_limit, cifar100_lower_limit)\n\n val_X = None\n val_y = None\n for data in test_loader:\n val_X = data[0].to(cuda)\n val_y = data[1].to(cuda)\n break\n\n model = self.model\n model.train()\n\n opt = torch.optim.SGD(model.parameters(), lr=config['lr_wl'], momentum=config['momentum_wl'], weight_decay=config['weight_decay_wl'])\n \n epoch_size = len(train_loader.dataset)\n num_epochs = max(1, 
config[\"num_samples_wl\"] // epoch_size)\n lr_steps = num_epochs * len(train_loader) # For cyclic LR if we want\n if config[\"lr_schedule_wl\"] == \"snapshot\":\n scheduler = SnapshotEnsembleScheduler(opt, lr_steps, config[\"snapshot_cycles_wl\"], config[\"snapshot_a0_wl\"])\n \n # Training\n currSamples = 0 # added\n snapshots = 0\n for epoch in range(num_epochs):\n print(\"Epoch: \", epoch)\n if config[\"lr_schedule_wl\"] != \"snapshot\":\n adjust_learning_rate(opt, epoch, config)\n \n for i, data in enumerate(train_loader):\n X, y = data[0], data[1]\n currSamples += train_loader.batch_size # added\n X, y = X.cuda(), y.cuda()\n if i % 100 == 99:\n self.record_accuracies(currSamples, val_X=val_X, val_y=val_y, train_X=X, train_y=y, attack_iters=config[\"attack_iters_val_wl\"], \n restarts=config[\"training_valrestarts\"], val_attacks=config[\"val_attacks\"], dataset_name=config[\"dataset_name\"])\n loss = trades_loss(model=model,\n x_natural=X,\n y=y,\n optimizer=opt,\n step_size=config[\"step_size_wl\"],\n epsilon=config[\"train_eps_wl\"],\n perturb_steps=config[\"num_steps_wl\"],\n beta=config[\"beta_wl\"])\n loss.backward()\n opt.step()\n \n if config[\"lr_schedule_wl\"] == \"snapshot\":\n scheduler.step()\n \n if config[\"lr_schedule_wl\"] == \"snapshot\" and scheduler.snapshot():\n print(\"doing snapshot, lr = \", scheduler.get_last_lr()[0])\n torch.save(model.state_dict(), config[\"save_dir\"] + str(config[\"snapshot_cycles_wl\"] - snapshots - 1) + '.pth')\n snapshots+=1\n\n","sub_path":"TradesBasedTraining.py","file_name":"TradesBasedTraining.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"400262043","text":"#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport logging\nimport os\nimport sys\nfrom collections import OrderedDict\nfrom pprint import pformat\n\nfrom ludwig.data.postprocessing import postprocess\nfrom ludwig.data.preprocessing import preprocess_for_prediction\nfrom ludwig.features.feature_registries import output_type_registry\nfrom ludwig.globals import LUDWIG_VERSION, is_on_master, set_on_master\nfrom ludwig.globals import TRAIN_SET_METADATA_FILE_NAME\nfrom ludwig.models.model import load_model_and_definition\nfrom ludwig.utils.data_utils import save_csv\nfrom ludwig.utils.data_utils import save_json\nfrom ludwig.utils.misc import get_from_registry\nfrom ludwig.utils.print_utils import logging_level_registry, repr_ordered_dict\nfrom ludwig.utils.print_utils import print_boxed\nfrom ludwig.utils.print_utils import print_ludwig\n\n\ndef full_predict(\n model_path,\n data_csv=None,\n data_hdf5=None,\n dataset_type='generic',\n split='test',\n batch_size=128,\n 
skip_save_unprocessed_output=False,\n output_directory='results',\n evaluate_performance=True,\n gpus=None,\n gpu_fraction=1.0,\n use_horovod=False,\n debug=False,\n **kwargs\n):\n # setup directories and file names\n experiment_dir_name = output_directory\n suffix = 0\n while os.path.exists(experiment_dir_name):\n experiment_dir_name = output_directory + '_' + str(suffix)\n suffix += 1\n\n if is_on_master():\n logging.info('Dataset type: {}'.format(dataset_type))\n logging.info('Dataset path: {}'.format(\n data_csv if data_csv is not None else data_hdf5))\n logging.info('Model path: {}'.format(model_path))\n logging.info('Output path: {}'.format(experiment_dir_name))\n logging.info('')\n\n train_set_metadata_json_fp = os.path.join(\n model_path,\n TRAIN_SET_METADATA_FILE_NAME\n )\n\n # preprocessing\n dataset, train_set_metadata = preprocess_for_prediction(\n model_path,\n split,\n dataset_type,\n data_csv,\n data_hdf5,\n train_set_metadata_json_fp,\n evaluate_performance\n )\n\n # run the prediction\n if is_on_master():\n print_boxed('LOADING MODEL')\n model, model_definition = load_model_and_definition(model_path,\n use_horovod=use_horovod)\n\n prediction_results = predict(\n dataset,\n train_set_metadata,\n model,\n model_definition,\n batch_size,\n evaluate_performance,\n gpus,\n gpu_fraction,\n debug\n )\n model.close_session()\n\n if is_on_master():\n os.mkdir(experiment_dir_name)\n\n # postprocess\n postprocessed_output = postprocess(\n prediction_results,\n model_definition['output_features'],\n train_set_metadata,\n experiment_dir_name,\n skip_save_unprocessed_output or not is_on_master()\n )\n\n save_prediction_outputs(postprocessed_output, experiment_dir_name)\n\n if evaluate_performance:\n print_prediction_results(prediction_results)\n save_prediction_statistics(prediction_results, experiment_dir_name)\n\n logging.info('Saved to: {0}'.format(experiment_dir_name))\n\n\ndef predict(\n dataset,\n train_set_metadata,\n model,\n model_definition,\n batch_size=128,\n evaluate_performance=True,\n gpus=None,\n gpu_fraction=1.0,\n debug=False\n):\n \"\"\"Computes predictions based on the computed model.\n :param dataset: Dataset containing the data to calculate\n the predictions from.\n :type dataset: Dataset\n :param model: The trained model used to produce the predictions.\n :type model: Model\n :param model_definition: The model definition of the model to use\n for obtaining predictions\n :type model_definition: Dictionary\n :param batch_size: The size of batches when computing the predictions.\n :type batch_size: Integer\n :param evaluate_performance: If this parameter is False, only the predictions\n will be returned, if it is True, also performance metrics\n will be calculated on the predictions. 
It requires the data\n to contain also ground truth for the output features, otherwise\n the metrics cannot be computed.\n :type evaluate_performance: Bool\n :type gpus: List\n :type gpu_fraction: Integer\n :param debug: If true turns on tfdbg with inf_or_nan checks.\n :type debug: Boolean\n\n :returns: A dictionary containing the predictions of each output feature,\n alongside with statistics on the quality of those predictions\n (if evaluate_performance is True).\n \"\"\"\n if is_on_master():\n print_boxed('PREDICT')\n test_stats = model.predict(\n dataset,\n batch_size,\n evaluate_performance=evaluate_performance,\n gpus=gpus,\n gpu_fraction=gpu_fraction\n )\n\n if evaluate_performance:\n calculate_overall_stats(\n test_stats,\n model_definition['output_features'],\n dataset,\n train_set_metadata\n )\n\n return test_stats\n\n\ndef calculate_overall_stats(test_stats, output_features, dataset,\n train_set_metadata):\n for output_feature in output_features:\n feature = get_from_registry(\n output_feature['type'],\n output_type_registry\n )\n feature.calculate_overall_stats(\n test_stats, output_feature, dataset, train_set_metadata\n )\n\n\ndef save_prediction_outputs(\n postprocessed_output,\n experiment_dir_name,\n skip_output_types=None\n):\n if skip_output_types is None:\n skip_output_types = set()\n csv_filename = os.path.join(experiment_dir_name, '{}_{}.csv')\n for output_field, outputs in postprocessed_output.items():\n for output_type, values in outputs.items():\n if output_type not in skip_output_types:\n save_csv(csv_filename.format(output_field, output_type), values)\n\n\ndef save_prediction_statistics(prediction_stats, experiment_dir_name):\n test_stats_fn = os.path.join(\n experiment_dir_name,\n 'prediction_statistics.json'\n )\n save_json(test_stats_fn, prediction_stats)\n\n\ndef print_prediction_results(prediction_stats):\n for output_field, result in prediction_stats.items():\n if (output_field != 'combined' or\n (output_field == 'combined' and len(prediction_stats) > 2)):\n logging.info('\\n===== {} ====='.format(output_field))\n for measure in sorted(list(result)):\n if measure != 'confusion_matrix' and measure != 'roc_curve':\n value = result[measure]\n if isinstance(value, OrderedDict):\n value_repr = repr_ordered_dict(value)\n else:\n value_repr = pformat(result[measure], indent=2)\n logging.info(\n '{0}: {1}'.format(\n measure,\n value_repr\n )\n )\n\n\ndef cli(sys_argv):\n parser = argparse.ArgumentParser(\n description='This script loads a pretrained model '\n 'and uses it to predict.',\n prog='ludwig predict',\n usage='%(prog)s [options]'\n )\n\n # ---------------\n # Data parameters\n # ---------------\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\n '--data_csv',\n help='input data CSV file. '\n 'If it has a split column, it will be used for splitting '\n '(0: train, 1: validation, 2: test), '\n 'otherwise the dataset will be randomly split'\n )\n group.add_argument(\n '--data_hdf5',\n help='input data HDF5 file. It is an intermediate preprocess version of'\n ' the input CSV created the first time a CSV file is used in the '\n 'same directory with the same name and a hdf5 extension'\n )\n parser.add_argument(\n '--train_set_metadata_json',\n help='input metadata JSON file. 
It is an intermediate preprocess file '\n 'containing the mappings of the input CSV created the first time '\n 'a CSV file is used in the same directory with the same name and '\n 'a json extension'\n )\n\n parser.add_argument(\n '-s',\n '--split',\n default='test',\n choices=['training', 'validation', 'test', 'full'],\n help='the split to test the model on'\n )\n\n # ----------------\n # Model parameters\n # ----------------\n parser.add_argument(\n '-m',\n '--model_path',\n help='model to load',\n required=True\n )\n\n # -------------------------\n # Output results parameters\n # -------------------------\n parser.add_argument(\n '-od',\n '--output_directory',\n type=str,\n default='results',\n help='directory that contains the results'\n )\n parser.add_argument(\n '-ssuo',\n '--skip_save_unprocessed_output',\n help='skips saving intermediate NPY output files',\n action='store_true', default=False\n )\n\n # ------------------\n # Generic parameters\n # ------------------\n parser.add_argument(\n '-bs',\n '--batch_size',\n type=int,\n default=128,\n help='size of batches'\n )\n parser.add_argument(\n '-ep',\n '--evaluate_performance',\n action='store_true',\n default=False,\n help='performs performance metrics calculation.'\n 'Requires that the dataset contains one column '\n 'for each output feature the model predicts '\n 'to use as ground truth for the performance calculation.'\n )\n\n # ------------------\n # Runtime parameters\n # ------------------\n parser.add_argument(\n '-g',\n '--gpus',\n type=int,\n default=0,\n help='list of gpu to use'\n )\n parser.add_argument(\n '-gf',\n '--gpu_fraction',\n type=float,\n default=1.0,\n help='fraction of gpu memory to initialize the process with'\n )\n parser.add_argument(\n '-uh',\n '--use_horovod',\n action='store_true',\n default=False,\n help='uses horovod for distributed training'\n )\n parser.add_argument(\n '-dbg',\n '--debug',\n action='store_true',\n default=False,\n help='enables debugging mode'\n )\n parser.add_argument(\n '-l',\n '--logging_level',\n default='info',\n help='the level of logging to use',\n choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']\n )\n\n args = parser.parse_args(sys_argv)\n\n logging.basicConfig(\n stream=sys.stdout,\n level=logging_level_registry[args.logging_level],\n format='%(message)s'\n )\n\n set_on_master(args.use_horovod)\n\n if is_on_master():\n print_ludwig('Predict', LUDWIG_VERSION)\n\n full_predict(**vars(args))\n\n\nif __name__ == '__main__':\n cli(sys.argv[1:])\n","sub_path":"ludwig/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":12165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"371132816","text":"import pymysql\nfrom typing import Union\n\nconf = {\n \"host\": \"127.0.0.1\",\n \"port\": 3306,\n \"user\": \"root\",\n \"passwd\": \"mysql1234\",\n \"charset\": \"utf8mb4\",\n \"cursorclass\": pymysql.cursors.DictCursor,\n \"database\": \"test\"\n}\nconn = pymysql.connect(**conf)\ncursor = conn.cursor()\n\n\nclass DB:\n \"\"\"This class generates SQL statements, reducing the complexity of hand-written SQL and improving maintainability\"\"\"\n\n def __init__(self):\n self._cursor = cursor\n self._conn = conn\n\n def query(self, table_name: str, *fields: str, **conditions: Union[int, str]):\n \"\"\"Generic SELECT statement\"\"\"\n # Build the field names\n if not fields:\n field = \"*\"\n elif len(fields) == 1:\n field = fields[0]\n else:\n field = \", \".join(fields)\n # Build the condition clause\n condition = self._convert_dict(conditions)\n sql = \"SELECT {0} FROM {1} {2}\".format(field, table_name, condition)\n 
self._cursor.execute(sql)\n return self._cursor.fetchall()\n\n def insert(self, table_name: str, **item: Union[int, str]):\n \"\"\"Generic INSERT statement\"\"\"\n if not item:\n raise ValueError(\"item values must be provided\")\n # Build the field names\n filed, value = zip(*item.items())\n filed = \", \".join(filed)\n # Build the corresponding values\n value_list = []\n for v in value:\n if isinstance(v, str):\n value_list.append(\"'\" + v + \"'\")\n else:\n value_list.append(str(v))\n value = \", \".join(value_list)\n sql = \"INSERT INTO {0}({1}) VALUES ({2})\".format(table_name, filed, value)\n self._cursor.execute(sql)\n conn.commit()\n\n def update(self, table_name, filed_dict, condition_dict):\n \"\"\"Generic UPDATE statement\"\"\"\n filed = self._convert_dict(filed_dict, flag=1)\n condition = self._convert_dict(condition_dict, flag=2)\n sql = \"UPDATE {0} SET {1} {2}\".format(table_name, filed, condition)\n self._cursor.execute(sql)\n self._conn.commit()\n\n def delete(self, table_name, **conditions):\n \"\"\"Generic DELETE statement\"\"\"\n condition = self._convert_dict(conditions, flag=2)\n # _convert_dict(flag=2) already returns a \"WHERE ...\" clause (or an empty string)\n sql = \"DELETE FROM {0} {1}\".format(table_name, condition)\n self._cursor.execute(sql)\n self._conn.commit()\n\n def execute(self, sql):\n \"\"\"Execute a raw SQL statement directly\"\"\"\n self._cursor.execute(sql)\n self._conn.commit()\n return self._cursor.fetchall()\n\n def _convert_dict(self, input_dict, flag=2):\n \"\"\"Convert a dict into a field list or a condition clause\n dict(a=b) -> [\"a = b\"]\n\n Args:\n input_dict: dict used to set data or conditions\n flag: 1 -> convert to fields, 2 -> convert to a condition clause\n\n Returns:\n (str): the converted result\n Raises:\n ValueError: raised if flag is not set to 1 or 2\n \"\"\"\n condition_list = []\n while input_dict:\n key, value = input_dict.popitem()\n if isinstance(value, str):\n # Escape string values\n value = \"'\" + self._conn.escape_string(value) + \"'\"\n condition_list.append(\"{0} = {1}\".format(key, value))\n if flag not in [1, 2]:\n raise ValueError(\"flag must be 1 or 2: 1 -> fields, 2 -> condition clause\")\n if flag == 1:\n filed = \", \".join(condition_list) if len(condition_list) > 1 else condition_list.pop()\n return filed\n else: # flag==2\n if not condition_list:\n condition = \"\"\n else:\n condition = \" AND \".join(condition_list) if len(condition_list) > 1 else condition_list.pop()\n condition = \"WHERE \" + condition\n return condition\n\n\ndb = DB()\n","sub_path":"myblog/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"321508701","text":"from glob import glob\nfrom os import chdir, path, makedirs, rename\nimport re\n\nOUTPUT_PATH = \"/home/remi/Torrents/\"\nINPUT_PATH = \"/home/remi/Torrents/Ranger\"\nSERIES = [{\n \"nom\" : \"The 100\",\n \"search\" : \"*The.100.*.avi\"\n}, {\n \"nom\" : \"Game Of Thrones\",\n \"search\" : \"*Game.*hrones.*.avi\"\n}, {\n \"nom\" : \"The Big Bang Theory\",\n \"search\" : \"*The.Big.Bang.Theory.*.avi\"\n}, {\n \"nom\" : \"The Arrow\",\n \"search\" : \"*Arrow.*.avi\"\n}, {\n \"nom\" : \"The Flash\",\n \"search\" : \"*The.Flash.*.avi\"\n}]\n\nchdir(INPUT_PATH)\n\ndef mkdir(f) :\n if not path.exists(f) :\n makedirs(f)\n\nfor s in SERIES :\n mkdir(OUTPUT_PATH + s[\"nom\"])\n files = glob(s[\"search\"]) + glob(\"**/\" + s[\"search\"])\n for f in files :\n recherche = re.search(\"S..E..\", f).group(0)\n saison = recherche[1:3]\n episode = recherche[4:]\n dossier = OUTPUT_PATH + s[\"nom\"] + \"/\" + \"Saison \" + str(int(saison)) + \"/\"\n nomEpisode = s[\"nom\"] + \" - s\"+saison+\"e\"+episode+\".avi\"\n mkdir(dossier)\n rename(path.join(INPUT_PATH, f), dossier + nomEpisode)\n print(f + \" -> \" + 
nomEpisode)","sub_path":"cleanseries.py","file_name":"cleanseries.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"24412730","text":"import asyncio\nfrom unittest import TestCase\n\nfrom tests.example_apps.music.tables import Band, Manager\n\nfrom ..base import postgres_only\n\n\nclass TestAtomic(TestCase):\n def test_error(self):\n \"\"\"\n Make sure queries in a transaction aren't committed if a query fails.\n \"\"\"\n transaction = Band._meta.db.atomic()\n transaction.add(\n Manager.create_table(),\n Band.create_table(),\n Band.raw(\"MALFORMED QUERY ... SHOULD ERROR\"),\n )\n try:\n transaction.run_sync()\n except Exception:\n pass\n self.assertTrue(not Band.table_exists().run_sync())\n self.assertTrue(not Manager.table_exists().run_sync())\n\n def test_succeeds(self):\n transaction = Band._meta.db.atomic()\n transaction.add(Manager.create_table(), Band.create_table())\n transaction.run_sync()\n\n self.assertTrue(Band.table_exists().run_sync())\n self.assertTrue(Manager.table_exists().run_sync())\n\n transaction.add(\n Band.alter().drop_table(), Manager.alter().drop_table()\n )\n transaction.run_sync()\n\n\nclass TestTransaction(TestCase):\n def tearDown(self):\n for table in (Band, Manager):\n if table.table_exists().run_sync():\n table.alter().drop_table().run_sync()\n\n def test_error(self):\n \"\"\"\n Make sure queries in a transaction aren't committed if a query fails.\n \"\"\"\n\n async def run_transaction():\n try:\n async with Band._meta.db.transaction():\n Manager.create_table()\n Band.create_table()\n Band.raw(\"MALFORMED QUERY ... SHOULD ERROR\")\n except Exception:\n pass\n\n asyncio.run(run_transaction())\n\n self.assertTrue(not Band.table_exists().run_sync())\n self.assertTrue(not Manager.table_exists().run_sync())\n\n def test_succeeds(self):\n async def run_transaction():\n async with Band._meta.db.transaction():\n await Manager.create_table().run()\n await Band.create_table().run()\n\n asyncio.run(run_transaction())\n\n self.assertTrue(Band.table_exists().run_sync())\n self.assertTrue(Manager.table_exists().run_sync())\n\n @postgres_only\n def test_transaction_id(self):\n \"\"\"\n An extra sanity check, that the transaction id is the same for each\n query inside the transaction block.\n \"\"\"\n\n async def run_transaction():\n responses = []\n async with Band._meta.db.transaction():\n responses.append(\n await Manager.raw(\"SELECT txid_current()\").run()\n )\n responses.append(\n await Manager.raw(\"SELECT txid_current()\").run()\n )\n return [i[0][\"txid_current\"] for i in responses]\n\n txids = asyncio.run(run_transaction())\n assert len(set(txids)) == 1\n\n # Now run it again and make sure the transaction ids differ.\n next_txids = asyncio.run(run_transaction())\n assert txids != next_txids\n","sub_path":"tests/engine/test_transaction.py","file_name":"test_transaction.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"226529418","text":"# -*- coding: utf-8 -*-\n\n# import the necessary packages\nfrom imutils import contours\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport imutils\nimport cv2\nimport math\nimport os\nimport sys\n\ndef to_contours_image(contours, ref_image):\n blank_background = np.zeros_like(ref_image)\n img_contours = cv2.drawContours(blank_background, contours, -1, (255, 255, 255), thickness=2)\n return img_contours\n\n\ndef 
locate(image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n cv2.imshow(\"gray\", gray)\n\n rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 6))\n sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (6, 6))\n\n tophat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, rectKernel)\n cv2.imshow(\"tophat\", tophat)\n\n # compute the Scharr gradient of the tophat image, then scale\n # the rest back into the range [0, 255]\n gradX = cv2.Sobel(tophat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)\n gradX = np.absolute(gradX)\n (minVal, maxVal) = (np.min(gradX), np.max(gradX))\n gradX = (255 * ((gradX - minVal) / (maxVal - minVal)))\n gradX = gradX.astype(\"uint8\")\n\n # apply a closing operation using the rectangular kernel to help\n # close gaps in between digits, then apply\n # Otsu's thresholding method to binarize the image\n gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)\n cv2.imshow(\"gradX\", gradX)\n\n\n thresh = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n cv2.imshow(\"thresh1\", thresh)\n\n # apply a second closing operation to the binary image, again\n # to help close gaps between digit number regions\n thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)\n\n cv2.imshow(\"thresh2\", thresh)\n\n refCnts, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # refCnts, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n print(\"find total contours: \", len(refCnts))\n if (len(refCnts) < 1):\n return None, None\n\n leftCnts = []\n roi = []\n img = image.copy()\n # Loop over the contours and keep the ones that match the configured criteria\n for (i, c) in enumerate(refCnts):\n (x, y, w, h) = cv2.boundingRect(c)\n area = cv2.contourArea(c)\n if area > 100:\n img = cv2.rectangle(img, (x-1,y-1), (x+w+1,y+h+1), (0,0,255), 1)\n # if (h >= 30 and h <= 60 and w / h <= 4 and w / h > 2.5):\n if (w / h <= 4 and w / h > 2.5):\n leftCnts.append(c)\n roi = image[y:y + h, x - 2:x + w + 2]\n\n cv2.imshow(\"contours 1\", img)\n #cv2.waitKey(0)\n\n print(\"After filter, total contours: \", len(leftCnts))\n if (len(leftCnts) < 1):\n return None, None\n\n return roi, leftCnts\n\n\nif __name__ == '__main__':\n pics_path = sys.argv[1] # get the image path passed on the command line\n image = cv2.imread(pics_path)\n image = imutils.resize(image, width=600)\n roi, contours = locate(image)\n\n if roi is None or contours is None:\n print(\"digit region locate failed!\")\n exit(0)\n\n # show the found roi\n cv2.imshow(\"roi\", roi)\n\n # Loop over the contours and keep the ones that match the configured criteria\n for (i, c) in enumerate(contours):\n # compute the bounding box for the digit, extract it, and resize\n # it to a fixed size\n (x, y, w, h) = cv2.boundingRect(c)\n image = cv2.rectangle(image, (x-1,y-1), (x+w+1,y+h+1), (0,0,255), 2)\n\n cv2.imshow(\"contours 2\", image)\n\n cv2.waitKey(0)\n\n","sub_path":"locate.py","file_name":"locate.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"213212967","text":"class Solution:\n def __init__(self):\n self.unique = []\n\n def fourSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n\n result = 0\n len_set = len(nums)\n for i in range(len_set):\n self.scan(nums[i])\n\n len_unique = len(self.unique)\n for i in range(len_unique):\n result += self.unique[i]\n\n return result\n\n def scan(self, set):\n for i in range(4):\n if set[i] not in self.unique:\n self.unique.append(set[i])\n else:\n continue\n\n\nif __name__ == '__main__':\n set = [[-1, 0, 0, 1], [-2, 
-1, 1, 2], [-2, 0, 0, 2]]\n    t = Solution()\n    print(t.fourSum(set, 0))\n","sub_path":"array/18.4Sum.py","file_name":"18.4Sum.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"417447023","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom copy import deepcopy\nfrom utils.city_data import get_city_dict\nfrom utils.Regular_Expression import regularExpression, regularExpression02, category\nimport re\nfrom utils.STMP import send_mail_when_error\nfrom utils.parse_content import pc\n\n# http://www.jztb.gov.cn/jyxx/077001/077001001/1.html @ Jinzhou Municipal Public Resources Trading Administration Office\nclass jinzhouSpiderSpider(scrapy.Spider):\n name = 'jinzhou_city_gov_spider'\n\n def __init__(self):\n\n self.city_dict = get_city_dict()\n self.category = category\n\n self.baseUrl = 'http://www.jztb.gov.cn'\n\n self.xpath_rule = {\n 'title_rule': './/a/text()',\n 'url_rule': './/a/@href',\n 'web_time_rule': './span/text()',\n 'content_rule' : r'
(.*?)'\n }\n\n self.error_count = 0\n self.source_name = '锦州市公共资源交易管理办公室'\n\n self.regularExpression = regularExpression\n self.regularExpression02 = regularExpression02\n\n self.addr_id = '411'\n\n self.headers = {\n 'Host': 'www.jztb.gov.cn',\n 'Referer': 'www.jztb.gov.cn'\n }\n\n self.pc = pc\n\n self.start_urls = [\n # Government procurement: 397 pages of announcements, 258 of results, 14 of change notices (each updates about one page at a time)\n ('招标公告', 'http://www.jztb.gov.cn/jyxx/077001/077001001/{}.html', 3),\n ('招标结果', 'http://www.jztb.gov.cn/jyxx/077001/077001002/{}.html', 3),\n ('变更公告', 'http://www.jztb.gov.cn/jyxx/077001/077001003/{}.html', 3),\n # Construction projects: 112 pages of tender announcements, 72 of winning candidates, 61 of award notices (each updates about one page at a time)\n ('招标公告', 'http://www.jztb.gov.cn/jyxx/077002/077002001/{}.html', 3),\n ('招标结果', 'http://www.jztb.gov.cn/jyxx/077002/077002002/{}.html', 3),\n ('招标结果', 'http://www.jztb.gov.cn/jyxx/077002/077002003/{}.html', 3),\n # Drug and medical device procurement: announcements and results, 13 pages each\n ('招标公告', 'http://www.jztb.gov.cn/jyxx/077005/077005001/{}.html', 3),\n ('招标结果', 'http://www.jztb.gov.cn/jyxx/077005/077005002/{}.html', 3),\n\n ]\n\n def start_requests(self):\n for url_info in self.start_urls:\n urls = [url_info[1].format(i) for i in range(1, url_info[2])]\n for url in urls:\n items = {}\n items['type_id'] = self.category[url_info[0]]\n yield scrapy.Request(url, callback=self.parse, meta={\"items\": deepcopy(items)}, headers = self.headers)\n\n\n def parse(self, response):\n items = response.meta['items']\n\n infos = response.xpath('//div[@id=\"jt\"]/ul/li')\n\n for each_li in infos:\n items['title'] = ''\n items['url'] = ''\n items['web_time'] = ''\n items['intro'] = ''\n items['addr_id'] = ''\n\n try:\n items['title'] = each_li.xpath(self.xpath_rule['title_rule']).extract_first().strip()\n except:\n pass\n\n try:\n items['url'] = self.baseUrl + each_li.xpath(self.xpath_rule['url_rule']).extract_first()\n except:\n msg = self.name + ', this spider failed to extract a detail-page url'\n send_mail_when_error(msg)\n self.error_count += 1\n if self.error_count > 3:\n # send the alert mail before quitting, otherwise it is never sent\n msg = self.name + ', this spider was stopped because detail pages kept failing'\n send_mail_when_error(msg)\n quit()\n pass\n\n try:\n items['web_time'] = each_li.xpath(self.xpath_rule['web_time_rule']).extract_first().strip()\n except:\n pass\n\n yield scrapy.Request(items['url'], callback = self.parse_article, headers = self.headers, meta = {'items' : deepcopy(items)})\n\n\n def parse_article(self, response):\n items = response.meta['items']\n\n try:\n items['intro'] = self.pc.get_clean_content(self.xpath_rule['content_rule'], self.regularExpression, self.regularExpression02, response.text)\n except:\n pass\n\n\n items['addr_id'] = self.addr_id\n\n if items['addr_id'] == '':\n for city in self.city_dict:\n if city in items['title']:\n items['addr_id'] = self.city_dict[city]\n break\n\n items[\"source_name\"] = self.source_name\n yield items","sub_path":"china_canton_railway/china_canton_railway/spiders/jinzhou_city_gov_spider.py","file_name":"jinzhou_city_gov_spider.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"16101635","text":"import RPi.GPIO as GPIO\nimport time\nfrom CarUtility import *\nimport subprocess\n\ntrigPin = 16\nechoPin = 18\nledPin = 11\nMAX_DISTANCE = 220\n\ndef Destroy():\n Fari.stopFari()\n GPIO.cleanup()\n proc.terminate()\n\n\ndef Luce():\n Fari.getFari()\n\n\ndef IoTinputReader(proc):\n # read the position, decode it from binary to ASCII, and finally strip any white space\n result = proc.stdout.readline().decode(\"utf-8\").strip()\n if (result == \"Left\"):\n print(\"Left\")\n elif (result == \"Right\"):\n print(\"Right\")\n elif 
(result == \"Forward\"):\n print(\"Forward\")\n elif (result == \"BackWard\"):\n print(\"BackWard\")\n elif (result == \"None\"):\n print(\"Someone is online\")\n elif (result == \"\"):\n proc.terminate()\n elif (result == \"Error\"):\n print(\"\\nAn error occured !\")\n # termina esecuzione processo\n Destroy()\n return\n# chiama il comando 'flask run' da shell/cmd\nproc = subprocess.Popen(\"flask run\", stdout=subprocess.PIPE, shell=True)\nprint(\"Server running on : http://127.0.0.1:5000/\")\nif __name__ == '__main__':\n try:\n Fari.setup(ledPin)\n while True:\n IoTinputReader(proc)\n Luce()\n except KeyboardInterrupt:\n Destroy()\n","sub_path":"Car_NOT_Automatica.py","file_name":"Car_NOT_Automatica.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"262228889","text":"# Name: Matthew Zhang - mlz855\n# Date: 5/21/16\n# Description: Regular Bayes Classifier. \n# To perform tenfold testing, do:\n# x = Bayes_Classifier\n# x.train() (optional: only if pickled files are already present)\n# x.test()\n\nimport math, os, pickle, re\nfrom random import shuffle\n\nclass Bayes_Classifier:\n\n def __init__(self):\n \"\"\"This method initializes and trains the Naive Bayes Sentiment Classifier. If a \n cache of a trained classifier has been stored, it loads this cache. Otherwise, \n the system will proceed through training. After running this method, the classifier \n is ready to classify input text.\"\"\"\n #Initialize dictionaries for positive words and negative words\n self.posDict = {}\n self.negDict = {}\n #Iinitialize list of files that are positive and list of files that are negative\n self.negativeList = []\n self.positiveList = []\n #If the pickled files exist, load them\n if (os.path.isfile(\"posDict\") and os.path.isfile(\"negDict\")):\n self.posDict = self.load(\"posDict\")\n self.negDict = self.load(\"negDict\")\n #If the pickled files don't exist, generate them through train\n else:\n self.train()\n\n def train(self): \n \"\"\"Trains the Naive Bayes Sentiment Classifier.\"\"\"\n #initialize the list of files\n lFileList = []\n #add each file in the movies_reviews/ directory to lFileList\n for fFileObj in os.walk('movies_reviews/'):\n lFileList = fFileObj[2]\n break\n #iterate through each file in lFileList\n for files in lFileList:\n #gets the star rating from the filename\n num = int(files[7])\n #tokenize the contents of the file\n tokens = self.tokenize(self.loadFile(files))\n #if the file is negative\n if (num==1):\n #append the file to the negative file list\n self.negativeList.append(files)\n #go through each word in the token\n for word in tokens:\n #if the word isn't already present in the dictionary, initialize it\n if (self.negDict.has_key(word) is False):\n self.negDict[word] = 1\n #otherwise, update the frequency of the word in the dictionary by 1\n else:\n self.negDict[word] += 1\n #if the file is positive\n elif (num==5):\n #append the file to the positive file list\n self.positiveList.append(files)\n #go through each word in the token\n for word in tokens:\n #if the word isn't already present in the positive dictionary, initialize it\n if (self.posDict.has_key(word) is False):\n self.posDict[word] = 1\n #otherwise, update the frequency in the positive dictionary\n else:\n self.posDict[word] += 1\n\n #pickle the negative and positive dictionaries\n self.save(self.negDict, \"negDict\")\n self.save(self.posDict, \"posDict\")\n\n\n def classify(self, sText, sigma=0):\n \"\"\"Given 
a target string sText, this function returns the most likely document\n class to which the target string belongs (i.e., positive, negative or neutral).\n \"\"\"\n #initialize the number of positive and negative words in each dictionary\n posWords = 0\n negWords = 0\n\n #find the number of words in the positive dictionary\n for val in self.posDict.values():\n posWords+=val\n #find the number of words in the negative dictionary\n for val in self.negDict.values():\n negWords+=val\n\n #initialize the probability of positve/negative given features\n posProb = 0\n negProb = 0\n\n #tokenize the input string\n tokens = self.tokenize(sText)\n\n #iterate through each word in the token\n for words in tokens:\n #if the word isn't already present in the positive dictionary, the numerator will be 1 (w/ add one smoothing)\n if (self.posDict.has_key(words) is False):\n #update the probability w/ log of probability of feature given that the document is positive (addresses underflow)\n posProb += math.log(float(1) / (posWords) )\n #otherwise, calculate probability normally\n else:\n #update the probability w/ log of probability of feature given that the document is positive\n posProb += math.log(float(1+self.posDict[words]) / (posWords))\n #do the same as above, only with the negative dictionary\n if (self.negDict.has_key(words) is False):\n negProb += math.log(float(1) / (negWords) )\n else:\n negProb += math.log(float(1+self.negDict[words]) / (negWords) )\n\n #if the positive probability is higher than negative with sigma value, return positive\n if posProb - negProb > sigma:\n return 'positive'\n #if the negative probability is higher than positive with sigma value, return positive\n elif negProb - posProb > sigma:\n return 'negative'\n #if neither positive/negative probablities are greater than each other by sigma, assign document to be neutral\n else:\n return 'neutral'\n\n def testTrain(self, posTrainingList, negTrainingList): \n \"\"\"Trains the Naive Bayes Sentiment Classifier given lists \n of positive and negative files for tenfold testing purposes.\"\"\"\n \n #re-initalizes the positive and negative dictionaries\n self.posDict = {}\n self.negDict = {}\n #iterates through the given list of positive files and updates the dictionary with each word\n for files in posTrainingList:\n tokens = self.tokenize(self.loadFile(files))\n #iterates through each word and updates the positve dictionary with it\n for word in tokens:\n if (self.posDict.has_key(word) is False):\n self.posDict[word] = 1\n else:\n self.posDict[word] += 1\n #iterates through given list of negative files\n for files in negTrainingList:\n tokens = self.tokenize(self.loadFile(files))\n #iterates through each word and updates the negative dictionary\n for word in tokens:\n if (self.negDict.has_key(word) is False):\n self.negDict[word] = 1\n else:\n self.negDict[word] += 1\n\n def test(self):\n \"\"\"Applies tenfold testing to the Sentiment Classifier.\"\"\"\n\n #initialize the number of true/false positives and true/false negatives\n correctPos = 0\n falsePos = 0\n correctNeg = 0\n falseNeg = 0\n\n #randomizes the order of positive and negative files\n shuffle(self.negativeList)\n shuffle(self.positiveList)\n\n #calculate length of list of negative files\n negLen = len(self.negativeList)\n #shorten list of positive files to match length of negative files\n newPositiveList = self.positiveList[0:negLen]\n #length of newPositiveList\n posLen = len(newPositiveList)\n\n #calculate length of each partition for tenfold testing\n posPartLen = 
posLen / 10\n negPartLen = negLen / 10\n\n #iterate ten times for tenfold testing\n for i in range(0,10):\n\n #slice positive/negative lists to generate 9/10 of files for training\n posTrainingList = newPositiveList[0:posPartLen*i] + newPositiveList[posPartLen*(i+1)-1:posLen]\n negTrainingList = self.negativeList[0:negPartLen*i] + self.negativeList[negPartLen*(i+1)-1:negLen]\n #train classifier on those 9/10 of files\n self.testTrain(posTrainingList, negTrainingList)\n\n #slice positive/negative lists to generate 1/10 of files for testing\n posTestingList = newPositiveList[posPartLen*i:posPartLen*(i+1)]\n negTestingList = self.negativeList[negPartLen*i:negPartLen*(i+1)]\n \n #do testing for each positive file\n for files in posTestingList:\n words = self.loadFile(files)\n #classify document\n res = self.classify(words)\n #if it thinks positive file is positive, it's correctPos so update it\n if (res == 'positive'):\n correctPos += 1\n #if it thinks positive file is negative, update falseNegative\n else:\n falseNeg += 1\n\n #do testing for each negative file\n for files in negTestingList:\n words = self.loadFile(files)\n #classify document\n res = self.classify(words)\n #if it classifies negative file as negative, update correctNegative\n if (res == 'negative'):\n correctNeg += 1\n #otherwise, it's a false positive\n else:\n falsePos += 1\n\n #calculate recall by dividing correctly-identified files by total # of files that were actually pos/negative\n posRecall = float(correctPos)/(correctPos+falseNeg)\n negRecall = float(correctNeg)/(correctNeg+falsePos)\n\n #calculate precision by dividing correctly-identified positive by total # of files that were classified as pos/negative\n posPrec = float(correctPos)/(correctPos+falsePos)\n negPrec = float(correctNeg)/(correctNeg+falseNeg)\n\n #calculate f-measure using formula\n posF = (2*posRecall*posPrec)/(posRecall+posPrec)\n negF = (2*negRecall*negPrec)/(negRecall+negPrec)\n\n #macro-average positive and negative classes\n averageRecall = (posRecall+negRecall)/2\n averagePrec = (posPrec + negPrec)/2\n averageF = (posF + negF)/2\n\n #return macroaveraged recall, precision, f-measure\n return \"Recall: %f Precision: %f F-measure: %f\" % (averageRecall, averagePrec, averageF)\n\n\n def loadFile(self, sFilename):\n \"\"\"Given a file name, return the contents of the file as a string.\"\"\"\n sFilename = 'movies_reviews/' + sFilename\n f = open(sFilename, \"r\")\n sTxt = f.read()\n f.close()\n return sTxt\n \n def save(self, dObj, sFilename):\n \"\"\"Given an object and a file name, write the object to the file using pickle.\"\"\"\n\n f = open(sFilename, \"w\")\n p = pickle.Pickler(f)\n p.dump(dObj)\n f.close()\n \n def load(self, sFilename):\n \"\"\"Given a file name, load and return the object stored in the file.\"\"\"\n\n f = open(sFilename, \"r\")\n u = pickle.Unpickler(f)\n dObj = u.load()\n f.close()\n return dObj\n\n def tokenize(self, sText): \n \"\"\"Given a string of text sText, returns a list of the individual tokens that \n occur in that string (in order).\"\"\"\n\n lTokens = []\n sToken = \"\"\n for c in sText:\n if re.match(\"[a-zA-Z0-9]\", str(c)) != None or c == \"\\\"\" or c == \"_\" or c == \"-\":\n sToken += c\n else:\n if sToken != \"\":\n lTokens.append(sToken)\n sToken = \"\"\n if c.strip() != \"\":\n lTokens.append(str(c.strip()))\n \n if sToken != \"\":\n lTokens.append(sToken)\n\n return 
lTokens\n","sub_path":"bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":10990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"326015552","text":"# DO NOT USE THIS SCRIPT IN PLACES WHERE SECURITY IS IMPORTANT! Telnet is NOT a secure protocol.\n# This script is used in an isolated student/education environment.\n# It serves as a simple way for a lab assistant to reset device configurations after students do lab assignments.\n# Used for resetting Cisco IOS 12.X switches/routers.\n# Running this in an IDE will probably not display the password prompt, given how getpass() works.\n\nfrom mini_menu import Menu\nfrom telnet_device import TeleCisco, remove_telnet_chars\nimport os, uuid\n\n##################################################################\n# PUT THE STARTING DIRECTORY FOR LOCATING CONFIG FILES HERE\n# You have to use forward slashes instead of backslashes on Windows\nDEFAULT_CONFIGS_LOCATION = \"\"\n##################################################################\n\n\ndef find_single_line_value(config_as_list, starts_with_field):\n # Look inside list for element str that starts with starts_with_field value, return it.\n # return empty str if nothing found\n try:\n value = \"\".join([i for i in config_as_list if i.strip().startswith(starts_with_field)][0])\n value = value.replace(starts_with_field, \"\").strip()\n value = value.split(\" \")[0].strip() # Ensure there isn't extra garbage in the same line\n return value\n except IndexError:\n return \"\"\n\n\nclass UserMenu(Menu):\n # Handles most user command line interaction. Creates menus, prompts for user input, etc.\n\n def __init__(self):\n # Initial device connection + selection stuff, then show menu interface\n super().__init__()\n self.tele_instance = TeleCisco()\n self.configs_location = DEFAULT_CONFIGS_LOCATION\n self.config_file_path = \"\"\n self.config_file_name = \"\"\n self.config_file_selection()\n self.main_menu()\n\n def change_conf_file(self):\n self.configs_location = \"\"\n self.config_file_path = \"\"\n self.config_file_name = \"\"\n self.config_file_selection()\n\n def host_connect(self):\n try:\n if self.tele_instance.connection:\n self.tele_instance.username = \"\"\n self.tele_instance.password = \"\"\n self.tele_instance.connection = None\n self.tele_instance.is_privileged_user = False\n self.tele_instance.telnet_to_device()\n self.tele_instance.ios_login_and_elevate()\n except EOFError as e:\n # Connection probably terminated.\n print(e)\n\n def new_host_connection(self):\n self.tele_instance.host = \"\"\n self.host_connect()\n\n @staticmethod\n def get_path():\n path = \"\"\n while True:\n path = input(\"Enter a path to the directory to save the config in, excluding the file name:\")\n path = path.replace(\"\\\\\\\\\", \"\\\\\")\n if not os.path.isdir(path):\n print(\"Invalid directory path!\")\n continue\n try: # Test write permissions of the directory\n print(\"Testing write permissions...\")\n path_to_file = path + \"/\" + str(uuid.uuid4())\n with open(path_to_file,'w') as test_file:\n test_file.write(\"test\")\n os.remove(path_to_file)\n except Exception as e:\n print(\"Write issue:\",e)\n else:\n break\n return path\n\n def save_config(self, config_name):\n config_list = self.tele_instance.ios_fetch_and_store_conf(config_name, \"show\")\n print(\"(The file should be displayed below if no errors occurred).\")\n self.divider()\n print(\"\\n\".join(i for i in config_list))\n self.divider()\n inpt = input(\"Save the above config? 
[y/n]:\")\n if inpt.strip().lower() in ['n', 'no']:\n return\n path = self.get_path()\n unique_path = path + \"/\" + config_name + \"-\" + str(uuid.uuid4()) # Random uuid at end to make it unique\n with open(unique_path, 'w') as file_to_write_to:\n for line in config_list:\n file_to_write_to.write(line + \"\\n\")\n print(\"Saved at: \" + unique_path)\n\n def save_submenu(self):\n def save_running():\n self.save_config(\"running-config\")\n\n def save_startup():\n self.save_config(\"startup-config\")\n\n menu = {\n 1: save_running,\n 2: save_startup,\n }\n while True:\n selected_option = self.get_menu(\"SAVE\",\n [\n \"Save Current running-config to Local Machine.\",\n \"Save Current startup-config to Local Machine.\"\n ],\n \"*Enter a value or [r]return, [q]uit.\\n>>>\")\n if selected_option == 'r':\n return\n if not selected_option:\n continue\n try:\n menu[selected_option]()\n except KeyError:\n pass\n except (ConnectionAbortedError, EOFError) as e:\n self.tele_instance.connection = None\n print(\"\\n\", e)\n\n def start_over(self):\n self.tele_instance.host = \"\"\n self.tele_instance.username = \"\"\n self.tele_instance.password = \"\"\n self.tele_instance.connection = None\n self.tele_instance.is_privileged_user = False\n self.config_file_selection()\n\n def main_menu(self):\n # Displays main menu and gets user input\n menu = {\n 1: self.start_over,\n 2: self.new_host_connection,\n 3: self.change_conf_file,\n 4: self.view_submenu,\n 5: self.compare_submenu,\n 6: self.save_submenu,\n 7: self.update_submenu,\n 8: self.switch_to_cli\n }\n while True:\n path_display = self.config_file_path + self.config_file_name\n selected_option = self.get_menu(\"MAIN\",\n [\n \"Restart with New File & Host.\",\n \"Connected Host: \" + (self.tele_instance.host if (self.tele_instance.host and self.tele_instance.connection) else \"(NOT CONNECTED)\"),\n \"Using Config File: \" + (path_display if path_display.strip() else \"(NO PATH SELECTED)\"),\n \"View Configs.\",\n \"Compare Configs.\",\n \"Save Configs.\",\n \"Update Device Configs.\",\n \"Switch to Device CLI.\",\n ],\n \"*Enter a value or [q]uit.\\n>>>\",False)\n if selected_option == 'r':\n continue\n if not selected_option:\n continue\n try:\n menu[selected_option]()\n except KeyError:\n pass\n except (ConnectionAbortedError, EOFError, AttributeError) as e:\n self.tele_instance.connection = None\n print(\"\\n\", e)\n\n def view_temp_file(self):\n # Print out contents of temporary file stored on device.\n # This file is later deleted from the device after copying.\n config_as_list = self.tele_instance.ios_fetch_and_store_conf(self.tele_instance.TEMP_FILE_NAME, \"more\")\n if not config_as_list:\n print(\"Config file was either empty, not found, or not read properly.\")\n return True # Indicate not safe to copy\n print(\"(The file should be displayed below if no errors occurred).\")\n self.divider()\n print(\"\\n\".join(config_as_list))\n self.divider()\n host_name = find_single_line_value(config_as_list, \"hostname\")\n print(\"\\nHOSTNAME: \" + host_name if host_name else \"(Hostname not found in file)\")\n\n def view_selected_file(self):\n # Prints content of selected local config file.\n # Does not re-read from a local file, as it's stored as a list.\n if not self.tele_instance.config_file:\n print(\"No config file selected...\")\n return\n print(\"(The file should be displayed below if no errors occurred).\")\n self.divider()\n print(\"\\n\".join(self.tele_instance.config_file))\n self.divider()\n\n def switch_to_cli(self):\n print(\"Switching 
to command line. You will not be able to return to the program unless you restart it.\\n\"\n \"Press Enter several times to see the command line.\")\n self.tele_instance.connection.interact()\n\n def view_run(self):\n # Prints out content of device's current running-config\n config = \"\\n\".join(self.tele_instance.ios_fetch_and_store_conf(\"running-config\", \"show\"))\n print(\"(The file should be displayed below if no errors occurred).\")\n self.divider()\n print(config)\n self.divider()\n\n def view_startup(self):\n # Prints out content of device's current startup-config\n config = \"\\n\".join(self.tele_instance.ios_fetch_and_store_conf(\"startup-config\", \"show\"))\n print(\"(The file should be displayed below if no errors occurred).\")\n self.divider()\n print(config)\n self.divider()\n\n def view_submenu(self):\n # Displays submenu for viewing files.\n menu = {\n 1: self.view_run,\n 2: self.view_startup,\n 3: self.view_selected_file\n }\n\n while True:\n selected_option = Menu().get_menu(\"VIEW\",\n [\n \"View running-config.\",\n \"View startup-config.\",\n \"View Local Config.\"\n ],\n \"*Enter a value or [r]eturn, [q]uit.\\n>>>\")\n if selected_option == 'r':\n return\n if not selected_option:\n continue\n try:\n menu[selected_option]()\n except KeyError:\n pass\n\n def update_submenu(self):\n # Displays submenu for copying/updating various files.\n\n def cpy_running():\n # Tells device to copy the local config to the device as a temp file, then copy it to running-config\n self.tele_instance.ios_tclsh()\n if self.view_temp_file():\n print(\"Copy operation canceled.\")\n return # If temp file viewer indicated it's not safe to copy, then return instead\n if input(\"\\n*Try to copy this config to the running-config? [y/n]:\").strip().lower() in ['y', 'yes']:\n self.tele_instance.ios_copy_to_config(self.tele_instance.TEMP_FILE_NAME, \"running-config\")\n self.tele_instance.ios_remove_temp_file()\n\n def cpy_startup():\n # Tells device to copy the local config to the device as a temp file, then copy it to startup-config\n self.tele_instance.ios_tclsh()\n if self.view_temp_file():\n print(\"Copy operation canceled.\")\n return # If temp file viewer indicated it's not safe to copy, then return instead\n if input(\"\\n*Try to copy this config to the startup-config? [y/n]:\").strip().lower() in ['y', 'yes']:\n self.tele_instance.ios_copy_to_config(self.tele_instance.TEMP_FILE_NAME, \"startup-config\")\n self.tele_instance.ios_remove_temp_file()\n\n def cpy_startup_to_run():\n # Tells device to copy the startup-config to the running-config\n if input(\"\\n*Copy the startup-config to the running-config? [y/n]:\").strip().lower() in ['y', 'yes']:\n self.tele_instance.ios_copy_to_config(\"startup-config\", \"running-config\")\n\n def init_reload():\n # Tells device to reload, saving any changes made to the running-config\n if input(\"\\n*Reload the device? The telnet connection will close. 
[y/n]:\").strip().lower() in ['y', 'yes']:\n self.tele_instance.ios_reload()\n\n menu = {\n 1: cpy_running,\n 2: cpy_startup,\n 3: cpy_startup_to_run,\n 4: init_reload\n }\n while True:\n selected_option = Menu().get_menu(\"UPDATE\",\n [\n \"Copy Local Config to running-config.\",\n \"Copy Local Config to startup-config.\",\n \"Copy startup-config to running-config.\",\n \"Reload the device.\",\n ],\n \"*Enter a value or [r]eturn, [q]uit.\\n>>>\")\n if selected_option == 'r':\n return\n if not selected_option:\n continue\n try:\n menu[selected_option]()\n except KeyError:\n pass\n\n def compare_submenu(self):\n # Displays menu for viewing differences between various files.\n # Gets the configs upon entering submenu, so they aren't reloaded constantly.\n # Comparison functionality is fairly basic at the moment.\n current_running = self.tele_instance.ios_fetch_and_store_conf(\"running-config\", \"show\")\n current_startup = self.tele_instance.ios_fetch_and_store_conf(\"startup-config\", \"show\")\n\n def list_difference(list_1, list_2, list_1_name, list_2_name):\n # Shows differences of both lists\n # Field size surrounding file name label\n name_spacing = (len(list_1_name)) if len(list_1_name) > len(list_2_name) else (len(list_2_name))\n diff_list_1 = []\n diff_list_2 = []\n\n for line_no, list_1_element in enumerate(list_1):\n if list_1_element.strip() and list_1_element not in list_2:\n diff_list_1.append(str(line_no + 1).ljust(5,' ') + list_1_name.ljust(name_spacing,' ') + \": \" + list_1_element)\n for line_no, list_2_element in enumerate(list_2):\n if list_2_element.strip() and list_2_element not in list_1:\n diff_list_2.append(str(line_no + 1).ljust(5,' ') + list_2_name.ljust(name_spacing,' ') + \": \" + list_2_element)\n return diff_list_1, diff_list_2\n\n def format_list_diffs(list_1, list_2, file_name_1, file_name_2):\n # For printing diff lists nicely\n diff_list_1, diff_list_2 = list_difference(list_1, list_2, file_name_1, file_name_2)\n print(\"\\n\\n\") # Jump a couple lines to reduce immediate clutter\n self.divider()\n print(\"Lines in \" + file_name_1 + \", but not in \" + file_name_2 + \":\")\n self.divider()\n print(\"\\n\".join(diff_list_1))\n print(\"\\n\", end='')\n self.divider()\n print(\"Lines in \" + file_name_2 + \", but not in \" + file_name_1 + \":\")\n self.divider()\n print(\"\\n\".join(diff_list_2))\n print(\"\\n\", end='')\n self.divider()\n\n def run_vs_startup():\n # Prints out differences between these files, separated by line.\n format_list_diffs(current_running,current_startup,\"running-config\", \"startup-config\")\n\n def run_vs_selected():\n # Prints out differences between these files, separated by line.\n local_conf_name = \"(local config)\"\n format_list_diffs(current_running, self.tele_instance.config_file, \"running-config\",local_conf_name)\n\n def startup_vs_selected():\n # Prints out differences between these files, separated by line.\n local_conf_name = \"(local config)\"\n format_list_diffs(current_startup, self.tele_instance.config_file, \"startup-config\",local_conf_name)\n\n menu = {\n 1: run_vs_selected,\n 2: startup_vs_selected,\n 3: run_vs_startup\n }\n while True:\n selected_option = Menu().get_menu(\"COMPARE\",\n [\n \"Compare running-config to Local Config.\",\n \"Compare startup-config to Local Config.\",\n \"Compare running-config to startup-config.\"\n ],\n \"*Enter a value or [r]eturn, [q]uit.\\n>>>\")\n if selected_option == 'r':\n return\n if not selected_option:\n continue\n try:\n menu[selected_option]()\n except KeyError:\n 
pass\n\n def config_file_selection_prompts(self, config_as_list, abs_path, file_name):\n # Prompt for whether or not file should be used. Prompt for usage of hostname or password from config file\n host_name = find_single_line_value(config_as_list, \"hostname\") # Find \"hostname\" field in file\n self.tele_instance.password = find_single_line_value(config_as_list, \"password\") # Find \"password\" field in file\n self.tele_instance.username = find_single_line_value(config_as_list, \"username\") # Find \"username\" field in file\n\n print(\"\\nPATH: \" + abs_path + file_name)\n print(\"HOSTNAME: \" + host_name if host_name else \"(Hostname not found in file)\")\n\n user_approval = input(\"\\n*Continue using this file? [y/n]:\")\n if user_approval.strip().lower() in [\"y\", \"yes\"]:\n self.tele_instance.config_file = config_as_list\n self.config_file_path = abs_path\n self.config_file_name = file_name\n if host_name:\n use_this_host = input(\"\\n*Attempt to connect using the hostname '\" + host_name + \"'? [y/n]:\")\n if use_this_host.strip().lower() in [\"y\", \"yes\"]:\n self.tele_instance.host = host_name\n self.host_connect()\n elif not self.tele_instance.connection:\n self.new_host_connection()\n # Return True, indicating the user wants to use this file.\n return True\n else:\n return False\n\n def input_configs_location(self):\n # If user didn't specify DEFAULT_CONFIGS_LOCATION, prompt for it.\n if not self.configs_location:\n self.configs_location = \\\n input(\"Enter an absolute path to a config file repository or a config file itself:\")\n\n def config_file_selection(self):\n # Ensure user entered a file\n self.input_configs_location()\n # Keep displaying the menu until the user decides on a file\n use_this_file = False\n while not use_this_file:\n try:\n # If DEFAULT_CONFIGS_LOCATION is a directory, spawn a menu, else use that file\n if os.path.isdir(self.configs_location):\n abs_path, file_name = Menu().get_path_menu(self.configs_location)\n self.configs_location = abs_path # Move dir path here, in case user decides not to use file\n else:\n abs_path = os.path.abspath(self.configs_location)\n file_name = abs_path[abs_path.rfind(\"\\\\\") + 1:]\n abs_path = abs_path.replace(file_name, \"\")\n self.configs_location = abs_path # Move dir path here, in case user decides not to use file\n except ValueError:\n return\n except Exception as e:\n print(\"Config path issue:\", e, \"\\n\")\n return\n # Remove CRLF without stripping spaces\n try:\n local_config = list(remove_telnet_chars(i) for i in open(abs_path + file_name))\n except (UnicodeDecodeError, OSError) as e:\n print(\"Bad path:\", e)\n return\n except FileNotFoundError as e:\n print(\"File selected does not exist:\", e)\n continue\n print(\"(The file should be displayed below if no errors occurred).\")\n self.divider()\n print(\"\\n\".join(local_config))\n self.divider()\n # Ask user if they want to use the file + other prompts\n use_this_file = self.config_file_selection_prompts(local_config, abs_path, file_name)\n print(\"\\nFile Selected!\")\n\n\nUserMenu()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"79518438","text":"# Create a Person class method that returns the full name.\n\nclass Person:\n def __init__(self, name='', surname='', age=-1):\n self.name = name\n self.surname = surname\n self.age = age\n\n def full_name(self):\n \"\"\"Returns the name together with the surname\"\"\"\n\n return 
self.name + \" \" + self.surname\n\n def get_older(self, years=1):\n \"\"\"Увеличивает возраст на years лет, по умолчанию на 1\"\"\"\n\n self.age += years\n\n def __str__(self):\n \"\"\"Переопределяем преобразование в строку\"\"\"\n return \"\".format(self.name, self.surname)\n\n # def __add__(self, person):\n # \"\"\"Переовпределяем сложене людей\"\"\"\n # return Couple()\n\n# class Couple:\n#\n# def __init__(self):\n\n\n\n\nif __name__ == \"__main__\":\n Vasya = Person(\"Vasya\", \"Pupkin\", 18)\n print(Vasya.full_name())\n Vasya.get_older()\n print(Vasya.age)\n print(Vasya)\n print(str(Vasya))\n Petya = Person(\"Petya\", \"Sidorov\", 20)\n VasyaPetya = Vasya + Petya\n print(VasyaPetya)","sub_path":"Practice2.py","file_name":"Practice2.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"441607545","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\derek_2\\Google Drive\\nvda-addon-exploded\\notepad++\\scons-local-2.5.0\\SCons\\Tool\\gfortran.py\n# Compiled at: 2016-07-07 03:21:34\n\"\"\"SCons.Tool.gfortran\n\nTool-specific initialization for gfortran, the GNU Fortran 95/Fortran\n2003 compiler.\n\nThere normally shouldn't be any need to import this module directly.\nIt will usually be imported through the generic SCons.Tool.Tool()\nselection method.\n\n\"\"\"\n__revision__ = 'src/engine/SCons/Tool/gfortran.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog'\nimport SCons.Util, fortran\n\ndef generate(env):\n \"\"\"Add Builders and construction variables for gfortran to an\n Environment.\"\"\"\n fortran.generate(env)\n for dialect in ['F77', 'F90', 'FORTRAN', 'F95', 'F03', 'F08']:\n env['%s' % dialect] = 'gfortran'\n env['SH%s' % dialect] = '$%s' % dialect\n if env['PLATFORM'] in ('cygwin', 'win32'):\n env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS' % dialect)\n else:\n env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)\n env['INC%sPREFIX' % dialect] = '-I'\n env['INC%sSUFFIX' % dialect] = ''\n\n\ndef exists(env):\n return env.Detect('gfortran')","sub_path":"pycfiles/NVDA-addonTemplate-0.5.2/gfortran.py","file_name":"gfortran.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"90310766","text":"import os\nimport time\nimport argparse\nimport pandas as pd\nimport pickle as pkl\n\nfrom mu2e import mu2e_ext_path\nfrom hallprobesim_redux import *\nfrom mu2e.fieldfitter_redux2 import FieldFitter\n\ndef validatefit(param_load_name='Mau13', sample=None, savename='Bmaps/Mau13_standard_fit_df.p'):\n starttime = time.time()\n print('Loading Validation Grid Points')\n df_Mu2e = pd.read_pickle(mu2e_ext_path+'Bmaps/Mu2e_DSMap_V13.p')\n df_Mu2e = df_Mu2e.query(\"(R >= 25e-3) & (R <= 0.8) & (Z >= 4.2) & (Z <= 13.9)\")\n # df_Mu2e = df_Mu2e.query(\"(R >= 25e-3)\")\n if sample is not None:\n df_Mu2e = df_Mu2e.sample(sample)\n\n cfg_pickle_Mau13_recreate = cfg_pickle(use_pickle=True, save_pickle=False, load_name=param_load_name,save_name=param_load_name, recreate=True)\n\n print('Initializing FieldFitter')\n ff = FieldFitter(df_Mu2e)\n print('Running Fit')\n ff.fit(cfg_params_DS_Mau13, cfg_pickle_Mau13_recreate)\n print('Merging data fit residuals')\n ff.merge_data_fit_res()\n print('Saving to pickle')\n 
ff.input_data.to_pickle(mu2e_ext_path+savename)\n print(f'Completed in {(time.time()-starttime):.1f} seconds')\n\n\nif __name__=='__main__':\n # parse command line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--param_name', help='Parameter load name (from Mu2E package)')\n parser.add_argument('-s', '--save_name', help='Output dataframe save name (mu2e_ext_path already included)')\n parser.add_argument('-N', '--num_sample', help='Number of samples to calculate for (default no limit)')\n args = parser.parse_args()\n # fill defaults where needed\n if args.param_name is None:\n args.param_name = 'Mau13'\n if args.save_name is None:\n args.save_name = 'Bmaps/Mau13_standard_fit_df.p'\n if not args.num_sample is None:\n args.num_sample = int(args.num_sample)\n # run validation\n validatefit(args.param_name, args.num_sample, args.save_name)\n","sub_path":"scripts/FieldFitting/validate_Bfit.py","file_name":"validate_Bfit.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"448599105","text":"import os\nfrom pathlib import Path\n\nfrom hypergol.cli.create_task import create_task\nfrom hypergol.hypergol_project import HypergolProject\nfrom tests.cli.hypergol_create_test_case import HypergolCreateTestCase\n\nTEST_SOURCE = \"\"\"\nfrom hypergol import Source\n\n\nclass TestSource(Source):\n\n def __init__(self, exampleParameter, *args, **kwargs):\n super(TestSource, self).__init__(*args, **kwargs)\n # TODO: Source tasks are single threaded, no need for members to be pickle-able\n self.exampleParameter = exampleParameter\n\n def source_iterator(self):\n raise NotImplementedError(f'{self.__class__.__name__} must implement source_iterator()')\n # TODO: use yield in this function instead of return while your are consuming your source data\n yield exampleData\n\n def run(self, data):\n raise NotImplementedError(f'{self.__class__.__name__} must implement run()')\n return exampleOutputObject\n\"\"\".lstrip()\n\nTEST_TASK = \"\"\"\nfrom hypergol import SimpleTask\n\n\nclass TestTask(SimpleTask):\n\n def __init__(self, exampleParameter, *args, **kwargs):\n super(TestTask, self).__init__(*args, **kwargs)\n # TODO: all member variables must be pickle-able, otherwise use the \"Delayed\" methodology\n # TODO: (e.g. for a DB connection), see the documentation \n self.exampleParameter = exampleParameter\n\n def init(self):\n # TODO: initialise members that are NOT \"Delayed\" here (e.g. 
load spacy model)\n pass\n\n def run(self, exampleInputObject1, exampleInputObject2):\n raise NotImplementedError(f'{self.__class__.__name__} must implement run()')\n return exampleOutputObject\n\"\"\".lstrip()\n\n\nclass TestCreateTask(HypergolCreateTestCase):\n\n def __init__(self, methodName):\n super(TestCreateTask, self).__init__(projectName='TestProject', methodName=methodName)\n self.allPaths = [\n Path(self.projectDirectory, 'tasks', 'test_task.py'),\n Path(self.projectDirectory, 'tasks'),\n Path(self.projectDirectory)\n ]\n self.project = None\n\n def setUp(self):\n super().setUp()\n self.project = HypergolProject(projectDirectory=self.projectDirectory)\n self.project.create_project_directory()\n self.project.create_tasks_directory()\n\n def test_create_task_creates_files(self):\n create_task(className='TestTask', projectDirectory=self.projectDirectory, simple=True)\n for filePath in self.allPaths:\n self.assertEqual(os.path.exists(filePath), True)\n\n def test_create_task_creates_content(self):\n content = create_task(className='TestTask', projectDirectory=self.projectDirectory, simple=True, dryrun=True)\n self.assertEqual(content, TEST_TASK)\n\n def test_create_task_creates_content_source(self):\n content = create_task(className='TestSource', source=True, simple=False, projectDirectory=self.projectDirectory, dryrun=True)\n self.assertEqual(content, TEST_SOURCE)\n","sub_path":"src/tests/cli/test_create_task.py","file_name":"test_create_task.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}